Merge branch 'feature/freertos_10.4.3_remove_multicore_queue_locks' into 'master'
FreeRTOS(IDF): Remove/restore queue locks for multicore/single-core respectively (1)

Closes IDF-5740

See merge request espressif/esp-idf!19763
@@ -54,11 +54,21 @@
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

/* Vanilla FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
 * calls are deterministic (as queue locks use scheduler suspension instead of
 * critical sections). However, the SMP implementation is not deterministic
 * anyways, so use of queue locks can be dropped (replaced with a critical
 * section) in exchange for better queue performance. */
#if ( configNUM_CORES > 1 )
    #define queueUSE_LOCKS    0
    #define queueUNLOCKED     ( ( int8_t ) 0 )
#else
    #define queueUSE_LOCKS    1
    /* Constants used with the cRxLock and cTxLock structure members. */
    #define queueUNLOCKED             ( ( int8_t ) -1 )
    #define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
    #define queueINT8_MAX             ( ( int8_t ) 127 )
#endif /* configNUM_CORES > 1 */
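Note: the block above is the heart of the change, a single compile-time switch. A minimal, host-compilable sketch of how the two build flavours diverge (illustrative only; configNUM_CORES is normally supplied by the port through FreeRTOSConfig.h, not hard-coded):

#include <stdio.h>

/* Stand-in for FreeRTOSConfig.h; on a real target the port supplies this. */
#define configNUM_CORES    2

#if ( configNUM_CORES > 1 )
    #define queueUSE_LOCKS    0
#else
    #define queueUSE_LOCKS    1
#endif

int main( void )
{
    #if ( queueUSE_LOCKS == 1 )
        puts( "single-core build: queue locks + scheduler suspension" );
    #else
        puts( "multicore build: per-queue spinlock critical sections" );
    #endif

    return 0;
}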

/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area. When the

@@ -121,8 +131,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
    UBaseType_t uxLength;   /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize; /*< The size of each item that the queue will hold. */

    #if ( queueUSE_LOCKS == 1 )
        volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
        volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
    #endif

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */

@@ -136,9 +148,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #ifdef ESP_PLATFORM

        portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */
    #endif // ESP_PLATFORM
} xQUEUE;
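On the single-core (queueUSE_LOCKS == 1) path, cRxLock/cTxLock double as a lock flag and a deferred-event counter: queueUNLOCKED means unlocked, queueLOCKED_UNMODIFIED means locked with no events yet, and each ISR-side event increments the counter. A small host-side simulation of that convention (the helper name isr_send_while_locked is hypothetical, not IDF code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )

static int8_t cTxLock = queueUNLOCKED;

/* What an ISR-side send does while the queue is locked: it may not touch
 * the event lists, so it only counts how many items were posted. */
static void isr_send_while_locked( void )
{
    assert( cTxLock != queueUNLOCKED );  /* only meaningful while locked */
    assert( cTxLock != queueINT8_MAX );  /* counter must not saturate */
    cTxLock = ( int8_t ) ( cTxLock + 1 );
}

int main( void )
{
    cTxLock = queueLOCKED_UNMODIFIED;    /* what prvLockQueue establishes */
    isr_send_while_locked();
    isr_send_while_locked();
    printf( "wakeups deferred until unlock: %d\n", cTxLock ); /* prints 2 */
    cTxLock = queueUNLOCKED;             /* prvUnlockQueue drains, then unlocks */
    return 0;
}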

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t

@@ -171,12 +182,14 @@ typedef xQUEUE Queue_t;
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

    #ifdef ESP_PLATFORM

        /* Spinlock required in SMP when accessing the queue registry */
        static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
    #endif // ESP_PLATFORM

#endif /* configQUEUE_REGISTRY_SIZE */

#if ( queueUSE_LOCKS == 1 )

/*
 * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent

@@ -200,6 +213,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
#endif /* queueUSE_LOCKS == 1 */

/*
 * Copies an item into the queue, either at the front of the queue or the

@@ -256,6 +270,8 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
#endif
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )

/*
 * Macro to mark a queue as locked. Locking a queue prevents an ISR from
 * accessing the queue event lists.

@@ -273,6 +289,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
        }                                                      \
    }                                                          \
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/
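The hunk above shows only the tail of the prvLockQueue macro. For orientation, a sketch of the full locking idiom that tail closes, reconstructed from the visible lines and the vanilla kernel's macro of the same name (a sketch, not a verbatim quote of this revision):

/* Mark both directions of the queue locked, under the queue's spinlock.
 * queueUNLOCKED / queueLOCKED_UNMODIFIED are the constants defined above. */
#define prvLockQueue( pxQueue )                                \
    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );            \
    {                                                          \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )            \
        {                                                      \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;     \
        }                                                      \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )            \
        {                                                      \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;     \
        }                                                      \
    }                                                          \
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )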

BaseType_t xQueueGenericReset( QueueHandle_t xQueue,

@@ -282,12 +299,10 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,

    configASSERT( pxQueue );

    #ifdef ESP_PLATFORM
        if( xNewQueue == pdTRUE )
        {
            portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
        }
    #endif // ESP_PLATFORM

    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
    {

@@ -295,8 +310,12 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
        #if ( queueUSE_LOCKS == 1 )
        {
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;
        }
        #endif /* queueUSE_LOCKS == 1 */

        if( xNewQueue == pdFALSE )
        {

@@ -523,9 +542,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,

    /* In case this is a recursive mutex. */
    pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
    #ifdef ESP_PLATFORM

        /* Initialize the mutex's spinlock */
        portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
    #endif // ESP_PLATFORM

    traceCREATE_MUTEX( pxNewQueue );

    /* Start with the semaphore in the expected state. */

@@ -955,17 +975,43 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* If queue locks ARE NOT being used:
         * - At this point, the queue is full and entry time has been set
         * - We simply check for a time out, block if not timed out, or
         * return an error if we have timed out. */
        #if ( queueUSE_LOCKS == 0 )
        {
            /* Update the timeout state to see if it has expired yet. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
            {
                /* Not timed out yet. Block the current task. */
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                portYIELD_WITHIN_API();
            }
            else
            {
                /* We have timed out. Return an error. */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                traceQUEUE_SEND_FAILED( pxQueue );
                return errQUEUE_FULL;
            }
        }
        #endif /* queueUSE_LOCKS == 0 */
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

    /* If queue locks ARE being used:
     * - At this point, the queue is full and entry time has been set
     * - We follow the original procedure of locking the queue before
     * attempting to block. */
    #if ( queueUSE_LOCKS == 1 )
    {
        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        #ifdef ESP_PLATFORM /* IDF-3755 */
            taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */

@@ -980,19 +1026,15 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 * event list. It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                 * ready last instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * task is already in a ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else
                    if( xTaskResumeAll() == pdFALSE )
                #endif // ESP_PLATFORM
                {
                    portYIELD_WITHIN_API();
                }

@@ -1001,26 +1043,20 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else
                    ( void ) xTaskResumeAll();
                #endif // ESP_PLATFORM
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            #ifdef ESP_PLATFORM /* IDF-3755 */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
            #else
                ( void ) xTaskResumeAll();
            #endif // ESP_PLATFORM

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
    #endif /* queueUSE_LOCKS == 1 */
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
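Seen from the API, both strategies behave identically; only the latency characteristics differ. A short usage sketch of the send path whose internals changed above (illustrative; assumes a queue of uint32_t items created elsewhere with xQueueCreate()):

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

/* Illustrative producer: a timed-out send surfaces the errQUEUE_FULL
 * result produced by xQueueGenericSend() on both code paths above. */
static void vProduce( QueueHandle_t xQueue, uint32_t ulValue )
{
    if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 50 ) ) != pdPASS )
    {
        /* Queue stayed full for 50 ms. */
    }
}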

@@ -1059,13 +1095,21 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
    #if ( configNUM_CORES > 1 )
        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );

        ( void ) uxSavedInterruptStatus;
    #else
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    #endif
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            #if ( queueUSE_LOCKS == 1 )
                const int8_t cTxLock = pxQueue->cTxLock;
            #else
                /* Queue locks not used, so we treat it as unlocked. */
                const int8_t cTxLock = queueUNLOCKED;
            #endif /* queueUSE_LOCKS == 1 */
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

@@ -1172,11 +1216,17 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
            #endif /* configUSE_QUEUE_SETS */
        }
        else
        {
            #if ( queueUSE_LOCKS == 1 )
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                configASSERT( cTxLock != queueINT8_MAX );

                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }
            #endif /* queueUSE_LOCKS == 1 */
        }

        xReturn = pdPASS;
    }

@@ -1185,10 +1235,12 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
        traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
        xReturn = errQUEUE_FULL;
    }

    taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    }
    #if ( configNUM_CORES > 1 )
        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    #else
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
    #endif

    return xReturn;
}
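All of the FromISR functions in this commit switch their protection primitive the same way. The pattern, isolated as a sketch (uxSavedInterruptStatus and the port macros are as used in the function above):

/* Sketch of the ISR-side protection pattern used above. On multicore the
 * per-queue spinlock must be taken, because another core may be inside the
 * same queue; on single core, masking interrupts on this core is enough. */
#if ( configNUM_CORES > 1 )
    taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    ( void ) uxSavedInterruptStatus; /* unused on this path */
#else
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif

/* ... queue access, all of it O(1) and non-blocking ... */

#if ( configNUM_CORES > 1 )
    taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif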

@@ -1234,10 +1286,13 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
    #if ( configNUM_CORES > 1 )
        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );

        ( void ) uxSavedInterruptStatus;
    #else
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    #endif
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever

@@ -1245,7 +1300,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            #if ( queueUSE_LOCKS == 1 )
                const int8_t cTxLock = pxQueue->cTxLock;
            #else
                /* Queue locks not used, so we treat it as unlocked. */
                const int8_t cTxLock = queueUNLOCKED;
            #endif /* queueUSE_LOCKS == 1 */

            traceQUEUE_GIVE_FROM_ISR( pxQueue );

@@ -1342,6 +1402,8 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
            #endif /* configUSE_QUEUE_SETS */
        }
        else
        {
            #if ( queueUSE_LOCKS == 1 )
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */

@@ -1349,6 +1411,8 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,

                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }
            #endif /* queueUSE_LOCKS == 1 */
        }

        xReturn = pdPASS;
    }

@@ -1357,10 +1421,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
        traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
        xReturn = errQUEUE_FULL;
    }

    taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    }
    #if ( configNUM_CORES > 1 )
        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    #else
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
    #endif

    return xReturn;
}

@@ -1451,17 +1517,43 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* If queue locks ARE NOT being used:
         * - At this point, the queue is empty and entry time has been set
         * - We simply check for a time out, block if not timed out, or
         * return an error if we have timed out. */
        #if ( queueUSE_LOCKS == 0 )
        {
            /* Update the timeout state to see if it has expired yet. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
            {
                /* Not timed out yet. Block the current task. */
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                portYIELD_WITHIN_API();
            }
            else
            {
                /* We have timed out. Return an error. */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
        }
        #endif /* queueUSE_LOCKS == 0 */
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

    /* If queue locks ARE being used:
     * - At this point, the queue is empty and entry time has been set
     * - We follow the original procedure for locking the queue before
     * attempting to block. */
    #if ( queueUSE_LOCKS == 1 )
    {
        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        #ifdef ESP_PLATFORM /* IDF-3755 */
            taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */

@@ -1474,32 +1566,22 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else

                if( xTaskResumeAll() == pdFALSE )
                #endif // ESP_PLATFORM
                {
                    portYIELD_WITHIN_API();
                }

                #ifndef ESP_PLATFORM
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                #endif // ESP_PLATFORM
            }
            else
            {
                /* The queue contains data again. Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else
                    ( void ) xTaskResumeAll();
                #endif // ESP_PLATFORM
            }
        }
        else

@@ -1507,11 +1589,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
            /* Timed out. If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            #ifdef ESP_PLATFORM /* IDF-3755 */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
            #else
                ( void ) xTaskResumeAll();
            #endif // ESP_PLATFORM

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {

@@ -1523,6 +1601,8 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    #endif /* queueUSE_LOCKS == 1 */
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
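For reference, a minimal consumer built on the xQueueReceive() path patched above; the API contract is identical on the locks and no-locks builds (illustrative task, not part of the diff):

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/task.h"

static void vConsumerTask( void * pvParameters )
{
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulItem;

    for( ; ; )
    {
        /* Block for up to 100 ms waiting for an item. */
        if( xQueueReceive( xQueue, &ulItem, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            /* ... process ulItem ... */
        }
        else
        {
            /* Timed out: internally this is the errQUEUE_EMPTY path above. */
        }
    }
}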

@@ -1641,17 +1721,69 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* If queue locks ARE NOT being used:
         * - At this point, the semaphore/mutex is empty/held and entry time
         * has been set.
         * - We simply check for a time out, inherit priority and block if
         * not timed out, or return an error if we have timed out. */
        #if ( queueUSE_LOCKS == 0 )
        {
            /* Update the timeout state to see if it has expired yet. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
            {
                /* Not timed out yet. If this is a mutex, make the holder
                 * inherit our priority, then block the current task. */
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                portYIELD_WITHIN_API();
            }
            else
            {
                /* We have timed out. If this is a mutex, make the holder
                 * disinherit our priority, then return an error. */
                #if ( configUSE_MUTEXES == 1 )
                {
                    if( xInheritanceOccurred != pdFALSE )
                    {
                        UBaseType_t uxHighestWaitingPriority;
                        uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                        vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                    }
                }
                #endif /* configUSE_MUTEXES */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
        }
        #endif /* queueUSE_LOCKS == 0 */
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

    /* If queue locks ARE being used:
     * - At this point, the semaphore/mutex is empty/held and entry time
     * has been set.
     * - We follow the original procedure for locking the queue, inheriting
     * priority, then attempting to block. */
    #if ( queueUSE_LOCKS == 1 )
    {
        /* Interrupts and other tasks can give to and take from the semaphore
         * now the critical section has been exited. */

        #ifdef ESP_PLATFORM /* IDF-3755 */
            taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */

@@ -1684,43 +1816,29 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else

                if( xTaskResumeAll() == pdFALSE )
                #endif // ESP_PLATFORM
                {
                    portYIELD_WITHIN_API();
                }

                #ifndef ESP_PLATFORM
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                #endif // ESP_PLATFORM
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                 * attempt to take the semaphore again. */
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else
                    ( void ) xTaskResumeAll();
                #endif // ESP_PLATFORM
            }
        }
        else
        {
            /* Timed out. */
            prvUnlockQueue( pxQueue );
            #ifdef ESP_PLATFORM /* IDF-3755 */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
            #else
                ( void ) xTaskResumeAll();
            #endif // ESP_PLATFORM

            /* If the semaphore count is 0 exit now as the timeout has
             * expired. Otherwise return to attempt to take the semaphore that is

@@ -1760,6 +1878,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    #endif /* queueUSE_LOCKS == 1 */
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
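xSemaphoreTake() on a mutex routes through xQueueSemaphoreTake(), so a timed-out take is what exercises the inherit/disinherit paths above. A hedged usage sketch (xMutex is assumed to be created elsewhere with xSemaphoreCreateMutex()):

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xMutex;

static void vWorker( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
        {
            /* While we waited, the holder may have temporarily inherited
             * our priority (configUSE_MUTEXES == 1). */
            /* ... critical work ... */
            xSemaphoreGive( xMutex );
        }
        /* On timeout, any inherited priority is wound back via
         * vTaskPriorityDisinheritAfterTimeout(), as in the hunk above. */
    }
}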

@@ -1857,17 +1977,43 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* If queue locks ARE NOT being used:
         * - At this point, the queue is empty and entry time has been set
         * - We simply check for a time out, block if not timed out, or
         * return an error if we have timed out. */
        #if ( queueUSE_LOCKS == 0 )
        {
            /* Update the timeout state to see if it has expired yet. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
            {
                /* Not timed out yet. Block the current task. */
                traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                portYIELD_WITHIN_API();
            }
            else
            {
                /* We have timed out. Return an error. */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                traceQUEUE_PEEK_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
        }
        #endif /* queueUSE_LOCKS == 0 */
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

    /* If queue locks ARE being used:
     * - At this point, the queue is empty and entry time has been set
     * - We follow the original procedure for locking the queue before
     * attempting to block. */
    #if ( queueUSE_LOCKS == 1 )
    {
        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        #ifdef ESP_PLATFORM /* IDF-3755 */
            taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */

@@ -1880,32 +2026,22 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else

                if( xTaskResumeAll() == pdFALSE )
                #endif // ESP_PLATFORM
                {
                    portYIELD_WITHIN_API();
                }

                #ifndef ESP_PLATFORM
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                #endif // ESP_PLATFORM
            }
            else
            {
                /* There is data in the queue now, so don't enter the blocked
                 * state, instead return to try and obtain the data. */
                prvUnlockQueue( pxQueue );
                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                #else
                    ( void ) xTaskResumeAll();
                #endif // ESP_PLATFORM
            }
        }
        else

@@ -1913,11 +2049,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
            /* The timeout has expired. If there is still no data in the queue
             * exit, otherwise go back and try to read the data again. */
            prvUnlockQueue( pxQueue );
            #ifdef ESP_PLATFORM /* IDF-3755 */
                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
            #else
                ( void ) xTaskResumeAll();
            #endif // ESP_PLATFORM

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {

@@ -1929,6 +2061,8 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    #endif /* queueUSE_LOCKS == 1 */
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
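A brief illustration of the peek semantics implemented above (assumes a QueueHandle_t xQueue holding uint32_t items): a peek copies the item out but leaves it queued, so only a later xQueueReceive() consumes it.

    uint32_t ulFirst;

    /* Non-blocking peek: returns pdPASS without removing the item. */
    if( xQueuePeek( xQueue, &ulFirst, 0 ) == pdPASS )
    {
        /* ulFirst is a copy; the queue contents are unchanged, so a second
         * xQueuePeek() here would return the same value. */
    }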

@@ -1960,16 +2094,24 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
    #if ( configNUM_CORES > 1 )
        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );

        ( void ) uxSavedInterruptStatus;
    #else
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    #endif
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if( uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            #if ( queueUSE_LOCKS == 1 )
                const int8_t cRxLock = pxQueue->cRxLock;
            #else
                /* Queue locks not used, so we treat it as unlocked. */
                const int8_t cRxLock = queueUNLOCKED;
            #endif /* queueUSE_LOCKS == 1 */

            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

@@ -2008,6 +2150,8 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
            }
        }
        else
        {
            #if ( queueUSE_LOCKS == 1 )
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was removed while it was locked. */

@@ -2015,6 +2159,8 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,

                pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
            }
            #endif /* queueUSE_LOCKS == 1 */
        }

        xReturn = pdPASS;
    }

@@ -2023,10 +2169,13 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
        xReturn = pdFAIL;
        traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
    }

    taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    }
    #if ( configNUM_CORES > 1 )
        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    #else
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
    #endif

    return xReturn;
}

@@ -2060,8 +2209,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    #if ( configNUM_CORES > 1 )
        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
        ( void ) uxSavedInterruptStatus;
    #else
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    #endif
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )

@@ -2082,8 +2235,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    #if ( configNUM_CORES > 1 )
        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    #else
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
    #endif

    return xReturn;
}

@@ -2338,6 +2495,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
}
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static void prvUnlockQueue( Queue_t * const pxQueue )
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

@@ -2456,8 +2614,11 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
#endif /* queueUSE_LOCKS == 1 */

/*-----------------------------------------------------------*/
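prvUnlockQueue() (guarded above) is what converts a counted-up cTxLock back into task wakeups. A simplified sketch of its Tx-side drain loop, under the same critical-section convention as the rest of the file (the function name here is hypothetical; the real function also services queue sets and repeats the idea for cRxLock):

/* Simplified sketch of the Tx-side drain performed by prvUnlockQueue().
 * listLIST_IS_EMPTY(), xTaskRemoveFromEventList() and vTaskMissedYield()
 * are the real kernel primitives involved. */
static void prvUnlockQueueTxSideSketch( Queue_t * const pxQueue )
{
    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* One deferred wakeup per item an ISR posted while the queue
         * was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                {
                    /* A context switch is pending, not performed here. */
                    vTaskMissedYield();
                }

                --cTxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}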

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
    BaseType_t xReturn;

@@ -2477,6 +2638,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )

    return xReturn;
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )

@@ -2499,13 +2661,12 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
{
    BaseType_t xReturn;

    #ifndef ESP_PLATFORM
        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
    #endif
    taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {

@@ -2516,12 +2677,11 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
            xReturn = pdFALSE;
        }
    }
    #ifndef ESP_PLATFORM
        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
    #endif
    taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );

    return xReturn;
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

@@ -2949,6 +3109,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 * so it should be called with the scheduler locked and not from a critical
 * section. */

#if ( queueUSE_LOCKS == 1 )

    /* Only do anything if there are no messages in the queue. This function
     * will not actually cause the task to block, just place it on a blocked
     * list. It will not block until the scheduler is unlocked - at which

@@ -2956,6 +3118,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     * the queue is locked, and the calling task blocks on the queue, then the
     * calling task will be immediately unblocked when the queue is unlocked. */
    prvLockQueue( pxQueue );
#else

    /* If queue locks are not used, we use a critical section instead
     * to ensure thread safety. */
    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#endif /* queueUSE_LOCKS == 1 */

    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
    {

@@ -2967,7 +3135,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( queueUSE_LOCKS == 1 )
        prvUnlockQueue( pxQueue );
    #else
        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
    #endif /* queueUSE_LOCKS == 1 */
}

#endif /* configUSE_TIMERS */

@@ -3095,15 +3267,19 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/* This function must be called from a critical section. */

configASSERT( pxQueueSetContainer );

/* Acquire the Queue set's spinlock */
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

/* We need to also acquire the queue set's spinlock as well. */
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{
    #if ( queueUSE_LOCKS == 1 )
        const int8_t cTxLock = pxQueueSetContainer->cTxLock;
    #else
        /* Queue locks not used, so we treat it as unlocked. */
        const int8_t cTxLock = queueUNLOCKED;
    #endif /* queueUSE_LOCKS == 1 */

    traceQUEUE_SET_SEND( pxQueueSetContainer );

@@ -3130,18 +3306,22 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
        }
    }
    else
    {
        #if ( queueUSE_LOCKS == 1 )
        {
            configASSERT( cTxLock != queueINT8_MAX );

            pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
        }
        #endif /* queueUSE_LOCKS == 1 */
    }
}
else
{
    mtCOVERAGE_TEST_MARKER();
}

/* Release the Queue set's spinlock */
/* Release the previously acquired queue set's spinlock. */
taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

return xReturn;

@@ -97,6 +97,9 @@ entries:
event_groups: xEventGroupSetBits (default)
event_groups: xEventGroupSync (default)
event_groups: vEventGroupDelete (default)
if FREERTOS_UNICORE = y:
    queue: prvUnlockQueue (default)
    queue: prvIsQueueEmpty (default)
    queue: prvIsQueueFull (default)
    queue: prvInitialiseNewQueue (default)
    queue: prvInitialiseMutex (default)