diff --git a/components/freertos/FreeRTOS-Kernel/queue.c b/components/freertos/FreeRTOS-Kernel/queue.c index 10c264acec..e5929a21fe 100644 --- a/components/freertos/FreeRTOS-Kernel/queue.c +++ b/components/freertos/FreeRTOS-Kernel/queue.c @@ -54,11 +54,21 @@ * correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ - -/* Constants used with the cRxLock and cTxLock structure members. */ -#define queueUNLOCKED ( ( int8_t ) -1 ) -#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) -#define queueINT8_MAX ( ( int8_t ) 127 ) +/* Vanilla FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList() + * calls are deterministic (as queue locks use scheduler suspension instead of + * critical sections). However, the SMP implementation is not deterministic + * anyway, so use of queue locks can be dropped (replaced with a critical + * section) in exchange for better queue performance. */ +#if ( configNUM_CORES > 1 ) + #define queueUSE_LOCKS 0 + #define queueUNLOCKED ( ( int8_t ) 0 ) +#else + #define queueUSE_LOCKS 1 + /* Constants used with the cRxLock and cTxLock structure members. */ + #define queueUNLOCKED ( ( int8_t ) -1 ) + #define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + #define queueINT8_MAX ( ( int8_t ) 127 ) +#endif /* configNUM_CORES > 1 */ /* When the Queue_t structure is used to represent a base queue its pcHead and * pcTail members are used as pointers into the queue storage area. When the @@ -69,8 +79,8 @@ * is maintained. The QueuePointers_t and SemaphoreData_t types are used to form * a union as their usage is mutually exclusive dependent on what the queue is * being used for. */ -#define uxQueueType pcHead -#define queueQUEUE_IS_MUTEX NULL +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL typedef struct QueuePointers { @@ -121,8 +131,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #if ( queueUSE_LOCKS == 1 ) + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #endif #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory.
*/ @@ -136,9 +148,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxQueueNumber; uint8_t ucQueueType; #endif - #ifdef ESP_PLATFORM - portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */ - #endif // ESP_PLATFORM + + portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */ } xQUEUE; /* The old xQUEUE name is maintained above then typedefed to the new Queue_t @@ -171,12 +182,14 @@ typedef xQUEUE Queue_t; * The pcQueueName member of a structure being NULL is indicative of the * array position being vacant. */ PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; - #ifdef ESP_PLATFORM - /* Spinlock required in SMP when accessing the queue registry */ - static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED; - #endif // ESP_PLATFORM + +/* Spinlock required in SMP when accessing the queue registry */ + static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED; + #endif /* configQUEUE_REGISTRY_SIZE */ +#if ( queueUSE_LOCKS == 1 ) + /* * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not * prevent an ISR from adding or removing items to the queue, but does prevent @@ -185,21 +198,22 @@ typedef xQUEUE Queue_t; * to indicate that a task may require unblocking. When the queue in unlocked * these lock counts are inspected, and the appropriate action taken. */ -static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any data in a queue. * * @return pdTRUE if the queue contains no items, otherwise pdFALSE. */ -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any space in a queue. * * @return pdTRUE if there is no space, otherwise pdFALSE; */ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; +#endif /* queueUSE_LOCKS == 1 */ /* * Copies an item into the queue, either at the front of the queue or the @@ -256,11 +270,13 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, #endif /*-----------------------------------------------------------*/ +#if ( queueUSE_LOCKS == 1 ) + /* * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. 
*/ -#define prvLockQueue( pxQueue ) \ + #define prvLockQueue( pxQueue ) \ taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \ { \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ @@ -273,6 +289,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, } \ } \ taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ) +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -282,12 +299,10 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, configASSERT( pxQueue ); - #ifdef ESP_PLATFORM - if( xNewQueue == pdTRUE ) - { - portMUX_INITIALIZE( &( pxQueue->xQueueLock ) ); - } - #endif // ESP_PLATFORM + if( xNewQueue == pdTRUE ) + { + portMUX_INITIALIZE( &( pxQueue->xQueueLock ) ); + } taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { @@ -295,8 +310,12 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->pcWriteTo = pxQueue->pcHead; pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - pxQueue->cRxLock = queueUNLOCKED; - pxQueue->cTxLock = queueUNLOCKED; + #if ( queueUSE_LOCKS == 1 ) + { + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + } + #endif /* queueUSE_LOCKS == 1 */ if( xNewQueue == pdFALSE ) { @@ -523,9 +542,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, /* In case this is a recursive mutex. */ pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; - #ifdef ESP_PLATFORM - portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) ); - #endif // ESP_PLATFORM + + /* Initialize the mutex's spinlock */ + portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) ); + traceCREATE_MUTEX( pxNewQueue ); /* Start with the semaphore in the expected state. */ @@ -955,72 +975,88 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } + + /* If queue locks ARE NOT being used: + * - At this point, the queue is full and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) + { + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } + #endif /* queueUSE_LOCKS == 0 */ } taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - vTaskSuspendAll(); - #endif // ESP_PLATFORM - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - if( prvIsQueueFull( pxQueue ) != pdFALSE ) + /* If queue locks ARE being used: + * - At this point, the queue is full and entry time has been set + * - We follow the original procedure of locking the queue before + * attempting to block. 
*/ + #if ( queueUSE_LOCKS == 1 ) { - traceBLOCKING_ON_QUEUE_SEND( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ - /* Unlocking the queue means queue events can effect the - * event list. It is possible that interrupts occurring now - * remove this task from the event list again - but as the - * scheduler is suspended the task will go onto the pending - * ready list instead of the actual ready list. */ - prvUnlockQueue( pxQueue ); + vTaskSuspendAll(); + prvLockQueue( pxQueue ); - /* Resuming the scheduler will move tasks from the pending - * ready list into the ready list - so it is feasible that this - * task is already in the ready list before it yields - in which - * case the yield will not cause a context switch unless there - * is also a higher priority task in the pending ready list. */ - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - if( xTaskResumeAll() == pdFALSE ) - #endif // ESP_PLATFORM + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { - portYIELD_WITHIN_API(); + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can affect the + * event list. It is possible that interrupts occurring now + * remove this task from the event list again - but as the + * scheduler is suspended the task will go onto the pending + * ready list instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + * ready list into the ready list - so it is feasible that this + * task is already in a ready list before it yields - in which + * case the yield will not cause a context switch unless there + * is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } } - else - { - /* Try again. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - } - } - else - { - /* The timeout has expired. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - - traceQUEUE_SEND_FAILED( pxQueue ); - return errQUEUE_FULL; - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1059,13 +1095,21 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post).
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { + #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); - + ( void ) uxSavedInterruptStatus; + #else + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + #endif + { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; traceQUEUE_SEND_FROM_ISR( pxQueue ); @@ -1173,9 +1217,15 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. */ + configASSERT( cTxLock != queueINT8_MAX ); + + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -1185,10 +1235,12 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); xReturn = errQUEUE_FULL; } - - taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + #else + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #endif return xReturn; } @@ -1234,10 +1286,13 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { + #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); - + ( void ) uxSavedInterruptStatus; + #else + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + #endif + { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; /* When the queue is used to implement a semaphore no data is ever @@ -1245,7 +1300,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * space'. */ if( uxMessagesWaiting < pxQueue->uxLength ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_GIVE_FROM_ISR( pxQueue ); @@ -1343,11 +1403,15 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - configASSERT( cTxLock != queueINT8_MAX ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. 
*/ + configASSERT( cTxLock != queueINT8_MAX ); - pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -1357,10 +1421,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue ); xReturn = errQUEUE_FULL; } - - taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + #else + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #endif return xReturn; } @@ -1451,78 +1517,92 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } + + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) + { + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ } taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - vTaskSuspendAll(); - #endif // ESP_PLATFORM - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* The timeout has not expired. If the queue is still empty place - * the task on the list of tasks waiting to receive from the queue. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - if( xTaskResumeAll() == pdFALSE ) - #endif // ESP_PLATFORM - { - portYIELD_WITHIN_API(); - } + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ - #ifndef ESP_PLATFORM + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + * the task on the list of tasks waiting to receive from the queue. 
*/ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + * data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + * back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } else { mtCOVERAGE_TEST_MARKER(); } - #endif // ESP_PLATFORM + } } - else - { - /* The queue contains data again. Loop back to try and read the - * data. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - } - } - else - { - /* Timed out. If there is no data in the queue exit, otherwise loop - * back and attempt to read the data. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1641,125 +1721,165 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } + + /* If queue locks ARE NOT being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We simply check for a time out, inherit priority and block if + * not timed out, or return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) + { + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Not timed out yet. If this is a mutex, make the holder + * inherit our priority, then block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. If this is a mutex, make the holder + * disinherit our priority, then return an error. 
*/ + #if ( configUSE_MUTEXES == 1 ) + { + if( xInheritanceOccurred != pdFALSE ) + { + UBaseType_t uxHighestWaitingPriority; + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + } + #endif /* configUSE_MUTEXES */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ } taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - /* Interrupts and other tasks can give to and take from the semaphore - * now the critical section has been exited. */ - - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - vTaskSuspendAll(); - #endif // ESP_PLATFORM - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* A block time is specified and not expired. If the semaphore - * count is 0 then enter the Blocked state to wait for a semaphore to - * become available. As semaphores are implemented with queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We follow the original procedure for locking the queue, inheriting + * priority, then attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + /* Interrupts and other tasks can give to and take from the semaphore + * now the critical section has been exited. */ - #if ( configUSE_MUTEXES == 1 ) + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + * count is 0 then enter the Blocked state to wait for a semaphore to + * become available. As semaphores are implemented with queues the + * queue being empty is equivalent to the semaphore count being 0. 
*/ if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) - { - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) { - xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* if ( configUSE_MUTEXES == 1 ) */ - - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - if( xTaskResumeAll() == pdFALSE ) - #endif // ESP_PLATFORM - { - portYIELD_WITHIN_API(); + else + { + /* There was no timeout and the semaphore count was not 0, so + * attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); - #ifndef ESP_PLATFORM + /* If the semaphore count is 0 exit now as the timeout has + * expired. Otherwise return to attempt to take the semaphore that is + * known to be available. As semaphores are implemented by queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have been set if + * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + * test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + * task to inherit this task's priority. Now this task + * has timed out the priority should be disinherited + * again, but only as low as the next highest priority + * task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } else { mtCOVERAGE_TEST_MARKER(); } - #endif // ESP_PLATFORM + } } - else - { - /* There was no timeout and the semaphore count was not 0, so - * attempt to take the semaphore again. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - } - } - else - { - /* Timed out. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - - /* If the semaphore count is 0 exit now as the timeout has - * expired.
Otherwise return to attempt to take the semaphore that is - * known to be available. As semaphores are implemented by queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - #if ( configUSE_MUTEXES == 1 ) - { - /* xInheritanceOccurred could only have be set if - * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to - * test the mutex type again to check it is actually a mutex. */ - if( xInheritanceOccurred != pdFALSE ) - { - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - { - UBaseType_t uxHighestWaitingPriority; - - /* This task blocking on the mutex caused another - * task to inherit this task's priority. Now this task - * has timed out the priority should be disinherited - * again, but only as low as the next highest priority - * task that is waiting for the same mutex. */ - uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); - vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); - } - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - } - } - #endif /* configUSE_MUTEXES */ - - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1857,78 +1977,92 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } + + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) + { + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ } taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - vTaskSuspendAll(); - #endif // ESP_PLATFORM - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* Timeout has not expired yet, check to see if there is data in the - * queue now, and if not enter the Blocked state to wait for data. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. 
*/ + #if ( queueUSE_LOCKS == 1 ) { - traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - if( xTaskResumeAll() == pdFALSE ) - #endif // ESP_PLATFORM - { - portYIELD_WITHIN_API(); - } + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ - #ifndef ESP_PLATFORM + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + * queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + * state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + * exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } else { mtCOVERAGE_TEST_MARKER(); } - #endif // ESP_PLATFORM + } } - else - { - /* There is data in the queue now, so don't enter the blocked - * state, instead return to try and obtain the data. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - } - } - else - { - /* The timeout has expired. If there is still no data in the queue - * exit, otherwise go back and try to read the data again. */ - prvUnlockQueue( pxQueue ); - #ifdef ESP_PLATFORM /* IDF-3755 */ - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #else - ( void ) xTaskResumeAll(); - #endif // ESP_PLATFORM - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_PEEK_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1960,16 +2094,24 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { + #if ( configNUM_CORES > 1 ) taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); - + ( void ) uxSavedInterruptStatus; + #else + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + #endif + { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; /* Cannot block in an ISR, so check there is data available. */ if( uxMessagesWaiting > ( UBaseType_t ) 0 ) { - const int8_t cRxLock = pxQueue->cRxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cRxLock = pxQueue->cRxLock; + #else + /* Queue locks not used, so we treat it as unlocked. 
*/ + const int8_t cRxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); @@ -2009,11 +2151,15 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was removed while it was locked. */ - configASSERT( cRxLock != queueINT8_MAX ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was removed while it was locked. */ + configASSERT( cRxLock != queueINT8_MAX ); - pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -2023,10 +2169,13 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, xReturn = pdFAIL; traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } - - taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + #else + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #endif + return xReturn; } @@ -2060,8 +2209,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + #if ( configNUM_CORES > 1 ) + taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + ( void ) uxSavedInterruptStatus; + #else + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + #endif { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2082,8 +2235,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #if ( configNUM_CORES > 1 ) + taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) ); + #else + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + #endif + return xReturn; } @@ -2338,50 +2495,74 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } /*-----------------------------------------------------------*/ -static void prvUnlockQueue( Queue_t * const pxQueue ) -{ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - - /* The lock counts contains the number of extra data items placed or - * removed from the queue while the queue was locked. When a queue is - * locked items can be added or removed, but the event lists cannot be - * updated. */ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); +#if ( queueUSE_LOCKS == 1 ) + static void prvUnlockQueue( Queue_t * const pxQueue ) { - int8_t cTxLock = pxQueue->cTxLock; + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - /* See if data was added to the queue while it was locked. */ - while( cTxLock > queueLOCKED_UNMODIFIED ) + /* The lock counts contain the number of extra data items placed or + * removed from the queue while the queue was locked. When a queue is + * locked items can be added or removed, but the event lists cannot be + * updated. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { - /* Data was posted while the queue was locked. Are any tasks - * blocked waiting for data to become available?
*/ - #if ( configUSE_QUEUE_SETS == 1 ) - { - if( pxQueue->pxQueueSetContainer != NULL ) + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + * blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + if( pxQueue->pxQueueSetContainer != NULL ) { - /* The queue is a member of a queue set, and posting to - * the queue set caused a higher priority task to unblock. - * A context switch is required. */ - vTaskMissedYield(); + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + * the queue set caused a higher priority task to unblock. + * A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); + /* Tasks that are removed from the event list will get + * added to the pending ready list as the scheduler is still + * suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } } } - else + #else /* configUSE_QUEUE_SETS */ { - /* Tasks that are removed from the event list will get - * added to the pending ready list as the scheduler is still - * suspended. */ + /* Tasks that are removed from the event list will get added to + * the pending ready list as the scheduler is still suspended. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { - /* The task waiting has a higher priority so record that a - * context switch is required. */ + /* The task waiting has a higher priority so record that + * a context switch is required. */ vTaskMissedYield(); } else @@ -2394,89 +2575,70 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) break; } } - } - #else /* configUSE_QUEUE_SETS */ + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - /* Tasks that are removed from the event list will get added to - * the pending ready list as the scheduler is still suspended. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority so record that - * a context switch is required. */ - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + vTaskMissedYield(); } else { - break; + mtCOVERAGE_TEST_MARKER(); } - } - #endif /* configUSE_QUEUE_SETS */ - --cTxLock; - } - - pxQueue->cTxLock = queueUNLOCKED; - } - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - - /* Do the same for the Rx lock. 
*/ - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - { - int8_t cRxLock = pxQueue->cRxLock; - - while( cRxLock > queueLOCKED_UNMODIFIED ) - { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - vTaskMissedYield(); + --cRxLock; } else { - mtCOVERAGE_TEST_MARKER(); + break; } + } - --cRxLock; + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } +#endif /* queueUSE_LOCKS == 1 */ + +/*-----------------------------------------------------------*/ + +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; } else { - break; + xReturn = pdFALSE; } } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); - pxQueue->cRxLock = queueUNLOCKED; + return xReturn; } - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); -} -/*-----------------------------------------------------------*/ - -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); - { - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); - - return xReturn; -} +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) @@ -2499,29 +2661,27 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ /*-----------------------------------------------------------*/ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - #ifndef ESP_PLATFORM - taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); - #endif +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) { - if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - #ifndef ESP_PLATFORM - taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); - #endif + BaseType_t xReturn; - return xReturn; -} + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + + return xReturn; + } +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) @@ -2949,13 +3109,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) * so it should be called with the scheduler locked and not from a critical * section. */ - /* Only do anything if there are no messages in the queue. This function - * will not actually cause the task to block, just place it on a blocked - * list. It will not block until the scheduler is unlocked - at which - * time a yield will be performed. If an item is added to the queue while - * the queue is locked, and the calling task blocks on the queue, then the - * calling task will be immediately unblocked when the queue is unlocked. 
*/ - prvLockQueue( pxQueue ); + #if ( queueUSE_LOCKS == 1 ) + + /* Only do anything if there are no messages in the queue. This function + * will not actually cause the task to block, just place it on a blocked + * list. It will not block until the scheduler is unlocked - at which + * time a yield will be performed. If an item is added to the queue while + * the queue is locked, and the calling task blocks on the queue, then the + * calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + #else + + /* If queue locks are not used, we use a critical section instead + * to ensure thread safety. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + #endif /* queueUSE_LOCKS == 1 */ if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) { @@ -2967,7 +3135,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) mtCOVERAGE_TEST_MARKER(); } - prvUnlockQueue( pxQueue ); + #if ( queueUSE_LOCKS == 1 ) + prvUnlockQueue( pxQueue ); + #else + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + #endif /* queueUSE_LOCKS == 1 */ } #endif /* configUSE_TIMERS */ @@ -3095,15 +3267,19 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* This function must be called form a critical section. */ configASSERT( pxQueueSetContainer ); - - /* Acquire the Queue set's spinlock */ - taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) ); - configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); + /* We also need to acquire the queue set's spinlock. */ + taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) ); + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) { - const int8_t cTxLock = pxQueueSetContainer->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_SET_SEND( pxQueueSetContainer ); @@ -3131,9 +3307,13 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - configASSERT( cTxLock != queueINT8_MAX ); + #if ( queueUSE_LOCKS == 1 ) + { + configASSERT( cTxLock != queueINT8_MAX ); - pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); + pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + #endif /* queueUSE_LOCKS == 1 */ } } else @@ -3141,7 +3321,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) mtCOVERAGE_TEST_MARKER(); } - /* Release the Queue set's spinlock */ + /* Release the previously acquired queue set's spinlock. */ taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) ); return xReturn; diff --git a/components/freertos/linker.lf b/components/freertos/linker.lf index 1c27b25384..c8a2e6b66e 100644 --- a/components/freertos/linker.lf +++ b/components/freertos/linker.lf @@ -97,7 +97,10 @@ entries: event_groups: xEventGroupSetBits (default) event_groups: xEventGroupSync (default) event_groups: vEventGroupDelete (default) - queue: prvIsQueueFull (default) + if FREERTOS_UNICORE = y: + queue: prvUnlockQueue (default) + queue: prvIsQueueEmpty (default) + queue: prvIsQueueFull (default) queue: prvInitialiseNewQueue (default) queue: prvInitialiseMutex (default) queue: uxQueueSpacesAvailable (default)
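
Reviewer note (not part of the patch): the public queue API is unchanged by the above; only the internal blocking mechanism differs between builds. On multi-core builds (configNUM_CORES > 1, hence queueUSE_LOCKS == 0) a task now blocks directly from inside the queue's spinlock critical section, while single-core builds keep the vanilla vTaskSuspendAll()/prvLockQueue() procedure. The sketch below is a minimal application-level exercise of the patched send path; the queue handle, task name, queue depth, stack size, and priority are illustrative, not taken from the patch.

/* Minimal ESP-IDF sketch exercising the patched xQueueGenericSend() path.
 * Observable behaviour is identical on both code paths: block for up to
 * 100 ms while the queue is full, then give up with errQUEUE_FULL. */
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"

static QueueHandle_t s_queue; /* illustrative name */

static void vProducerTask( void * pvParameters )
{
    uint32_t ulItem = 0;

    ( void ) pvParameters;

    for( ;; )
    {
        /* On a dual-core build this blocks from within the queue's spinlock
         * critical section (queueUSE_LOCKS == 0); on a CONFIG_FREERTOS_UNICORE
         * build it follows the original queue-lock procedure. */
        if( xQueueSend( s_queue, &ulItem, pdMS_TO_TICKS( 100 ) ) == pdPASS )
        {
            ulItem++;
        }
    }
}

void app_main( void )
{
    s_queue = xQueueCreate( 8, sizeof( uint32_t ) );
    configASSERT( s_queue != NULL );
    ( void ) xTaskCreate( vProducerTask, "prod", 2048, NULL, 5, NULL );
}

One related design note: in the FromISR paths the patch takes the queue spinlock via taskENTER_CRITICAL_ISR() on multi-core builds rather than only masking interrupts, since masking interrupts on one core cannot stop the other core from touching the queue. The linker.lf change mirrors the kernel change: prvUnlockQueue, prvIsQueueEmpty, and prvIsQueueFull now exist only when queue locks are compiled in, so their placement rules apply only under FREERTOS_UNICORE.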