From 49af70506a969ae9cddc30341a33e7e521a59762 Mon Sep 17 00:00:00 2001 From: Darian Leung Date: Fri, 8 Sep 2023 00:29:57 +0800 Subject: [PATCH] feat(freertos): Add SMP critical section changes to FreeRTOS v10.5.1 This commit adds the SMP critical section changes to the v10.5.1 kernel. These changes are temporarily documented in the `idf_changes.md` document. This commit... - Adds granular spinlocks to each data group (e.g., kernel, queues, event_groups etc.) - Updates critical section macros to use those spinlocks - Add missing critical sections required in SMP --- .../FreeRTOS-Kernel-V10.5.1/event_groups.c | 60 +- .../FreeRTOS-Kernel-V10.5.1/idf_changes.md | 23 +- .../include/freertos/FreeRTOS.h | 3 + .../include/freertos/task.h | 6 +- .../freertos/FreeRTOS-Kernel-V10.5.1/queue.c | 1226 ++++++++----- .../FreeRTOS-Kernel-V10.5.1/stream_buffer.c | 88 +- .../freertos/FreeRTOS-Kernel-V10.5.1/tasks.c | 1601 ++++++++++------- .../freertos/FreeRTOS-Kernel-V10.5.1/timers.c | 38 +- .../freertos_tasks_c_additions.h | 12 +- 9 files changed, 1857 insertions(+), 1200 deletions(-) diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c index 6e4e23ea66..4ad031aefd 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c @@ -43,6 +43,8 @@ #include "task.h" #include "timers.h" #include "event_groups.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" /* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined @@ -77,6 +79,8 @@ typedef struct EventGroupDef_t #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ #endif + + portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */ } EventGroup_t; /*-----------------------------------------------------------*/ @@ -131,6 +135,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + /* Initialize the event group's spinlock. */ + portMUX_INITIALIZE( &pxEventBits->xEventGroupLock ); + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -182,6 +189,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, } #endif /* configSUPPORT_STATIC_ALLOCATION */ + /* Initialize the event group's spinlock. */ + portMUX_INITIALIZE( &pxEventBits->xEventGroupLock ); + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -213,7 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, } #endif - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); { uxOriginalBitValue = pxEventBits->uxEventBits; @@ -256,7 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, } } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -278,7 +288,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { /* The task timed out, just return the current event bit value. 
*/ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { uxReturn = pxEventBits->uxEventBits; @@ -295,7 +305,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); xTimeoutOccurred = pdTRUE; } @@ -340,7 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, } #endif - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); { const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; @@ -408,7 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -429,7 +439,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { /* The task timed out, just return the current event bit value. */ uxReturn = pxEventBits->uxEventBits; @@ -454,7 +464,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, xTimeoutOccurred = pdTRUE; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); } else { @@ -485,7 +495,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, configASSERT( xEventGroup ); configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); @@ -496,7 +506,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, /* Clear the bits. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); return uxReturn; } @@ -552,7 +562,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, pxList = &( pxEventBits->xTasksWaitingForBits ); pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ - vTaskSuspendAll(); + + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); + #if ( configNUMBER_OF_CORES > 1 ) + + /* We are about to traverse a task list which is a kernel data structure. + * Thus we need to call prvTakeKernelLock() to take the kernel lock. */ + prvTakeKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ { traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); @@ -624,7 +641,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, * bit was set in the control word. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } - ( void ) xTaskResumeAll(); + #if ( configNUMBER_OF_CORES > 1 ) + /* Release the previously taken kernel lock. 
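From the caller's perspective the event group API is unchanged by these modifications: the new per-object `xEventGroupLock` is initialised inside `xEventGroupCreate()` / `xEventGroupCreateStatic()` and taken internally by the functions shown above. A minimal usage sketch, assuming illustrative task names and bit masks that are not part of the patch:

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"

#define EXAMPLE_BIT_RX    ( 1U << 0 )
#define EXAMPLE_BIT_TX    ( 1U << 1 )

static EventGroupHandle_t xExampleEvents;

static void vProducerTask( void * pvParameters )
{
    for( ; ; )
    {
        /* Internally (on SMP builds) sets the bits under the event group's own
         * spinlock and takes the kernel lock only while walking the list of
         * waiting tasks. */
        xEventGroupSetBits( xExampleEvents, EXAMPLE_BIT_RX );
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
}

static void vConsumerTask( void * pvParameters )
{
    for( ; ; )
    {
        /* Block until either bit is set, clearing the bits on exit. */
        ( void ) xEventGroupWaitBits( xExampleEvents,
                                      EXAMPLE_BIT_RX | EXAMPLE_BIT_TX,
                                      pdTRUE,     /* xClearOnExit */
                                      pdFALSE,    /* xWaitForAllBits */
                                      portMAX_DELAY );
    }
}

void app_main( void )
{
    xExampleEvents = xEventGroupCreate();
    xTaskCreate( vProducerTask, "prod", 2048, NULL, 5, NULL );
    xTaskCreate( vConsumerTask, "cons", 2048, NULL, 5, NULL );
}
```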
*/ + prvReleaseKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); + return pxEventBits->uxEventBits; } @@ -639,7 +661,13 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); + #if ( configNUMBER_OF_CORES > 1 ) + + /* We are about to traverse a task list which is a kernel data structure. + * Thus we need to call prvTakeKernelLock() to take the kernel lock. */ + prvTakeKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ { traceEVENT_GROUP_DELETE( xEventGroup ); @@ -651,7 +679,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); } } - ( void ) xTaskResumeAll(); + #if ( configNUMBER_OF_CORES > 1 ) + /* Release the previously taken kernel lock. */ + prvReleaseKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ + prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) { diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md b/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md index eb4094b442..cbaf5d6e00 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md @@ -74,8 +74,6 @@ The following APIs have been added to support SMP ### API Modifications -#### SMP Modifications - Added the following macros that abstract away single-core and SMP differences: - `taskYIELD_CORE()` triggers a particular core to yield @@ -122,9 +120,26 @@ The following functions were modified to accommodate SMP behavior: - `prvAddCurrentTaskToDelayedList()` - Added extra check to see if current blocking task has already been deleted by the other core. -#### Single-Core Modifications +### Critical Section Changes +- Granular Locks: The following objects are now given their own spinlocks + - Kernel objects (i.e., `tasks.c`): `xKernelLock` + - Each queue: `xQueueLock` + - Queue Registry: `xQueueRegistryLock` + - Each event group: `xEventGroupLock` + - Each stream buffer: `xStreamBufferLock` + - All timers: `xTimerLock` +- Critical sections now target the appropriate spinlocks +- Added missing critical sections for SMP (see `..._SMP_ONLY()` critical section calls) +- Queues no longer use queue locks (see `queueUSE_LOCKS`) + - Queues now just use critical sections and skips queue locking + - Queue functions can now execute within a single critical section block ## Single Core Differences -List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `congigNUMBER_OF_CORES == 1`. \ No newline at end of file +List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `congigNUMBER_OF_CORES == 1`. 
+ +- `prvAddNewTaskToReadyList()` + - Extended critical section so that SMP can check for yields while still inside critical section +- `vTaskStepTick()` + - Extended critical section so that SMP can access `xTickCount` while still inside critical section diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h index adf94251d8..ea168b1e9f 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h @@ -1371,6 +1371,7 @@ typedef struct xSTATIC_QUEUE UBaseType_t uxDummy8; uint8_t ucDummy9; #endif + portMUX_TYPE xDummyQueueLock; } StaticQueue_t; typedef StaticQueue_t StaticSemaphore_t; @@ -1400,6 +1401,7 @@ typedef struct xSTATIC_EVENT_GROUP #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucDummy4; #endif + portMUX_TYPE xDummyEventGroupLock; } StaticEventGroup_t; /* @@ -1454,6 +1456,7 @@ typedef struct xSTATIC_STREAM_BUFFER #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) void * pvDummy5[ 2 ]; #endif + portMUX_TYPE xDummyStreamBufferLock; } StaticStreamBuffer_t; /* Message buffers are built on stream buffers. */ diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h index 624285f368..a3427087af 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h @@ -220,8 +220,9 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL( x ) portENTER_CRITICAL( x ) #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#define taskENTER_CRITICAL_ISR( x ) portENTER_CRITICAL_ISR( x ) /** * task. h @@ -235,8 +236,9 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL( x ) portEXIT_CRITICAL( x ) #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#define taskEXIT_CRITICAL_ISR( x ) portEXIT_CRITICAL_ISR( x ) /** * task. h diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c index fa36ad70e6..faa22ca5a5 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c @@ -41,6 +41,8 @@ #include "FreeRTOS.h" #include "task.h" #include "queue.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( configUSE_CO_ROUTINES == 1 ) #include "croutine.h" @@ -52,11 +54,71 @@ * correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ +/* Some code sections require extra critical sections when building for SMP + * ( configNUMBER_OF_CORES > 1 ). 
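The `taskENTER_CRITICAL( x )` / `taskEXIT_CRITICAL( x )` signatures above are the heart of the granular-lock scheme: each data group carries its own `portMUX_TYPE` and passes it to the critical-section macros, so contention is limited to users of that one object. A minimal sketch of the pattern, using a hypothetical `MyObject_t` that is not part of the patch:

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical data group: pairs its shared state with its own spinlock, the
 * same way Queue_t, EventGroup_t and StreamBuffer_t now do. */
typedef struct
{
    uint32_t ulCount;
    portMUX_TYPE xObjectLock;
} MyObject_t;

static MyObject_t xObject;

void vMyObjectInit( void )
{
    xObject.ulCount = 0;
    portMUX_INITIALIZE( &xObject.xObjectLock );
}

void vMyObjectIncrement( void )
{
    /* Only contends with other users of this object, not with every other
     * critical section in the system. */
    taskENTER_CRITICAL( &xObject.xObjectLock );
    {
        xObject.ulCount++;
    }
    taskEXIT_CRITICAL( &xObject.xObjectLock );
}
```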
*/ +#if ( configNUMBER_OF_CORES > 1 ) +/* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) -/* Constants used with the cRxLock and cTxLock structure members. */ -#define queueUNLOCKED ( ( int8_t ) -1 ) -#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) -#define queueINT8_MAX ( ( int8_t ) 127 ) + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } + } +#else /* configNUMBER_OF_CORES > 1 */ + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) +#endif /* configNUMBER_OF_CORES > 1 */ + +/* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList() + * calls are deterministic (as queue locks use scheduler suspension instead of + * critical sections). However, the SMP implementation is non-deterministic + * anyways, thus SMP can forego the use of queue locks (replaced with a critical + * sections) in exchange for better queue performance. */ +#if ( configNUMBER_OF_CORES > 1 ) + #define queueUSE_LOCKS 0 + #define queueUNLOCKED ( ( int8_t ) 0 ) +#else /* configNUMBER_OF_CORES > 1 */ + #define queueUSE_LOCKS 1 + /* Constants used with the cRxLock and cTxLock structure members. */ + #define queueUNLOCKED ( ( int8_t ) -1 ) + #define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + #define queueINT8_MAX ( ( int8_t ) 127 ) +#endif /* configNUMBER_OF_CORES > 1 */ /* When the Queue_t structure is used to represent a base queue its pcHead and * pcTail members are used as pointers into the queue storage area. When the @@ -67,8 +129,8 @@ * is maintained. The QueuePointers_t and SemaphoreData_t types are used to form * a union as their usage is mutually exclusive dependent on what the queue is * being used for. */ -#define uxQueueType pcHead -#define queueQUEUE_IS_MUTEX NULL +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL typedef struct QueuePointers { @@ -119,8 +181,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. 
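The `..._SAFE_SMP_ONLY` helpers above pick the task or ISR flavour of the critical section at run time using `portCHECK_IF_IN_ISR()`. The same pattern, sketched for application code that may be called from either context (`xExampleLock` and `ulSharedCount` are illustrative names, not part of the patch):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xExampleLock = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t ulSharedCount = 0;

void vExampleUpdateFromAnyContext( void )
{
    if( portCHECK_IF_IN_ISR() == pdFALSE )
    {
        /* Task context: plain critical section on the object's lock. */
        taskENTER_CRITICAL( &xExampleLock );
        ulSharedCount++;
        taskEXIT_CRITICAL( &xExampleLock );
    }
    else
    {
        /* ISR context: use the ISR variants added to task.h by this patch. */
        taskENTER_CRITICAL_ISR( &xExampleLock );
        ulSharedCount++;
        taskEXIT_CRITICAL_ISR( &xExampleLock );
    }
}
```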
Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #if ( queueUSE_LOCKS == 1 ) + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #endif /* queueUSE_LOCKS == 1 */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ @@ -134,6 +198,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxQueueNumber; uint8_t ucQueueType; #endif + + portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */ } xQUEUE; /* The old xQUEUE name is maintained above then typedefed to the new Queue_t @@ -167,8 +233,15 @@ typedef xQUEUE Queue_t; * array position being vacant. */ PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + #if ( configNUMBER_OF_CORES > 1 ) +/* Spinlock required in SMP when accessing the queue registry */ + static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED; + #endif /* configNUMBER_OF_CORES > 1 */ + #endif /* configQUEUE_REGISTRY_SIZE */ +#if ( queueUSE_LOCKS == 1 ) + /* * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not * prevent an ISR from adding or removing items to the queue, but does prevent @@ -177,21 +250,22 @@ typedef xQUEUE Queue_t; * to indicate that a task may require unblocking. When the queue in unlocked * these lock counts are inspected, and the appropriate action taken. */ -static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any data in a queue. * * @return pdTRUE if the queue contains no items, otherwise pdFALSE. */ -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any space in a queue. * * @return pdTRUE if there is no space, otherwise pdFALSE; */ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; +#endif /* queueUSE_LOCKS == 1 */ /* * Copies an item into the queue, either at the front of the queue or the @@ -248,12 +322,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, #endif /*-----------------------------------------------------------*/ +#if ( queueUSE_LOCKS == 1 ) + /* * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. 
*/ -#define prvLockQueue( pxQueue ) \ - taskENTER_CRITICAL(); \ + #define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \ { \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ { \ @@ -264,14 +340,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ } \ } \ - taskEXIT_CRITICAL() + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ) /* * Macro to increment cTxLock member of the queue data structure. It is * capped at the number of tasks in the system as we cannot unblock more * tasks than the number of tasks in the system. */ -#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ + #define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ @@ -286,7 +362,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * capped at the number of tasks in the system as we cannot unblock more * tasks than the number of tasks in the system. */ -#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ + #define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ @@ -295,6 +371,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ } +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -305,19 +382,28 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, configASSERT( pxQueue ); + if( xNewQueue == pdTRUE ) + { + portMUX_INITIALIZE( &( pxQueue->xQueueLock ) ); + } + if( ( pxQueue != NULL ) && ( pxQueue->uxLength >= 1U ) && /* Check for multiplication overflow. */ ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->pcWriteTo = pxQueue->pcHead; pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - pxQueue->cRxLock = queueUNLOCKED; - pxQueue->cTxLock = queueUNLOCKED; + #if ( queueUSE_LOCKS == 1 ) + { + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + } + #endif /* queueUSE_LOCKS == 1 */ if( xNewQueue == pdFALSE ) { @@ -349,7 +435,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); } else { @@ -606,6 +692,9 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, /* In case this is a recursive mutex. */ pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + /* Initialize the mutex's spinlock */ + portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) ); + traceCREATE_MUTEX( pxNewQueue ); /* Start with the semaphore in the expected state. 
*/ @@ -671,7 +760,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * calling task is the mutex holder, but not a good way of determining the * identity of the mutex holder, as the holder may change between the * following critical section exiting and the function returning. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) ); { if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) { @@ -682,7 +771,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, pxReturn = NULL; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) ); return pxReturn; } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ @@ -908,7 +997,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { /* Is there room on the queue now? The running task must be the * highest priority task wanting to access the queue. If the head item @@ -1014,7 +1103,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, } #endif /* configUSE_QUEUE_SETS */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1023,7 +1112,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, { /* The queue was full and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); /* Return to the original privilege level before exiting * the function. */ @@ -1043,56 +1132,88 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - if( prvIsQueueFull( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is full and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_SEND( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); - - /* Unlocking the queue means queue events can effect the - * event list. It is possible that interrupts occurring now - * remove this task from the event list again - but as the - * scheduler is suspended the task will go onto the pending - * ready list instead of the actual ready list. */ - prvUnlockQueue( pxQueue ); - - /* Resuming the scheduler will move tasks from the pending - * ready list into the ready list - so it is feasible that this - * task is already in the ready list before it yields - in which - * case the yield will not cause a context switch unless there - * is also a higher priority task in the pending ready list. */ - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); portYIELD_WITHIN_API(); } + else + { + /* We have timed out. Return an error. 
*/ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is full and entry time has been set + * - We follow the original procedure of locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + * event list. It is possible that interrupts occurring now + * remove this task from the event list again - but as the + * scheduler is suspended the task will go onto the pending + * ready list instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + * ready list into the ready list - so it is feasible that this + * task is already in the ready list before it yields - in which + * case the yield will not cause a context switch unless there + * is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } } else { - /* Try again. */ + /* The timeout has expired. */ prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; } } - else - { - /* The timeout has expired. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - traceQUEUE_SEND_FAILED( pxQueue ); - return errQUEUE_FULL; - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1131,11 +1252,16 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; traceQUEUE_SEND_FROM_ISR( pxQueue ); @@ -1243,9 +1369,13 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - prvIncrementQueueTxLock( pxQueue, cTxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. 
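From the caller's side the FromISR contract is unchanged: masking interrupts or taking the queue's spinlock is handled inside `prvENTER_CRITICAL_OR_MASK_ISR()`. A typical producer/consumer sketch, assuming an illustrative queue handle and task created elsewhere (ISR registration omitted):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/task.h"

/* Assumed to be created elsewhere with xQueueCreate( 8, sizeof( uint32_t ) ). */
static QueueHandle_t xExampleQueue;

/* ISR-side producer: posts an item and requests a context switch if a higher
 * priority task was unblocked by the post. */
void vExampleIsrHandler( void * pvArg )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint32_t ulEvent = 1;

    ( void ) xQueueSendFromISR( xExampleQueue, &ulEvent, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        portYIELD_FROM_ISR();
    }
}

/* Task-side consumer: blocks for up to 50 ms. On SMP the block happens inside
 * a single critical section on the queue's spinlock rather than via queue
 * locking. */
static void vConsumerTask( void * pvParameters )
{
    uint32_t ulReceived;

    for( ; ; )
    {
        if( xQueueReceive( xExampleQueue, &ulReceived, pdMS_TO_TICKS( 50 ) ) == pdTRUE )
        {
            /* Process ulReceived. */
        }
    }
}
```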
*/ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -1256,7 +1386,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -1302,7 +1432,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1311,7 +1441,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * space'. */ if( uxMessagesWaiting < pxQueue->uxLength ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_SEND_FROM_ISR( pxQueue ); @@ -1409,9 +1544,13 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - prvIncrementQueueTxLock( pxQueue, cTxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. */ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -1422,7 +1561,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -1455,7 +1594,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1487,7 +1626,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1496,7 +1635,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_RECEIVE_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1513,60 +1652,92 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* The timeout has not expired. If the queue is still empty place - * the task on the list of tasks waiting to receive from the queue. 
*/ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); portYIELD_WITHIN_API(); } else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + * the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + * data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + * back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* The queue contains data again. Loop back to try and read the - * data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. If there is no data in the queue exit, otherwise loop - * back and attempt to read the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1601,7 +1772,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { /* Semaphores are queues with an item size of 0, and where the * number of messages in the queue is the semaphore's count value. 
*/ @@ -1650,7 +1821,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1659,7 +1830,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, { /* The semaphore count was 0 and no block time is specified * (or the block time has expired) so exit now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_RECEIVE_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1676,107 +1847,165 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can give to and take from the semaphore - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* A block time is specified and not expired. If the semaphore - * count is 0 then enter the Blocked state to wait for a semaphore to - * become available. As semaphores are implemented with queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We simply check for a time out, inherit priority and block if + * not timed out, or return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - - #if ( configUSE_MUTEXES == 1 ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + /* Not timed out yet. If this is a mutex, make the holder + * inherit our priority, then block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + #if ( configUSE_MUTEXES == 1 ) { - taskENTER_CRITICAL(); + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); } - taskEXIT_CRITICAL(); + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. If this is a mutex, make the holder + * disinherit our priority, then return an error. */ + #if ( configUSE_MUTEXES == 1 ) + { + if( xInheritanceOccurred != pdFALSE ) + { + UBaseType_t uxHighestWaitingPriority; + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + } + #endif /* configUSE_MUTEXES */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We follow the original procedure for locking the queue, inheriting + * priority, then attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can give to and take from the semaphore + * now the critical section has been exited. 
*/ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + * count is 0 then enter the Blocked state to wait for a semaphore to + * become available. As semaphores are implemented with queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* if ( configUSE_MUTEXES == 1 ) */ - - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + else { - portYIELD_WITHIN_API(); + /* There was no timeout and the semaphore count was not 0, so + * attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + * expired. Otherwise return to attempt to take the semaphore that is + * known to be available. As semaphores are implemented by queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + * test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + * task to inherit this task's priority. Now this task + * has timed out the priority should be disinherited + * again, but only as low as the next highest priority + * task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* There was no timeout and the semaphore count was not 0, so - * attempt to take the semaphore again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - /* If the semaphore count is 0 exit now as the timeout has - * expired. Otherwise return to attempt to take the semaphore that is - * known to be available. As semaphores are implemented by queues the - * queue being empty is equivalent to the semaphore count being 0. 
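Both branches above preserve the mutex semantics the caller sees: the holder inherits the priority of a higher-priority waiter, and that priority is disinherited again if the wait times out. A caller-side sketch (`xExampleMutex` is an illustrative handle created elsewhere with `xSemaphoreCreateMutex()`):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xExampleMutex;

void vExampleDoProtectedWork( void )
{
    if( xSemaphoreTake( xExampleMutex, pdMS_TO_TICKS( 100 ) ) == pdTRUE )
    {
        /* While this task holds the mutex, a higher priority task blocking on
         * it raises this task's priority (xTaskPriorityInherit()). */

        /* ... access the shared resource ... */

        xSemaphoreGive( xExampleMutex );
    }
    else
    {
        /* Timed out: any priority inherited on this task's behalf has already
         * been dropped via vTaskPriorityDisinheritAfterTimeout(). */
    }
}
```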
*/ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - #if ( configUSE_MUTEXES == 1 ) - { - /* xInheritanceOccurred could only have be set if - * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to - * test the mutex type again to check it is actually a mutex. */ - if( xInheritanceOccurred != pdFALSE ) - { - taskENTER_CRITICAL(); - { - UBaseType_t uxHighestWaitingPriority; - - /* This task blocking on the mutex caused another - * task to inherit this task's priority. Now this task - * has timed out the priority should be disinherited - * again, but only as low as the next highest priority - * task that is waiting for the same mutex. */ - uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); - vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); - } - taskEXIT_CRITICAL(); - } - } - #endif /* configUSE_MUTEXES */ - - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1809,7 +2038,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1847,7 +2076,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1856,7 +2085,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_PEEK_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1874,60 +2103,92 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now that the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* Timeout has not expired yet, check to see if there is data in the - * queue now, and if not enter the Blocked state to wait for data. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); portYIELD_WITHIN_API(); } else + { + /* We have timed out. Return an error. 
*/ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now that the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + * queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + * state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + * exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* There is data in the queue now, so don't enter the blocked - * state, instead return to try and obtain the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* The timeout has expired. If there is still no data in the queue - * exit, otherwise go back and try to read the data again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_PEEK_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1959,14 +2220,19 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; /* Cannot block in an ISR, so check there is data available. */ if( uxMessagesWaiting > ( UBaseType_t ) 0 ) { - const int8_t cRxLock = pxQueue->cRxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cRxLock = pxQueue->cRxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cRxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); @@ -2006,9 +2272,13 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was removed while it was locked. 
*/ - prvIncrementQueueRxLock( pxQueue, cRxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was removed while it was locked. */ + prvIncrementQueueRxLock( pxQueue, cRxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -2019,7 +2289,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -2053,7 +2323,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2074,7 +2344,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -2086,11 +2356,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) configASSERT( xQueue ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) ); { uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) ); return uxReturn; } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ @@ -2103,11 +2373,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) configASSERT( pxQueue ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return uxReturn; } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ @@ -2329,50 +2599,74 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } /*-----------------------------------------------------------*/ -static void prvUnlockQueue( Queue_t * const pxQueue ) -{ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - - /* The lock counts contains the number of extra data items placed or - * removed from the queue while the queue was locked. When a queue is - * locked items can be added or removed, but the event lists cannot be - * updated. */ - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static void prvUnlockQueue( Queue_t * const pxQueue ) { - int8_t cTxLock = pxQueue->cTxLock; + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - /* See if data was added to the queue while it was locked. */ - while( cTxLock > queueLOCKED_UNMODIFIED ) + /* The lock counts contains the number of extra data items placed or + * removed from the queue while the queue was locked. When a queue is + * locked items can be added or removed, but the event lists cannot be + * updated. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { - /* Data was posted while the queue was locked. Are any tasks - * blocked waiting for data to become available? 
*/ - #if ( configUSE_QUEUE_SETS == 1 ) + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) { - if( pxQueue->pxQueueSetContainer != NULL ) + /* Data was posted while the queue was locked. Are any tasks + * blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + if( pxQueue->pxQueueSetContainer != NULL ) { - /* The queue is a member of a queue set, and posting to - * the queue set caused a higher priority task to unblock. - * A context switch is required. */ - vTaskMissedYield(); + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + * the queue set caused a higher priority task to unblock. + * A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); + /* Tasks that are removed from the event list will get + * added to the pending ready list as the scheduler is still + * suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } } } - else + #else /* configUSE_QUEUE_SETS */ { - /* Tasks that are removed from the event list will get - * added to the pending ready list as the scheduler is still - * suspended. */ + /* Tasks that are removed from the event list will get added to + * the pending ready list as the scheduler is still suspended. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { - /* The task waiting has a higher priority so record that a - * context switch is required. */ + /* The task waiting has a higher priority so record that + * a context switch is required. */ vTaskMissedYield(); } else @@ -2385,89 +2679,69 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) break; } } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; } - #else /* configUSE_QUEUE_SETS */ + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) { - /* Tasks that are removed from the event list will get added to - * the pending ready list as the scheduler is still suspended. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { - /* The task waiting has a higher priority so record that - * a context switch is required. 
*/ vTaskMissedYield(); } else { mtCOVERAGE_TEST_MARKER(); } + + --cRxLock; } else { break; } } - #endif /* configUSE_QUEUE_SETS */ - --cTxLock; + pxQueue->cRxLock = queueUNLOCKED; } - - pxQueue->cTxLock = queueUNLOCKED; + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); } - taskEXIT_CRITICAL(); +#endif /* queueUSE_LOCKS == 1 */ +/*-----------------------------------------------------------*/ - /* Do the same for the Rx lock. */ - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) { - int8_t cRxLock = pxQueue->cRxLock; + BaseType_t xReturn; - while( cRxLock > queueLOCKED_UNMODIFIED ) + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - --cRxLock; + xReturn = pdTRUE; } else { - break; + xReturn = pdFALSE; } } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); - pxQueue->cRxLock = queueUNLOCKED; + return xReturn; } - taskEXIT_CRITICAL(); -} -/*-----------------------------------------------------------*/ - -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); - { - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); - - return xReturn; -} +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) @@ -2490,25 +2764,27 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ /*-----------------------------------------------------------*/ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) { - if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); + BaseType_t xReturn; - return xReturn; -} + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + + return xReturn; + } +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) @@ -2828,38 +3104,45 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - if( pcQueueName != NULL ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - /* See if there is an empty space in the registry. A NULL name denotes - * a free slot. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + if( pcQueueName != NULL ) { - /* Replace an existing entry if the queue is already in the registry. */ - if( xQueue == xQueueRegistry[ ux ].xHandle ) + /* See if there is an empty space in the registry. 
A NULL name denotes + * a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - break; - } - /* Otherwise, store in the next empty location */ - else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) - { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - } - else - { - mtCOVERAGE_TEST_MARKER(); + /* Replace an existing entry if the queue is already in the registry. */ + if( xQueue == xQueueRegistry[ ux ].xHandle ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + break; + } + /* Otherwise, store in the next empty location */ + else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } - } - if( pxEntryToWrite != NULL ) - { - /* Store the information on this queue. */ - pxEntryToWrite->pcQueueName = pcQueueName; - pxEntryToWrite->xHandle = xQueue; + if( pxEntryToWrite != NULL ) + { + /* Store the information on this queue. */ + pxEntryToWrite->pcQueueName = pcQueueName; + pxEntryToWrite->xHandle = xQueue; - traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } #endif /* configQUEUE_REGISTRY_SIZE */ @@ -2874,21 +3157,28 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - /* Note there is nothing here to protect against another task adding or - * removing entries from the registry while it is being searched. */ - - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - if( xQueueRegistry[ ux ].xHandle == xQueue ) + /* Note there is nothing here to protect against another task adding or + * removing entries from the registry while it is being searched. */ + + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - pcReturn = xQueueRegistry[ ux ].pcQueueName; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); return pcReturn; } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ @@ -2904,26 +3194,33 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - /* See if the handle of the queue being unregistered in actually in the - * registry. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - if( xQueueRegistry[ ux ].xHandle == xQueue ) + /* See if the handle of the queue being unregistered in actually in the + * registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - /* Set the name to NULL to show that this slot if free again. 
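The registry accessors in the hunks above (vQueueAddToRegistry(), pcQueueGetName() and vQueueUnregisterQueue()) are now serialized on xQueueRegistryLock when built for SMP, so two cores cannot race on the same registry slot. Their public behaviour is unchanged; a typical sequence, assuming configQUEUE_REGISTRY_SIZE > 0:

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/queue.h"

    void vRegistryExample( void )
    {
        QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );

        if( xQueue != NULL )
        {
            /* Publish the queue under a debugger-visible name. */
            vQueueAddToRegistry( xQueue, "sensor_q" );

            /* Look the name back up; NULL is returned if the queue is
             * not (or no longer) registered. */
            const char * pcName = pcQueueGetName( xQueue );
            ( void ) pcName;

            /* Remove the entry before deleting the queue. */
            vQueueUnregisterQueue( xQueue );
            vQueueDelete( xQueue );
        }
    }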
*/ - xQueueRegistry[ ux ].pcQueueName = NULL; + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot if free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; - /* Set the handle to NULL to ensure the same queue handle cannot - * appear in the registry twice if it is added, removed, then - * added again. */ - xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); + /* Set the handle to NULL to ensure the same queue handle cannot + * appear in the registry twice if it is added, removed, then + * added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ #endif /* configQUEUE_REGISTRY_SIZE */ @@ -2945,25 +3242,40 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) * so it should be called with the scheduler locked and not from a critical * section. */ - /* Only do anything if there are no messages in the queue. This function - * will not actually cause the task to block, just place it on a blocked - * list. It will not block until the scheduler is unlocked - at which - * time a yield will be performed. If an item is added to the queue while - * the queue is locked, and the calling task blocks on the queue, then the - * calling task will be immediately unblocked when the queue is unlocked. */ - prvLockQueue( pxQueue ); - - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + /* For SMP, we need to take the queue's xQueueLock as we are about to + * access the queue. */ + taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); { - /* There is nothing in the queue, block for the specified period. */ - vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( queueUSE_LOCKS == 1 ) + { + /* Only do anything if there are no messages in the queue. This function + * will not actually cause the task to block, just place it on a blocked + * list. It will not block until the scheduler is unlocked - at which + * time a yield will be performed. If an item is added to the queue while + * the queue is locked, and the calling task blocks on the queue, then the + * calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + } + #endif /* queueUSE_LOCKS == 1 */ - prvUnlockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( queueUSE_LOCKS == 1 ) + { + prvUnlockQueue( pxQueue ); + } + #endif /* queueUSE_LOCKS == 1 */ + } + /* Release the previously taken xQueueLock. 
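vQueueWaitForMessageRestricted() above is only compiled when configUSE_TIMERS == 1; the timer service task uses it to block on the timer command queue, which is why it now takes the queue's xQueueLock on SMP (and only locks the queue when queueUSE_LOCKS == 1). From the application's point of view the software timer API is unchanged; a minimal sketch:

    #include "freertos/FreeRTOS.h"
    #include "freertos/timers.h"

    static void prvBlinkCallback( TimerHandle_t xTimer )
    {
        ( void ) xTimer;
        /* ... toggle an LED, poll a sensor, etc. ... */
    }

    void vStartBlinkTimer( void )
    {
        /* Auto-reloading 500 ms timer, serviced by the timer task. */
        TimerHandle_t xTimer = xTimerCreate( "blink",
                                             pdMS_TO_TICKS( 500 ),
                                             pdTRUE,           /* auto reload */
                                             NULL,             /* timer ID */
                                             prvBlinkCallback );

        if( xTimer != NULL )
        {
            /* The start command is posted to the timer command queue that
             * the timer task blocks on. */
            ( void ) xTimerStart( xTimer, 0 );
        }
    }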
*/ + taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); } #endif /* configUSE_TIMERS */ @@ -2990,7 +3302,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { BaseType_t xReturn; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) ); { if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) { @@ -3009,7 +3321,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) ); return xReturn; } @@ -3039,12 +3351,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) ); { /* The queue is no longer contained in the set. */ pxQueueOrSemaphore->pxQueueSetContainer = NULL; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) ); xReturn = pdPASS; } @@ -3096,23 +3408,37 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */ configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); - if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + /* In SMP, queue sets have their own xQueueLock. Thus we need to also + * acquire the queue set's xQueueLock before accessing it. */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); { - const int8_t cTxLock = pxQueueSetContainer->cTxLock; - - traceQUEUE_SET_SEND( pxQueueSetContainer ); - - /* The data copied is the handle of the queue that contains data. */ - xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); - - if( cTxLock == queueUNLOCKED ) + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) { - if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ + + traceQUEUE_SET_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. */ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); + + if( cTxLock == queueUNLOCKED ) { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -3121,18 +3447,20 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - mtCOVERAGE_TEST_MARKER(); + #if ( queueUSE_LOCKS == 1 ) + { + prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } } else { - prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); + mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously acquired queue set's xQueueLock. 
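prvNotifyQueueSetContainer() above is what posts a member queue's handle into its containing set, and with this change it first takes the set's own xQueueLock. The application-level pattern it serves looks like the following (requires configUSE_QUEUE_SETS == 1; error handling is omitted and the identifiers are illustrative):

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/queue.h"

    void vQueueSetReaderTask( void * pvParameters )
    {
        QueueHandle_t xQueueA = xQueueCreate( 4, sizeof( uint32_t ) );
        QueueHandle_t xQueueB = xQueueCreate( 4, sizeof( uint32_t ) );

        /* The set must be able to hold every item of every member queue. */
        QueueSetHandle_t xSet = xQueueCreateSet( 4 + 4 );

        ( void ) pvParameters;
        ( void ) xQueueAddToSet( xQueueA, xSet );
        ( void ) xQueueAddToSet( xQueueB, xSet );

        for( ; ; )
        {
            /* Blocks until one member queue holds data; the returned handle
             * is the one that was posted into the set on send. */
            QueueSetMemberHandle_t xReady = xQueueSelectFromSet( xSet, portMAX_DELAY );
            uint32_t ulValue;

            if( ( xReady == xQueueA ) || ( xReady == xQueueB ) )
            {
                ( void ) xQueueReceive( xReady, &ulValue, 0 );
            }
        }
    }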
*/ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); return xReturn; } diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c index 236729cee6..f474580ebc 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c @@ -43,6 +43,8 @@ #include "FreeRTOS.h" #include "task.h" #include "stream_buffer.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( configUSE_TASK_NOTIFICATIONS != 1 ) #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c @@ -63,18 +65,18 @@ * that uses task notifications. */ /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */ #ifndef sbRECEIVE_COMPLETED - #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ - vTaskSuspendAll(); \ - { \ - if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ - { \ - ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ - ( uint32_t ) 0, \ - eNoAction ); \ - ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ - } \ - } \ - ( void ) xTaskResumeAll(); + #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); #endif /* sbRECEIVE_COMPLETED */ /* If user has provided a per-instance receive complete callback, then @@ -140,18 +142,18 @@ * implementation that uses task notifications. */ #ifndef sbSEND_COMPLETED - #define sbSEND_COMPLETED( pxStreamBuffer ) \ - vTaskSuspendAll(); \ - { \ - if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ - { \ - ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ - ( uint32_t ) 0, \ - eNoAction ); \ - ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ - } \ - } \ - ( void ) xTaskResumeAll(); + #define sbSEND_COMPLETED( pxStreamBuffer ) \ + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -243,6 +245,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention StreamBufferCallbackFunction_t pxSendCompletedCallback; /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */ StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. 
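The sbRECEIVE_COMPLETED()/sbSEND_COMPLETED() helpers above wake the peer task with a task notification; they now do so inside the stream buffer's own xStreamBufferLock on SMP (or a scheduler suspension on single core). The application-facing behaviour is unchanged; a minimal single-task sketch:

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/stream_buffer.h"

    void vStreamBufferExample( void )
    {
        /* 128-byte buffer; a blocked reader is only woken once at least
         * 8 bytes (the trigger level) are available. */
        StreamBufferHandle_t xStream = xStreamBufferCreate( 128, 8 );
        static const uint8_t ucData[ 8 ] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        uint8_t ucRx[ 8 ];

        if( xStream != NULL )
        {
            ( void ) xStreamBufferSend( xStream, ucData, sizeof( ucData ), pdMS_TO_TICKS( 10 ) );
            ( void ) xStreamBufferReceive( xStream, ucRx, sizeof( ucRx ), pdMS_TO_TICKS( 10 ) );
            vStreamBufferDelete( xStream );
        }
    }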
*/ #endif + + portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */ } StreamBuffer_t; /* @@ -385,6 +389,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ); + /* Initialize the stream buffer's spinlock separately, as + * prvInitialiseNewStreamBuffer() is also called from + * xStreamBufferReset(). */ + portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) ); + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); } else @@ -463,6 +472,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, * again. */ pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + /* Initialize the stream buffer's spinlock separately, as + * prvInitialiseNewStreamBuffer() is also called from + * xStreamBufferReset(). */ + portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) ); + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ @@ -560,7 +574,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) #endif /* Can only reset a message buffer if there are no tasks blocked on it. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) ) { @@ -590,7 +604,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); return xReturn; } @@ -736,7 +750,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, { /* Wait until the required number of bytes are free in the message * buffer. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); @@ -751,11 +765,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, } else { - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); break; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); @@ -932,7 +946,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, { /* Checking if there is data and clearing the notification state must be * performed atomically. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); @@ -955,7 +969,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); if( xBytesAvailable <= xBytesToStoreMessageLength ) { @@ -1409,7 +1423,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ #endif - ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + /* This function could be called from xStreamBufferReset(), so we reset the + * stream buffer fields manually in order to avoid clearing + * xStreamBufferLock. 
The xStreamBufferLock is initialized separately on + * stream buffer creation. */ + pxStreamBuffer->xTail = ( size_t ) 0; + pxStreamBuffer->xHead = ( size_t ) 0; + pxStreamBuffer->xTaskWaitingToReceive = ( TaskHandle_t ) 0; + pxStreamBuffer->xTaskWaitingToSend = ( TaskHandle_t ) 0; + #if ( configUSE_TRACE_FACILITY == 1 ) + pxStreamBuffer->uxStreamBufferNumber = ( UBaseType_t ) 0; + #endif pxStreamBuffer->pucBuffer = pucBuffer; pxStreamBuffer->xLength = xBufferSizeBytes; pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c index 47371e8ebd..5c25e0c129 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c @@ -44,6 +44,8 @@ #include "task.h" #include "timers.h" #include "stack_macros.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #include "freertos/idf_additions.h" /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified @@ -63,6 +65,67 @@ #include #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ +/* Some code sections require extra critical sections when building for SMP + * ( configNUMBER_OF_CORES > 1 ). */ +#if ( configNUMBER_OF_CORES > 1 ) + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) + #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) + /* Macros that Enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } + } + +#else /* configNUMBER_OF_CORES > 1 */ + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) + /* Macros that Enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define 
taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) +#endif /* configNUMBER_OF_CORES > 1 */ + #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be @@ -490,6 +553,10 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL #endif +/* Spinlock required for SMP critical sections. This lock protects all of the + * kernel's data structures such as various tasks lists, flags, and tick counts. */ +PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED; + /*lint -restore */ /*-----------------------------------------------------------*/ @@ -1141,7 +1208,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { uxCurrentNumberOfTasks++; @@ -1218,27 +1285,29 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxNewTCB ); portSETUP_TCB( pxNewTCB ); - } - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( taskIS_YIELD_REQUIRED( pxNewTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE ) + if( xSchedulerRunning != pdFALSE ) { - taskYIELD_IF_USING_PREEMPTION(); + /* If the created task is of a higher priority than the current task + * then it should run now. */ + if( taskIS_YIELD_REQUIRED( pxNewTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL( &xKernelLock ); - taskEXIT_CRITICAL(); + /* SINGLE-CORE MODIFICATION: Extended critical section so that SMP can check + * for yield inside critical section. */ } /*-----------------------------------------------------------*/ @@ -1250,7 +1319,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) BaseType_t xSelfDelete; BaseType_t xIsCurRunning; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -1355,7 +1424,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); /* If the task is currently running, call prvDeleteTCB from outside of * critical section. If a task is currently running, prvDeleteTCB is @@ -1366,20 +1435,27 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvDeleteTCB( pxTCB ); } - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ - if( xSchedulerRunning != pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - if( xSelfDelete == pdTRUE ) + /* Force a reschedule if it is the currently running task that has just + * been deleted. */ + if( xSchedulerRunning != pdFALSE ) { - configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xSelfDelete == pdTRUE ) + { + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken kernel lock. 
*/ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* INCLUDE_vTaskDelete */ @@ -1397,7 +1473,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( ( xTimeIncrement > 0U ) ); configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Minor optimisation. The tick count cannot change in this * block. */ @@ -1453,7 +1529,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); /* Force a reschedule if xTaskResumeAll has not already done so, we may * have put ourselves to sleep. */ @@ -1482,7 +1558,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( xTicksToDelay > ( TickType_t ) 0U ) { configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { traceTASK_DELAY(); @@ -1495,7 +1571,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * executing task. */ prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); } else { @@ -1529,7 +1605,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { if( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdTRUE ) { @@ -1608,7 +1684,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return eReturn; } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ @@ -1623,14 +1699,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t const * pxTCB; UBaseType_t uxReturn; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return uxReturn; } @@ -1663,14 +1739,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptState ); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptState ); return uxReturn; } @@ -1699,7 +1775,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -1851,7 +1927,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) ( void ) uxPriorityUsedOnEntry; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskPrioritySet */ @@ -1863,7 +1939,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { TCB_t * pxTCB; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. 
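xTaskDelayUntil() above now brackets its list manipulation with prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ), a critical section on SMP and a scheduler suspension on single core, but the API itself is unchanged. A typical periodic task using it (the 10 ms period is chosen arbitrarily):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    void vPeriodicTask( void * pvParameters )
    {
        TickType_t xLastWakeTime = xTaskGetTickCount();
        const TickType_t xPeriod = pdMS_TO_TICKS( 10 );

        ( void ) pvParameters;

        for( ; ; )
        {
            /* Returns pdTRUE if the task actually slept, pdFALSE if the
             * next wake time had already passed. */
            BaseType_t xWasDelayed = xTaskDelayUntil( &xLastWakeTime, xPeriod );
            ( void ) xWasDelayed;

            /* ... periodic work goes here ... */
        }
    }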
*/ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -1978,7 +2054,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskSuspend */ @@ -2044,7 +2120,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* It does not make sense to resume the calling task. */ configASSERT( xTaskToResume ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. */ @@ -2082,7 +2158,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskSuspend */ @@ -2117,7 +2193,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -2161,7 +2237,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xYieldRequired; } @@ -2255,17 +2331,24 @@ void vTaskStartScheduler( void ) * starts to run. */ portDISABLE_INTERRUPTS(); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* Switch C-Runtime's TLS Block to point to the TLS - * block specific to the task that will run first. */ - configSET_TLS_BLOCK( pxCurrentTCBs[ portGET_CORE_ID() ]->xTLSBlock ); - } - #endif + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * block specific to the task that will run first. */ + configSET_TLS_BLOCK( pxCurrentTCBs[ portGET_CORE_ID() ]->xTLSBlock ); + } + #endif - xNextTaskUnblockTime = portMAX_DELAY; - xSchedulerRunning = pdTRUE; - xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* If configGENERATE_RUN_TIME_STATS is defined then the following * macro must be defined to configure the timer/counter used to generate @@ -2312,7 +2395,15 @@ void vTaskEndScheduler( void ) * routine so the original ISRs can be restored if necessary. The port * layer must ensure interrupts enable bit is left in the correct state. */ portDISABLE_INTERRUPTS(); - xSchedulerRunning = pdFALSE; + + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + xSchedulerRunning = pdFALSE; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); vPortEndScheduler(); } /*----------------------------------------------------------*/ @@ -2415,7 +2506,7 @@ BaseType_t xTaskResumeAll( void ) * removed task will have been added to the xPendingReadyList. 
Once the * scheduler has been resumed it is safe to move all the pending ready * tasks from this list into their appropriate ready list. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -2517,7 +2608,7 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xAlreadyYielded; } @@ -2559,11 +2650,18 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); { - xReturn = xTickCount; + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -2670,7 +2768,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Search the ready lists. */ do @@ -2716,7 +2814,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char } #endif } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return pxTCB; } @@ -2780,7 +2878,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char { UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Is there a space in the array for each task in the system? */ if( uxArraySize >= uxCurrentNumberOfTasks ) @@ -2839,7 +2937,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char mtCOVERAGE_TEST_MARKER(); } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return uxTask; } @@ -2865,9 +2963,9 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char void vTaskStepTick( TickType_t xTicksToJump ) { - /* SINGLE-CORE MODIFICATION: Expanded critical section to ensure thread - * safe access to xTickCount between multiple cores. */ - taskENTER_CRITICAL(); + /* SINGLE-CORE MODIFICATION: Expanded critical section so that SMP + * accesses xTickCount inside a critical section. */ + taskENTER_CRITICAL( &xKernelLock ); { /* Correct the tick count value after a period during which the tick * was suppressed. Note this does *not* call the tick hook function for @@ -2893,7 +2991,8 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char xTickCount += xTicksToJump; traceINCREASE_TICK_COUNT( xTicksToJump ); } - taskEXIT_CRITICAL(); + /* SINGLE-CORE MODIFICATION: Expanded critical section */ + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_TICKLESS_IDLE */ @@ -2912,11 +3011,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) vTaskSuspendAll(); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. 
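xTaskCatchUpTicks() above still suspends the scheduler, but the update of xPendedTicks is now additionally wrapped in the kernel spinlock so the tick interrupt (which may be running on the other core) cannot modify it concurrently. It is intended for application code that has stopped the tick interrupt itself; the port's tickless idle path uses vTaskStepTick() instead. A hedged usage sketch, where vAppStopTickInterrupt()/vAppRestartTickInterrupt() are hypothetical application helpers:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    extern void vAppStopTickInterrupt( void );      /* hypothetical */
    extern void vAppRestartTickInterrupt( void );   /* hypothetical */

    void vDoWorkWithTicksStopped( void )
    {
        const TickType_t xTicksMissed = pdMS_TO_TICKS( 50 );

        vAppStopTickInterrupt();
        /* ... roughly 50 ms of work with the tick interrupt disabled ... */
        vAppRestartTickInterrupt();

        /* Move the tick count (and any timeouts that expired meanwhile)
         * forward in one step; pdTRUE means a context switch resulted. */
        ( void ) xTaskCatchUpTicks( xTicksMissed );
    }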
*/ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xPendedTicks += xTicksToCatchUp; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); xYieldOccurred = xTaskResumeAll(); return xYieldOccurred; @@ -2932,7 +3031,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) configASSERT( pxTCB ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* A task can only be prematurely removed from the Blocked state if * it is actually in the Blocked state. */ @@ -2949,7 +3048,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL_SC_ONLY( &xKernelLock ); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -2965,7 +3064,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL_SC_ONLY( &xKernelLock ); /* Place the unblocked task into the appropriate ready list. */ prvAddTaskToReadyList( pxTCB ); @@ -2998,7 +3097,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) xReturn = pdFAIL; } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return xReturn; } @@ -3016,174 +3115,196 @@ BaseType_t xTaskIncrementTick( void ) TCB_t * pxTCB; TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; + #if ( configUSE_TICK_HOOK == 1 ) + BaseType_t xCallTickHook; + #endif /* configUSE_TICK_HOOK == 1 */ /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ traceTASK_INCREMENT_TICK( xTickCount ); - if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures (unlike single core which calls this function with + * interrupts disabled). */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { - /* Minor optimisation. The tick count cannot change in this - * block. */ - const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - - /* Increment the RTOS tick, switching the delayed and overflowed - * delayed lists if it wraps to 0. */ - xTickCount = xConstTickCount; - - if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) { - taskSWITCH_DELAYED_LISTS(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - /* See if this tick has made a timeout expire. Tasks are stored in - * the queue in the order of their wake time - meaning once one task - * has been found whose block time has not expired there is no need to - * look any further down the list. */ - if( xConstTickCount >= xNextTaskUnblockTime ) - { - for( ; ; ) + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ { - if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) - { - /* The delayed list is empty. 
Set xNextTaskUnblockTime - * to the maximum possible value so it is extremely - * unlikely that the - * if( xTickCount >= xNextTaskUnblockTime ) test will pass - * next time through. */ - xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - break; - } - else - { - /* The delayed list is not empty, get the value of the - * item at the head of the delayed list. This is the time - * at which the task at the head of the delayed list must - * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - if( xConstTickCount < xItemValue ) + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) { - /* It is not time to unblock this item yet, but the - * item value is the time at which the task at the head - * of the blocked list must be removed from the Blocked - * state - so record the item value in - * xNextTaskUnblockTime. */ - xNextTaskUnblockTime = xItemValue; - break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; } else { - mtCOVERAGE_TEST_MARKER(); - } + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); - /* It is time to remove the item from the Blocked state. */ - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - - /* Is the task waiting on an event also? If so remove - * it from the event list. */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Place the unblocked task into the appropriate ready - * list. */ - prvAddTaskToReadyList( pxTCB ); - - /* A task being unblocked cannot cause an immediate - * context switch if preemption is turned off. */ - #if ( configUSE_PREEMPTION == 1 ) - { - /* Preemption is on, but a context switch should - * only be performed if the unblocked task has a - * priority that is equal to or higher than the - * currently executing task. 
- * - * For SMP, since this function is only run on core - * 0, we only need to context switch if the unblocked - * task can run on core 0 and has a higher priority - * than the current task. */ - if( ( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority ) ) + if( xConstTickCount < xItemValue ) { - xSwitchRequired = pdTRUE; + /* It is not time to unblock this item yet, but the + * item value is the time at which the task at the head + * of the blocked list must be removed from the Blocked + * state - so record the item value in + * xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ } else { mtCOVERAGE_TEST_MARKER(); } + + /* It is time to remove the item from the Blocked state. */ + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + * it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + * list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + * context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should + * only be performed if the unblocked task has a + * priority that is equal to or higher than the + * currently executing task. + * + * For SMP, since this function is only run on core + * 0, we only need to context switch if the unblocked + * task can run on core 0 and has a higher priority + * than the current task. */ + if( ( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority ) ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ } - #endif /* configUSE_PREEMPTION */ } } - } - /* Tasks of equal priority to the currently running task will share - * processing time (time slice) if preemption is on, and the application - * writer has not explicitly turned time slicing off. */ - #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + /* Tasks of equal priority to the currently running task will share + * processing time (time slice) if preemption is on, and the application + * writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - xSwitchRequired = pdTRUE; + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ - #if ( configUSE_TICK_HOOK == 1 ) - { - /* Guard against the tick hook being called when the pended tick - * count is being unwound (when the scheduler is being unlocked). 
*/ - if( xPendedTicksTemp == ( TickType_t ) 0 ) + #if ( configUSE_TICK_HOOK == 1 ) { - vApplicationTickHook(); + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). */ + if( xPendedTicksTemp == ( TickType_t ) 0 ) + { + xCallTickHook = pdTRUE; + } + else + { + xCallTickHook = pdFALSE; + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_TICK_HOOK */ + #endif /* configUSE_TICK_HOOK */ - #if ( configUSE_PREEMPTION == 1 ) - { - if( xYieldPending[ 0 ] != pdFALSE ) + #if ( configUSE_PREEMPTION == 1 ) { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xYieldPending[ 0 ] != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + #endif /* configUSE_PREEMPTION */ + } + else + { + ++xPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + xCallTickHook = pdTRUE; + } + #endif } - #endif /* configUSE_PREEMPTION */ } - else - { - ++xPendedTicks; - /* The tick hook gets called at regular intervals, even if the - * scheduler is locked. */ - #if ( configUSE_TICK_HOOK == 1 ) + /* Release the previously taken kernel lock as we have finished accessing + * the kernel data structures. */ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + + #if ( configUSE_TICK_HOOK == 1 ) + { + if( xCallTickHook == pdTRUE ) { vApplicationTickHook(); } - #endif } + #endif return xSwitchRequired; } @@ -3209,11 +3330,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xTCB->pxTaskTag = pxHookFunction; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_APPLICATION_TASK_TAG */ @@ -3231,11 +3352,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -3256,11 +3377,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xReturn; } @@ -3394,78 +3515,88 @@ get_next_task: void vTaskSwitchContext( void ) { - /* Get current core ID as we can no longer be preempted. */ - const BaseType_t xCurCoreID = portGET_CORE_ID(); - - if( uxSchedulerSuspended[ xCurCoreID ] != ( UBaseType_t ) pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures (unlike single core which calls this function with + * either interrupts disabled or when the scheduler hasn't started yet). */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { - /* The scheduler is currently suspended - do not allow a context - * switch. 
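A structural change worth noting in xTaskIncrementTick() above: the decision to run the tick hook is recorded in xCallTickHook while the kernel lock is held, and vApplicationTickHook() itself is only called after the lock has been released, so application code never executes with a spinlock held. The same defer-the-callback pattern is useful in application code; a minimal sketch, where vOnEvent() is a hypothetical user callback:

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static portMUX_TYPE sStateLock = portMUX_INITIALIZER_UNLOCKED;
    static volatile uint32_t sPendingEvents = 0;

    extern void vOnEvent( void );    /* hypothetical, must not run under the lock */

    void vDrainOneEvent( void )
    {
        BaseType_t xCallHook = pdFALSE;

        taskENTER_CRITICAL( &sStateLock );
        {
            if( sPendingEvents > 0 )
            {
                sPendingEvents--;
                xCallHook = pdTRUE;    /* only record the decision here */
            }
        }
        taskEXIT_CRITICAL( &sStateLock );

        if( xCallHook == pdTRUE )
        {
            vOnEvent();    /* callback runs outside the critical section */
        }
    }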
*/ - xYieldPending[ xCurCoreID ] = pdTRUE; - } - else - { - xYieldPending[ xCurCoreID ] = pdFALSE; - traceTASK_SWITCHED_OUT(); + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - #if ( configGENERATE_RUN_TIME_STATS == 1 ) + if( uxSchedulerSuspended[ xCurCoreID ] != ( UBaseType_t ) pdFALSE ) { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPending[ xCurCoreID ] = pdTRUE; + } + else + { + xYieldPending[ xCurCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime[ xCurCoreID ] ) + { + pxCurrentTCBs[ xCurCoreID ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xCurCoreID ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime[ xCurCoreID ] = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCBs[ xCurCoreID ]->iTaskErrno = FreeRTOS_errno; + } #endif - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime[ xCurCoreID ] ) + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) { - pxCurrentTCBs[ xCurCoreID ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xCurCoreID ] ); + FreeRTOS_errno = pxCurrentTCBs[ xCurCoreID ]->iTaskErrno; } - else + #endif + + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - mtCOVERAGE_TEST_MARKER(); + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. 
*/ + configSET_TLS_BLOCK( pxCurrentTCBs[ xCurCoreID ]->xTLSBlock ); } - - ulTaskSwitchedInTime[ xCurCoreID ] = ulTotalRunTime; + #endif } - #endif /* configGENERATE_RUN_TIME_STATS */ - - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ); - - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - pxCurrentTCBs[ xCurCoreID ]->iTaskErrno = FreeRTOS_errno; - } - #endif - - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); - - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCBs[ xCurCoreID ]->iTaskErrno; - } - #endif - - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - { - /* Switch C-Runtime's TLS Block to point to the TLS - * Block specific to this task. */ - configSET_TLS_BLOCK( pxCurrentTCBs[ xCurCoreID ]->xTLSBlock ); - } - #endif } + + /* Release the previously taken kernel lock as we have finished accessing + * the kernel data structures. */ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3474,23 +3605,31 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, { configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE - * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + /* IN SINGLE-CORE THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED + * OR THE SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. IN SMP + * THIS FUNCTION MUST BE CALLED WITH THE QUEUE'S xQueueLock TAKEN. */ - /* Place the event list item of the TCB in the appropriate event list. - * This is placed in the list in priority order so the highest priority task - * is the first to be woken by the event. - * - * Note: Lists are sorted in ascending order by ListItem_t.xItemValue. - * Normally, the xItemValue of a TCB's ListItem_t members is: - * xItemValue = ( configMAX_PRIORITIES - uxPriority ) - * Therefore, the event list is sorted in descending priority order. - * - * The queue that contains the event list is locked, preventing - * simultaneous access from interrupts. */ - vListInsert( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Place the event list item of the TCB in the appropriate event list. + * This is placed in the list in priority order so the highest priority task + * is the first to be woken by the event. + * + * Note: Lists are sorted in ascending order by ListItem_t.xItemValue. + * Normally, the xItemValue of a TCB's ListItem_t members is: + * xItemValue = ( configMAX_PRIORITIES - uxPriority ) + * Therefore, the event list is sorted in descending priority order. + * + * The queue that contains the event list is locked, preventing + * simultaneous access from interrupts. 
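The run-time statistics block above adds each task's elapsed run time to ulRunTimeCounter using the port's run-time counter. To actually read those numbers, a project enables configGENERATE_RUN_TIME_STATS and supplies the two port macros; ESP-IDF wires this up already, so the fragment below is only a generic illustration and ulAppGetRunTimeCounter() is a hypothetical counter function:

    /* FreeRTOSConfig.h fragment (illustrative only): */
    #define configGENERATE_RUN_TIME_STATS             1
    #define configUSE_TRACE_FACILITY                  1
    #define configUSE_STATS_FORMATTING_FUNCTIONS      1
    #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()  /* counter already running */
    #define portGET_RUN_TIME_COUNTER_VALUE()          ulAppGetRunTimeCounter()

    /* Application code: format one line per task with its accumulated run time. */
    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    void vPrintRunTimeStats( void )
    {
        static char cStatsBuffer[ 1024 ];

        vTaskGetRunTimeStats( cStatsBuffer );
        printf( "%s\n", cStatsBuffer );
    }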
*/ + vListInsert( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3503,23 +3642,39 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by - * the event groups implementation. */ - configASSERT( uxSchedulerSuspended[ xCurCoreID ] != 0 ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* IN SMP, THIS FUNCTION MUST BE CALLED WITH THE EVENT GROUP'S + * xEventGroupLock ALREADY TAKEN. */ + } + #else /* configNUMBER_OF_CORES > 1 */ + { + /* IN SINGLE-CORE, THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. + * It is used by the event groups implementation. */ + configASSERT( uxSchedulerSuspended[ xCurCoreID ] != 0 ); + } + #endif /* configNUMBER_OF_CORES > 1 */ - /* Store the item value in the event list item. It is safe to access the - * event list item here as interrupts won't access the event list item of a - * task that is not in the Blocked state. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Store the item value in the event list item. It is safe to access the + * event list item here as interrupts won't access the event list item of a + * task that is not in the Blocked state. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); - /* Place the event list item of the TCB at the end of the appropriate event - * list. It is safe to access the event list here because it is part of an - * event group implementation - and interrupts don't access event groups - * directly (instead they access them indirectly by pending function calls to - * the task level). */ - listINSERT_END( pxEventList, &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); + /* Place the event list item of the TCB at the end of the appropriate event + * list. It is safe to access the event list here because it is part of an + * event group implementation - and interrupts don't access event groups + * directly (instead they access them indirectly by pending function calls to + * the task level). */ + listINSERT_END( pxEventList, &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3534,25 +3689,32 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* This function should not be called by application code hence the * 'Restricted' in its name. It is not part of the public API. It is * designed for use by kernel code, and has special calling requirements - - * it should be called with the scheduler suspended. */ + * it should be called with the scheduler suspended in single-core, or + * with the queue's xQueueLock already taken in SMP. 
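The unordered variant above is reached through the event group API, with the group's xEventGroupLock already held, which is why the scheduler-suspended assertion only applies to single-core builds. A minimal usage sketch (the event bit and function name are hypothetical):

#include "freertos/FreeRTOS.h"
#include "freertos/event_groups.h"

#define EXAMPLE_RX_DONE_BIT    ( 1UL << 0 )    /* hypothetical event bit */

/* Hypothetical waiter: xEventGroupWaitBits() blocks the calling task via
 * vTaskPlaceOnUnorderedEventList() with the group's spinlock held. */
static void prvWaitForRxDone( EventGroupHandle_t xGroup )
{
    EventBits_t uxBits;

    uxBits = xEventGroupWaitBits( xGroup,
                                  EXAMPLE_RX_DONE_BIT,
                                  pdTRUE,           /* clear the bit on exit */
                                  pdFALSE,          /* any bit is sufficient */
                                  portMAX_DELAY );
    ( void ) uxBits;
}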
*/ - - /* Place the event list item of the TCB in the appropriate event list. - * In this case it is assume that this is the only task that is going to - * be waiting on this event list, so the faster vListInsertEnd() function - * can be used in place of vListInsert. */ - listINSERT_END( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); - - /* If the task should block indefinitely then set the block time to a - * value that will be recognised as an indefinite delay inside the - * prvAddCurrentTaskToDelayedList() function. */ - if( xWaitIndefinitely != pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - xTicksToWait = portMAX_DELAY; - } + /* Place the event list item of the TCB in the appropriate event list. + * In this case it is assume that this is the only task that is going to + * be waiting on this event list, so the faster vListInsertEnd() function + * can be used in place of vListInsert. */ + listINSERT_END( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); - traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + /* If the task should block indefinitely then set the block time to a + * value that will be recognised as an indefinite delay inside the + * prvAddCurrentTaskToDelayedList() function. */ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_TIMERS */ @@ -3565,76 +3727,102 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, TCB_t * pxUnblockedTCB; BaseType_t xReturn; - /* Before taking the kernel lock, another task/ISR could have already - * emptied the pxEventList. So we insert a check here to see if - * pxEventList is empty before attempting to remove an item from it. */ - if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. + * This function can also be called from an ISR context, so we + * need to check whether we are in an ISR.*/ + if( portCHECK_IF_IN_ISR() == pdFALSE ) { - /* Get current core ID as we can no longer be preempted. */ - const BaseType_t xCurCoreID = portGET_CORE_ID(); - - /* Remove the task from its current event list */ - pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); - configASSERT( pxUnblockedTCB ); - listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); - - /* Add the task to the ready list if a core with compatible affinity - * has NOT suspended its scheduler. This occurs when: - * - The task is pinned, and the pinned core's scheduler is running - * - The task is unpinned, and at least one of the core's scheduler is running */ - if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdTRUE ) - { - listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxUnblockedTCB ); - - #if ( configUSE_TICKLESS_IDLE != 0 ) - { - /* If a task is blocked on a kernel object then xNextTaskUnblockTime - * might be set to the blocked task's time out time. 
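xTaskRemoveFromEventList() above picks between the task-level and ISR-level critical section entry at run time because it can be reached from both contexts. The same shape can be sketched for a driver's own granular spinlock; whether application code should use portCHECK_IF_IN_ISR() and the _ISR critical macros directly is an assumption here, so take this as an illustration of the pattern only.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical driver state protected by its own granular spinlock. */
static portMUX_TYPE xDriverLock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t ulSharedCounter = 0;

void vExampleIncrementFromAnyContext( void )
{
    if( portCHECK_IF_IN_ISR() == pdFALSE )
    {
        taskENTER_CRITICAL( &xDriverLock );
        ulSharedCounter++;
        taskEXIT_CRITICAL( &xDriverLock );
    }
    else
    {
        taskENTER_CRITICAL_ISR( &xDriverLock );
        ulSharedCounter++;
        taskEXIT_CRITICAL_ISR( &xDriverLock );
    }
}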
If the task is - * unblocked for a reason other than a timeout xNextTaskUnblockTime is - * normally left unchanged, because it is automatically reset to a new - * value when the tick count equals xNextTaskUnblockTime. However if - * tickless idling is used it might be more important to enter sleep mode - * at the earliest possible time - so reset xNextTaskUnblockTime here to - * ensure it is updated at the earliest possible time. */ - prvResetNextTaskUnblockTime(); - } - #endif - } - else - { - /* We arrive here due to one of the following possibilities: - * - The task is pinned to core X and core X has suspended its scheduler - * - The task is unpinned and both cores have suspend their schedulers - * Therefore, we add the task to one of the pending lists: - * - If the task is pinned to core X, add it to core X's pending list - * - If the task is unpinned, add it to the current core's pending list */ - UBaseType_t uxPendCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? xCurCoreID : pxUnblockedTCB->xCoreID ); - configASSERT( uxSchedulerSuspended[ uxPendCore ] != ( UBaseType_t ) 0U ); - - /* Add the task to the current core's pending list */ - listINSERT_END( &( xPendingReadyList[ uxPendCore ] ), &( pxUnblockedTCB->xEventListItem ) ); - } - - if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE ) - { - /* The unblocked task requires a the current core to yield */ - xReturn = pdTRUE; - - /* Mark that a yield is pending in case the user is not using the - * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - xYieldPending[ xCurCoreID ] = pdTRUE; - } - else - { - xReturn = pdFALSE; - } + taskENTER_CRITICAL( &xKernelLock ); } else { - /* The pxEventList was emptied before we entered the critical - * section, Nothing to do except return pdFALSE. */ - xReturn = pdFALSE; + taskENTER_CRITICAL_ISR( &xKernelLock ); + } + + { + /* Before taking the kernel lock, another task/ISR could have already + * emptied the pxEventList. So we insert a check here to see if + * pxEventList is empty before attempting to remove an item from it. */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + /* Remove the task from its current event list */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + configASSERT( pxUnblockedTCB ); + listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); + + /* Add the task to the ready list if a core with compatible affinity + * has NOT suspended its scheduler. This occurs when: + * - The task is pinned, and the pinned core's scheduler is running + * - The task is unpinned, and at least one of the core's scheduler is running */ + if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdTRUE ) + { + listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + * might be set to the blocked task's time out time. If the task is + * unblocked for a reason other than a timeout xNextTaskUnblockTime is + * normally left unchanged, because it is automatically reset to a new + * value when the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter sleep mode + * at the earliest possible time - so reset xNextTaskUnblockTime here to + * ensure it is updated at the earliest possible time. 
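The xYieldPending fallback and the "xHigherPriorityTaskWoken" parameter mentioned above meet in the usual FromISR pattern. A minimal sketch (the semaphore handle and handler name are hypothetical) of an ISR whose give unblocks a waiter through xTaskRemoveFromEventList():

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xDataReadySem;    /* created elsewhere, hypothetical */

void vExampleIsrHandler( void * pvArg )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    ( void ) pvArg;

    /* Unblocks a task waiting in xSemaphoreTake(); on this path the kernel
     * calls xTaskRemoveFromEventList() with the kernel lock taken. */
    xSemaphoreGiveFromISR( xDataReadySem, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        /* Request the context switch now rather than relying on the
         * xYieldPending flag alone. */
        portYIELD_FROM_ISR();
    }
}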
*/ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* We arrive here due to one of the following possibilities: + * - The task is pinned to core X and core X has suspended its scheduler + * - The task is unpinned and both cores have suspend their schedulers + * Therefore, we add the task to one of the pending lists: + * - If the task is pinned to core X, add it to core X's pending list + * - If the task is unpinned, add it to the current core's pending list */ + UBaseType_t uxPendCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? xCurCoreID : pxUnblockedTCB->xCoreID ); + configASSERT( uxSchedulerSuspended[ uxPendCore ] != ( UBaseType_t ) 0U ); + + /* Add the task to the current core's pending list */ + listINSERT_END( &( xPendingReadyList[ uxPendCore ] ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE ) + { + /* The unblocked task requires a the current core to yield */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending[ xCurCoreID ] = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + else + { + /* The pxEventList was emptied before we entered the critical + * section, Nothing to do except return pdFALSE. */ + xReturn = pdFALSE; + } + } + + /* Release the previously taken kernel lock. */ + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( &xKernelLock ); + } + else + { + taskEXIT_CRITICAL_ISR( &xKernelLock ); } return xReturn; @@ -3805,12 +3993,12 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) { configASSERT( pxTimeOut ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3830,7 +4018,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, configASSERT( pxTimeOut ); configASSERT( pxTicksToWait ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Minor optimisation. The tick count cannot change in this block. */ const TickType_t xConstTickCount = xTickCount; @@ -3883,7 +4071,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -4026,7 +4214,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) { - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Now the scheduler is suspended, the expected idle * time can be sampled again, and this time its value can @@ -4050,7 +4238,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) mtCOVERAGE_TEST_MARKER(); } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); } else { @@ -4232,7 +4420,7 @@ static void prvCheckTasksWaitingTermination( void ) #if ( configNUMBER_OF_CORES > 1 ) { pxTCB = NULL; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* List may have already been cleared by the other core. 
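vTaskSetTimeOutState() and xTaskCheckForTimeOut(), which now take their critical sections on the kernel lock, are also a public pair that driver code can use to keep one overall deadline across repeated waits. A small, hypothetical usage sketch:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical helper: poll a flag until it is set or xTicksToWait expires,
 * surviving tick-count overflow via the TimeOut_t bookkeeping. */
static BaseType_t prvWaitForFlag( volatile BaseType_t * pxFlag,
                                  TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;

    vTaskSetTimeOutState( &xTimeOut );

    while( *pxFlag == pdFALSE )
    {
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
        {
            return pdFALSE;    /* Timed out. */
        }

        vTaskDelay( 1 );       /* Check again on the next tick. */
    }

    return pdTRUE;
}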
Check again */ if( listLIST_IS_EMPTY( &xTasksWaitingTermination ) == pdFALSE ) @@ -4256,7 +4444,7 @@ static void prvCheckTasksWaitingTermination( void ) } } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); if( pxTCB != NULL ) { @@ -4270,14 +4458,14 @@ static void prvCheckTasksWaitingTermination( void ) } #else /* configNUMBER_OF_CORES > 1 */ { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); --uxCurrentNumberOfTasks; --uxDeletedTasksWaitingCleanUp; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); prvDeleteTCB( pxTCB ); } #endif /* configNUMBER_OF_CORES > 1 */ @@ -4299,94 +4487,111 @@ static void prvCheckTasksWaitingTermination( void ) /* xTask is NULL then get the state of the calling task. */ pxTCB = prvGetTCBFromHandle( xTask ); - pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; - pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); - pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; - pxTaskStatus->pxStackBase = pxTCB->pxStack; - #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) - pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack; - pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack; - #endif - pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; - /* Todo: Remove xCoreID for single core builds (IDF-7894) */ - pxTaskStatus->xCoreID = pxTCB->xCoreID; + /* A critical section is required for SMP in case another core modifies + * the task simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack; + pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack; + #endif + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + /* Todo: Remove xCoreID for single core builds (IDF-7894) */ + pxTaskStatus->xCoreID = pxTCB->xCoreID; - #if ( configUSE_MUTEXES == 1 ) - { - pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; - } - #else - { - pxTaskStatus->uxBasePriority = 0; - } - #endif - - #if ( configGENERATE_RUN_TIME_STATS == 1 ) - { - pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; - } - #else - { - pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0; - } - #endif - - /* Obtaining the task state is a little fiddly, so is only done if the - * value of eState passed into this function is eInvalid - otherwise the - * state is just set to whatever is passed in. */ - if( eState != eInvalid ) - { - if( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ) + #if ( configUSE_MUTEXES == 1 ) { - pxTaskStatus->eCurrentState = eRunning; - } - else - { - pxTaskStatus->eCurrentState = eState; - - #if ( INCLUDE_vTaskSuspend == 1 ) - { - /* If the task is in the suspended list then there is a - * chance it is actually just blocked indefinitely - so really - * it should be reported as being in the Blocked state. 
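vTaskGetInfo() above now snapshots the TCB fields inside a kernel-lock critical section on SMP, so the fields in the returned TaskStatus_t are mutually consistent. A hypothetical caller, assuming configUSE_TRACE_FACILITY is enabled:

#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vExamplePrintTaskInfo( TaskHandle_t xTask )
{
    TaskStatus_t xStatus;

    /* Query the stack high water mark too (third argument pdTRUE) and let
     * the kernel work out the task state (eInvalid). */
    vTaskGetInfo( xTask, &xStatus, pdTRUE, eInvalid );

    printf( "%s: priority %u, high water mark %u\n",
            xStatus.pcTaskName,
            ( unsigned ) xStatus.uxCurrentPriority,
            ( unsigned ) xStatus.usStackHighWaterMark );
}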
*/ - if( eState == eSuspended ) - { - vTaskSuspendAll(); - { - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - pxTaskStatus->eCurrentState = eBlocked; - } - } - ( void ) xTaskResumeAll(); - } - } - #endif /* INCLUDE_vTaskSuspend */ - } - } - else - { - pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); - } - - /* Obtaining the stack space takes some time, so the xGetFreeStackSpace - * parameter is provided to allow it to be skipped. */ - if( xGetFreeStackSpace != pdFALSE ) - { - #if ( portSTACK_GROWTH > 0 ) - { - pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; } #else { - pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + pxTaskStatus->uxBasePriority = 0; } #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + * value of eState passed into this function is eInvalid - otherwise the + * state is just set to whatever is passed in. */ + if( eState != eInvalid ) + { + if( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + * chance it is actually just blocked indefinitely - so really + * it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + #if ( configNUMBER_OF_CORES == 1 ) + { + /* Single core uses a scheduler suspension to + * atomically check if the task task is blocked. */ + vTaskSuspendAll(); + } + #endif /* configNUMBER_OF_CORES == 1 */ + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + #if ( configNUMBER_OF_CORES == 1 ) + { + ( void ) xTaskResumeAll(); + } + #endif /* configNUMBER_OF_CORES == 1 */ + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + * parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } } - else - { - pxTaskStatus->usStackHighWaterMark = 0; - } + /* Exit the previously entered critical section. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_TRACE_FACILITY */ @@ -4609,21 +4814,28 @@ static void prvResetNextTaskUnblockTime( void ) { BaseType_t xReturn; - if( xSchedulerRunning == pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. 
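The xGetFreeStackSpace option above is the expensive part of the query; when only the stack headroom is needed, the dedicated accessor can be used instead. A hypothetical watchdog-style check:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical check: warn when a task's minimum-ever free stack drops below
 * a chosen threshold. */
void vExampleCheckStackHeadroom( TaskHandle_t xTask )
{
    UBaseType_t uxHighWaterMark = uxTaskGetStackHighWaterMark( xTask );

    if( uxHighWaterMark < ( UBaseType_t ) 128 )
    {
        /* Log, raise an error flag, etc. */
    }
}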
*/ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - xReturn = taskSCHEDULER_NOT_STARTED; - } - else - { - if( uxSchedulerSuspended[ portGET_CORE_ID() ] == ( UBaseType_t ) pdFALSE ) + if( xSchedulerRunning == pdFALSE ) { - xReturn = taskSCHEDULER_RUNNING; + xReturn = taskSCHEDULER_NOT_STARTED; } else { - xReturn = taskSCHEDULER_SUSPENDED; + if( uxSchedulerSuspended[ portGET_CORE_ID() ] == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } } } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4638,85 +4850,92 @@ static void prvResetNextTaskUnblockTime( void ) TCB_t * const pxMutexHolderTCB = pxMutexHolder; BaseType_t xReturn = pdFALSE; - /* Get current core ID as we can no longer be preempted. */ - const BaseType_t xCurCoreID = portGET_CORE_ID(); - - /* If the mutex was given back by an interrupt while the queue was - * locked then the mutex holder might now be NULL. _RB_ Is this still - * needed as interrupts can no longer use mutexes? */ - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* If the holder of the mutex has a priority below the priority of - * the task attempting to obtain the mutex then it will temporarily - * inherit the priority of the task attempting to obtain the mutex. */ - if( pxMutexHolderTCB->uxPriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) - { - /* Adjust the mutex holder state to account for its new - * priority. Only reset the event list item value if the value is - * not being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) - { - listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - /* If the task being modified is in the ready state it will need - * to be moved into a new list. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + /* If the mutex was given back by an interrupt while the queue was + * locked then the mutex holder might now be NULL. _RB_ Is this still + * needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + * the task attempting to obtain the mutex then it will temporarily + * inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) { - if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Adjust the mutex holder state to account for its new + * priority. Only reset the event list item value if the value is + * not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. 
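xTaskGetSchedulerState(), whose body above is now wrapped in a kernel-lock critical section for SMP, is typically used to guard code that must not block while the scheduler is not running. A small, hypothetical guard:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vExampleShortPause( void )
{
    if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
    {
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
    else
    {
        /* Scheduler not started or suspended: fall back to a busy wait, or
         * return and retry later. */
    }
}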
*/ - portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } else { mtCOVERAGE_TEST_MARKER(); } - /* Inherit the priority before being moved into the new list. */ - pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; - prvAddTaskToReadyList( pxMutexHolderTCB ); - } - else - { - /* Just inherit the priority. */ - pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; - } + /* If the task being modified is in the ready state it will need + * to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCBs[ xCurCoreID ]->uxPriority ); + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; + } - /* Inheritance occurred. */ - xReturn = pdTRUE; - } - else - { - if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) - { - /* The base priority of the mutex holder is lower than the - * priority of the task attempting to take the mutex, but the - * current priority of the mutex holder is not lower than the - * priority of the task attempting to take the mutex. - * Therefore the mutex holder must have already inherited a - * priority, but inheritance would have occurred if that had - * not been the case. */ + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCBs[ xCurCoreID ]->uxPriority ); + + /* Inheritance occurred. */ xReturn = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + * priority of the task attempting to take the mutex, but the + * current priority of the mutex holder is not lower than the + * priority of the task attempting to take the mutex. + * Therefore the mutex holder must have already inherited a + * priority, but inheritance would have occurred if that had + * not been the case. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4731,57 +4950,66 @@ static void prvResetNextTaskUnblockTime( void ) TCB_t * const pxTCB = pxMutexHolder; BaseType_t xReturn = pdFALSE; - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. 
*/ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* A task can only have an inherited priority if it holds the mutex. - * If the mutex is held by a task then it cannot be given from an - * interrupt, and if a mutex is given by the holding task then it must - * be the running state task. */ - configASSERT( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ); - configASSERT( pxTCB->uxMutexesHeld ); - ( pxTCB->uxMutexesHeld )--; - - /* Has the holder of the mutex inherited the priority of another - * task? */ - if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + if( pxMutexHolder != NULL ) { - /* Only disinherit if no other mutexes are held. */ - if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + /* A task can only have an inherited priority if it holds the mutex. + * If the mutex is held by a task then it cannot be given from an + * interrupt, and if a mutex is given by the holding task then it must + * be the running state task. */ + configASSERT( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + * task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) { - /* A task can only have an inherited priority if it holds - * the mutex. If the mutex is held by a task then it cannot be - * given from an interrupt, and if a mutex is given by the - * holding task then it must be the running state task. Remove - * the holding task from the ready list. */ - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) { - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + /* A task can only have an inherited priority if it holds + * the mutex. If the mutex is held by a task then it cannot be + * given from an interrupt, and if a mutex is given by the + * holding task then it must be the running state task. Remove + * the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + * new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + * any other purpose if this task is running, and it must be + * running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + * This is only actually required in the corner case whereby + * multiple mutexes were held and the mutexes were given back + * in an order different to that in which they were taken. + * If a context switch did not occur when the first mutex was + * returned, even if a task was waiting on it, then a context + * switch should occur when the last mutex is returned whether + * a task is waiting on it or not. */ + xReturn = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } - - /* Disinherit the priority before adding the task into the - * new ready list. 
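The inherit/disinherit pair above is exercised by ordinary mutex usage; nothing special is needed at the application level. A hypothetical two-task scenario (handles, names and priorities are illustrative):

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"

static SemaphoreHandle_t xResourceMutex;    /* hypothetical shared mutex */

/* Low priority worker: while a higher priority task blocks on the mutex,
 * this task runs at the waiter's priority (xTaskPriorityInherit()) and drops
 * back when it gives the mutex (xTaskPriorityDisinherit()). */
static void prvLowPriorityWorker( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        ( void ) xSemaphoreTake( xResourceMutex, portMAX_DELAY );
        /* ... access the shared resource ... */
        ( void ) xSemaphoreGive( xResourceMutex );
    }
}

void vExampleStartWorker( void )
{
    xResourceMutex = xSemaphoreCreateMutex();
    xTaskCreatePinnedToCore( prvLowPriorityWorker, "low_prio", 2048, NULL, 1, NULL, tskNO_AFFINITY );
}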
*/ - traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); - pxTCB->uxPriority = pxTCB->uxBasePriority; - - /* Reset the event list item value. It cannot be in use for - * any other purpose if this task is running, and it must be - * running to give back the mutex. */ - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - prvAddTaskToReadyList( pxTCB ); - - /* Return true to indicate that a context switch is required. - * This is only actually required in the corner case whereby - * multiple mutexes were held and the mutexes were given back - * in an order different to that in which they were taken. - * If a context switch did not occur when the first mutex was - * returned, even if a task was waiting on it, then a context - * switch should occur when the last mutex is returned whether - * a task is waiting on it or not. */ - xReturn = pdTRUE; } else { @@ -4793,10 +5021,8 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4813,78 +5039,87 @@ static void prvResetNextTaskUnblockTime( void ) UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* If pxMutexHolder is not NULL then the holder must hold at least - * one mutex. */ - configASSERT( pxTCB->uxMutexesHeld ); + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + * one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); - /* Determine the priority to which the priority of the task that - * holds the mutex should be set. This will be the greater of the - * holding task's base priority and the priority of the highest - * priority task that is waiting to obtain the mutex. */ - if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) - { - uxPriorityToUse = uxHighestPriorityWaitingTask; - } - else - { - uxPriorityToUse = pxTCB->uxBasePriority; - } - - /* Does the priority need to change? */ - if( pxTCB->uxPriority != uxPriorityToUse ) - { - /* Only disinherit if no other mutexes are held. This is a - * simplification in the priority inheritance implementation. If - * the task that holds the mutex is also holding other mutexes then - * the other mutexes may have caused the priority inheritance. */ - if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + /* Determine the priority to which the priority of the task that + * holds the mutex should be set. This will be the greater of the + * holding task's base priority and the priority of the highest + * priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) { - /* If a task has timed out because it already holds the - * mutex it was trying to obtain then it cannot of inherited - * its own priority. */ - configASSERT( pxTCB != pxCurrentTCBs[ portGET_CORE_ID() ] ); + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } - /* Disinherit the priority, remembering the previous - * priority to facilitate determining the subject task's - * state. 
*/ - traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); - uxPriorityUsedOnEntry = pxTCB->uxPriority; - pxTCB->uxPriority = uxPriorityToUse; + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + * simplification in the priority inheritance implementation. If + * the task that holds the mutex is also holding other mutexes then + * the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + * mutex it was trying to obtain then it cannot of inherited + * its own priority. */ + configASSERT( pxTCB != pxCurrentTCBs[ portGET_CORE_ID() ] ); - /* Only reset the event list item value if the value is not - * being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) - { - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Disinherit the priority, remembering the previous + * priority to facilitate determining the subject task's + * state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; - /* If the running task is not the task that holds the mutex - * then the task that holds the mutex could be in either the - * Ready, Blocked or Suspended states. Only remove the task - * from its current state list if it is in the Ready state as - * the task's priority is going to change and there is one - * Ready list per priority. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) - { - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. */ - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } else { mtCOVERAGE_TEST_MARKER(); } - prvAddTaskToReadyList( pxTCB ); + /* If the running task is not the task that holds the mutex + * then the task that holds the mutex could be in either the + * Ready, Blocked or Suspended states. Only remove the task + * from its current state list if it is in the Ready state as + * the task's priority is going to change and there is one + * Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. 
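vTaskPriorityDisinheritAfterTimeout() above runs when a waiter gives up on a mutex, so the holder's priority can be recalculated against the remaining waiters. From the application side this is simply a mutex take with a finite timeout; the helper below is hypothetical:

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

BaseType_t xExampleTryLock( SemaphoreHandle_t xMutex )
{
    /* Give up after 50 ms. On timeout the kernel re-evaluates the holder's
     * (possibly inherited) priority on the path shown above. */
    if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 50 ) ) == pdFALSE )
    {
        return pdFALSE;    /* Timed out, resource not acquired. */
    }

    /* ... use the protected resource ... */

    ( void ) xSemaphoreGive( xMutex );
    return pdTRUE;
}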
*/ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -4901,10 +5136,8 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_MUTEXES */ @@ -5237,14 +5470,21 @@ TickType_t uxTaskResetEventItemValue( void ) { TickType_t uxReturn; - /* Get current core ID as we can no longer be preempted. */ - const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* For SMP, we need to take the kernel lock here to ensure nothing else + * modifies the task's event item value simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); - /* Reset the event list item to its normal value - so it can be used with - * queues and semaphores. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + /* Reset the event list item to its normal value - so it can be used with + * queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + /* Release the previously taken kernel lock. */ return uxReturn; } @@ -5256,17 +5496,24 @@ TickType_t uxTaskResetEventItemValue( void ) { TaskHandle_t xReturn; - /* Get current core ID as we can no longer be preempted. */ - const BaseType_t xCurCoreID = portGET_CORE_ID(); - - /* If xSemaphoreCreateMutex() is called before any tasks have been created - * then pxCurrentTCBs will be NULL. */ - if( pxCurrentTCBs[ xCurCoreID ] != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - ( pxCurrentTCBs[ xCurCoreID ]->uxMutexesHeld )++; - } + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - xReturn = pxCurrentTCBs[ xCurCoreID ]; + /* If xSemaphoreCreateMutex() is called before any tasks have been created + * then pxCurrentTCBs will be NULL. */ + if( pxCurrentTCBs[ xCurCoreID ] != NULL ) + { + ( pxCurrentTCBs[ xCurCoreID ]->uxMutexesHeld )++; + } + + xReturn = pxCurrentTCBs[ xCurCoreID ]; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -5284,7 +5531,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. 
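The notify-take path above now takes its critical sections on the kernel lock; at the API level nothing changes. A minimal, hypothetical waiter using a direct-to-task notification as a lightweight binary semaphore:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void prvNotifyWaiterTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block for up to one second; pdTRUE clears the count on exit so the
         * notification behaves like a binary semaphore. */
        if( ulTaskNotifyTake( pdTRUE, pdMS_TO_TICKS( 1000 ) ) != 0 )
        {
            /* Handle the event. */
        }
    }
}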
*/ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5316,9 +5563,9 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5344,7 +5591,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return ulReturn; } @@ -5364,7 +5611,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5401,9 +5648,9 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5436,7 +5683,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -5460,7 +5707,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( xTaskToNotify ); pxTCB = xTaskToNotify; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { if( pulPreviousNotificationValue != NULL ) { @@ -5559,7 +5806,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -5604,7 +5851,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5701,7 +5948,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xReturn; } @@ -5742,7 +5989,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5795,7 +6042,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ @@ -5815,7 +6062,7 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. 
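The FromISR notify paths above use prvENTER_CRITICAL_OR_MASK_ISR()/prvEXIT_CRITICAL_OR_UNMASK_ISR() on the kernel lock. A hypothetical ISR-side counterpart to the waiter sketched earlier (the task handle is assumed to have been stored when the waiter was created):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static TaskHandle_t xWaiterTask;    /* hypothetical, stored at task creation */

void vExampleNotifyFromIsr( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    vTaskNotifyGiveFromISR( xWaiterTask, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        portYIELD_FROM_ISR();
    }
}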
*/ pxTCB = prvGetTCBFromHandle( xTask ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) { @@ -5827,7 +6074,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pdFAIL; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -5848,14 +6095,14 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. */ pxTCB = prvGetTCBFromHandle( xTask ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Return the notification as it was before the bits were cleared, * then clear the bit mask. */ ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return ulReturn; } diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c index ba3a2841f2..084c7d9b48 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c @@ -42,6 +42,8 @@ #include "task.h" #include "queue.h" #include "timers.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. @@ -147,6 +149,10 @@ PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; +/* Spinlock required in SMP when accessing the timers. For now we use a single lock + * Todo: Each timer could possible have its own lock for increased granularity. */ + PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED; + /*lint -restore */ /*-----------------------------------------------------------*/ @@ -462,7 +468,7 @@ Timer_t * pxTimer = xTimer; configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( xAutoReload != pdFALSE ) { @@ -473,7 +479,7 @@ pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD ); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ @@ -483,7 +489,7 @@ BaseType_t xReturn; configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 ) { @@ -496,7 +502,7 @@ xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return xReturn; } @@ -639,7 +645,7 @@ TickType_t xTimeNow; BaseType_t xTimerListsWereSwitched; - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock ); { /* Obtain the time now to make an assessment as to whether the timer * has expired or not. If obtaining the time causes the lists to switch @@ -653,7 +659,7 @@ /* The tick count has not overflowed, has the timer expired? */ if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) { - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ); prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); } else @@ -673,7 +679,7 @@ vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); - if( xTaskResumeAll() == pdFALSE ) + if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE ) { /* Yield to wait for either a command to arrive, or the * block time to expire. 
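The timers.c changes above introduce the xTimerLock spinlock that the timer module's critical sections now use; the public API is unchanged. A minimal, hypothetical periodic timer:

#include "freertos/FreeRTOS.h"
#include "freertos/timers.h"

static void prvBlinkCallback( TimerHandle_t xTimer )
{
    ( void ) xTimer;
    /* Runs in the timer service task's context every 500 ms. */
}

void vExampleStartBlinkTimer( void )
{
    TimerHandle_t xBlinkTimer = xTimerCreate( "blink",
                                              pdMS_TO_TICKS( 500 ),
                                              pdTRUE,            /* auto-reload */
                                              NULL,              /* timer ID */
                                              prvBlinkCallback );

    if( xBlinkTimer != NULL )
    {
        ( void ) xTimerStart( xBlinkTimer, 0 );
    }
}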
If a command arrived between the @@ -689,7 +695,7 @@ } else { - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ); } } } @@ -967,7 +973,7 @@ /* Check that the list from which active timers are referenced, and the * queue used to communicate with the timer service, have been * initialised. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( xTimerQueue == NULL ) { @@ -1009,7 +1015,7 @@ mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ @@ -1021,7 +1027,7 @@ configASSERT( xTimer ); /* Is the timer in the list of active timers? */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) { @@ -1032,7 +1038,7 @@ xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return xReturn; } /*lint !e818 Can't be pointer to const due to the typedef. */ @@ -1045,11 +1051,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { pvReturn = pxTimer->pvTimerID; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return pvReturn; } @@ -1062,11 +1068,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { pxTimer->pvTimerID = pvNewID; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ diff --git a/components/freertos/esp_additions/freertos_tasks_c_additions.h b/components/freertos/esp_additions/freertos_tasks_c_additions.h index e43dfbe071..c7e08d0af1 100644 --- a/components/freertos/esp_additions/freertos_tasks_c_additions.h +++ b/components/freertos/esp_additions/freertos_tasks_c_additions.h @@ -487,12 +487,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) * access kernel data structures. For single core, a critical section is * not required as this is not called from an interrupt and the current * TCB will always be the same for any individual execution thread. */ - taskENTER_CRITICAL_SMP_ONLY(); + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { xReturn = pxCurrentTCBs[ xCoreID ]; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY(); + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ { @@ -532,12 +532,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY(); + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY(); + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return ulRunTimeCounter; } @@ -564,12 +564,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) { /* For SMP, we need to take the kernel lock here as we are about * to access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY(); + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY(); + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } else {
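Finally, the prvENTER_CRITICAL_OR_SUSPEND_ALL()/prvEXIT_CRITICAL_OR_RESUME_ALL() wrappers used in tasks.c and timers.c above also come from the IDF additions. The sketch below is an assumption about their general shape only (the exit wrapper has to yield a BaseType_t because callers test its return value); the real definitions may differ.

/* Illustrative sketch only, using a GCC statement expression; not the actual
 * IDF definitions. */
#if ( configNUMBER_OF_CORES > 1 )
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( pxLock )
    /* No scheduler suspension happened, so no yield is pending: evaluate to
     * pdFALSE after releasing the lock. */
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( pxLock ); pdFALSE; } )
#else
    /* Single-core builds keep the original suspend/resume behaviour. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    do { ( void ) ( pxLock ); vTaskSuspendAll(); } while( 0 )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { ( void ) ( pxLock ); xTaskResumeAll(); } )
#endif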