forked from espressif/esp-idf
feat(freertos): Add SMP critical section changes to FreeRTOS v10.5.1
This commit adds the SMP critical section changes to the v10.5.1 kernel. These
changes are temporarily documented in the `idf_changes.md` document. This commit:

- Adds granular spinlocks to each data group (e.g., kernel, queues, event_groups, etc.)
- Updates critical section macros to use those spinlocks
- Adds missing critical sections required in SMP
event_groups.c

@@ -43,6 +43,8 @@
 #include "task.h"
 #include "timers.h"
 #include "event_groups.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"
 
 /* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
  * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
@@ -77,6 +79,8 @@ typedef struct EventGroupDef_t
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif
+
+    portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
 } EventGroup_t;
 
 /*-----------------------------------------------------------*/
@@ -131,6 +135,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
         }
     #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
 
+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -182,6 +189,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
         }
     #endif /* configSUPPORT_STATIC_ALLOCATION */
 
+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -213,7 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
     }
     #endif
 
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;
 
@@ -256,7 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             }
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -278,7 +288,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
             /* The task timed out, just return the current event bit value. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 uxReturn = pxEventBits->uxEventBits;
 
@@ -295,7 +305,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 
             xTimeoutOccurred = pdTRUE;
         }
@@ -340,7 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
     }
     #endif
 
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
 
@@ -408,7 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
             traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -429,7 +439,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
        {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 /* The task timed out, just return the current event bit value. */
                 uxReturn = pxEventBits->uxEventBits;
@@ -454,7 +464,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 
                 xTimeoutOccurred = pdTRUE;
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
         }
         else
         {
@@ -485,7 +495,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
 
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
 
@@ -496,7 +506,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 
     return uxReturn;
 }
@@ -552,7 +562,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
 
     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-    vTaskSuspendAll();
+
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )
+
+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
 
@@ -624,7 +641,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
          * bit was set in the control word. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
 
     return pxEventBits->uxEventBits;
 }
@@ -639,7 +661,13 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 
     pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
 
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )
+
+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
 
@@ -651,7 +679,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
     }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
 
     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
     {
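The `prvENTER_CRITICAL_OR_SUSPEND_ALL()` / `prvEXIT_CRITICAL_OR_RESUME_ALL()` pair used above comes from `esp_private/freertos_idf_additions_priv.h`, which is not included in this diff. A minimal sketch of the dual behavior the call sites rely on, assuming GCC statement expressions (the real definitions may differ):

```c
/* Hypothetical sketch only; the actual macros live in
 * esp_private/freertos_idf_additions_priv.h and may be defined differently. */
#if ( configNUMBER_OF_CORES > 1 )
    /* SMP: protect the data group with its own spinlock. There is no
     * "already yielded" result to report, so exit yields pdFALSE. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
#else
    /* Single core: keep the vanilla FreeRTOS scheduler-suspension behavior. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { ( void ) ( pxLock ); vTaskSuspendAll(); } )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      xTaskResumeAll()
#endif
```

Under this sketch, `xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( ... )` compiles on both configurations because both expansions produce a `BaseType_t` value.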
idf_changes.md

@@ -74,8 +74,6 @@ The following APIs have been added to support SMP
 
 ### API Modifications
 
-#### SMP Modifications
-
 Added the following macros that abstract away single-core and SMP differences:
 
 - `taskYIELD_CORE()` triggers a particular core to yield
@@ -122,9 +120,26 @@ The following functions were modified to accommodate SMP behavior:
 - `prvAddCurrentTaskToDelayedList()`
     - Added extra check to see if current blocking task has already been deleted by the other core.
 
-#### Single-Core Modifications
+### Critical Section Changes
+
+- Granular Locks: The following objects are now given their own spinlocks
+    - Kernel objects (i.e., `tasks.c`): `xKernelLock`
+    - Each queue: `xQueueLock`
+    - Queue Registry: `xQueueRegistryLock`
+    - Each event group: `xEventGroupLock`
+    - Each stream buffer: `xStreamBufferLock`
+    - All timers: `xTimerLock`
+- Critical sections now target the appropriate spinlocks
+- Added missing critical sections for SMP (see `..._SMP_ONLY()` critical section calls)
+- Queues no longer use queue locks (see `queueUSE_LOCKS`)
+    - Queues now just use critical sections and skip queue locking
+    - Queue functions can now execute within a single critical section block
 
 ## Single Core Differences
 
 List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `configNUMBER_OF_CORES == 1`.
 
 - `prvAddNewTaskToReadyList()`
     - Extended critical section so that SMP can check for yields while still inside critical section
 - `vTaskStepTick()`
     - Extended critical section so that SMP can access `xTickCount` while still inside critical section
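A minimal sketch of the granular-lock pattern that the Critical Section Changes list above describes, mirroring how `xQueueLock` and `xEventGroupLock` are used in this commit. `MyGroup_t`, `prvInitGroup()`, and `prvUpdateGroup()` are hypothetical names, and the lock-taking `taskENTER_CRITICAL( pxLock )` signature is the IDF variant introduced by this commit, not vanilla FreeRTOS:

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical data group protected by its own spinlock rather than one
 * global lock shared by every kernel object. */
typedef struct
{
    uint32_t uxSharedCount;
    portMUX_TYPE xGroupLock;    /* Per-group spinlock for SMP critical sections */
} MyGroup_t;

static void prvInitGroup( MyGroup_t * pxGroup )
{
    pxGroup->uxSharedCount = 0;
    portMUX_INITIALIZE( &( pxGroup->xGroupLock ) );    /* Must run before first use */
}

static void prvUpdateGroup( MyGroup_t * pxGroup )
{
    /* Contends only with tasks/ISRs touching this group, not the whole kernel. */
    taskENTER_CRITICAL( &( pxGroup->xGroupLock ) );
    {
        pxGroup->uxSharedCount++;
    }
    taskEXIT_CRITICAL( &( pxGroup->xGroupLock ) );
}
```

Two cores contending on different groups no longer serialize against each other, which is the performance motivation behind the per-object locks listed above.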
FreeRTOS.h

@@ -1371,6 +1371,7 @@ typedef struct xSTATIC_QUEUE
         UBaseType_t uxDummy8;
         uint8_t ucDummy9;
     #endif
+    portMUX_TYPE xDummyQueueLock;
 } StaticQueue_t;
 typedef StaticQueue_t StaticSemaphore_t;
 
@@ -1400,6 +1401,7 @@ typedef struct xSTATIC_EVENT_GROUP
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucDummy4;
     #endif
+    portMUX_TYPE xDummyEventGroupLock;
 } StaticEventGroup_t;
 
 /*
@@ -1454,6 +1456,7 @@ typedef struct xSTATIC_STREAM_BUFFER
     #if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
         void * pvDummy5[ 2 ];
     #endif
+    portMUX_TYPE xDummyStreamBufferLock;
 } StaticStreamBuffer_t;
 
 /* Message buffers are built on stream buffers. */
task.h

@@ -220,8 +220,9 @@ typedef enum
 * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskENTER_CRITICAL()               portENTER_CRITICAL()
+#define taskENTER_CRITICAL( x )            portENTER_CRITICAL( x )
 #define taskENTER_CRITICAL_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
+#define taskENTER_CRITICAL_ISR( x )        portENTER_CRITICAL_ISR( x )
 
 /**
 * task. h
@@ -235,8 +236,9 @@ typedef enum
 * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskEXIT_CRITICAL()                portEXIT_CRITICAL()
+#define taskEXIT_CRITICAL( x )             portEXIT_CRITICAL( x )
 #define taskEXIT_CRITICAL_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
+#define taskEXIT_CRITICAL_ISR( x )         portEXIT_CRITICAL_ISR( x )
 
 /**
 * task. h
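A hedged usage sketch of the modified macros above: task-context code passes a spinlock to `taskENTER_CRITICAL()` / `taskEXIT_CRITICAL()`, while ISR code uses the new `_ISR` variants. The shared counter, handler names, and the `IRAM_ATTR` placement attribute (from ESP-IDF's `esp_attr.h`) are illustrative assumptions:

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_attr.h"

/* Hypothetical state shared between a task and an ISR. */
static volatile uint32_t ulEventCount = 0;
static portMUX_TYPE xCountLock = portMUX_INITIALIZER_UNLOCKED;

void vExampleTask( void * pvParameters )
{
    for( ; ; )
    {
        /* Task context: spinlock-taking variants introduced by this commit. */
        taskENTER_CRITICAL( &xCountLock );
        ulEventCount++;
        taskEXIT_CRITICAL( &xCountLock );
    }
}

static void IRAM_ATTR vExampleIsr( void * pvArg )
{
    /* ISR context: the _ISR variants map to portENTER/EXIT_CRITICAL_ISR. */
    taskENTER_CRITICAL_ISR( &xCountLock );
    ulEventCount++;
    taskEXIT_CRITICAL_ISR( &xCountLock );
}
```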
queue.c

@@ -41,6 +41,8 @@
 #include "FreeRTOS.h"
 #include "task.h"
 #include "queue.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"
 
 #if ( configUSE_CO_ROUTINES == 1 )
     #include "croutine.h"
@@ -52,11 +54,71 @@
  * correct privileged Vs unprivileged linkage and placement. */
 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
 
+/* Some code sections require extra critical sections when building for SMP
+ * ( configNUMBER_OF_CORES > 1 ). */
+#if ( configNUMBER_OF_CORES > 1 )
+    /* Macros that Enter/exit a critical section only when building for SMP */
+    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
+    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
+    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
+    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskENTER_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskENTER_CRITICAL_ISR( pxLock );
+        }
+    }
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskEXIT_CRITICAL_ISR( pxLock );
+        }
+    }
+#else /* configNUMBER_OF_CORES > 1 */
+    /* Macros that Enter/exit a critical section only when building for SMP */
+    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
+    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
+    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
+    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
+#endif /* configNUMBER_OF_CORES > 1 */
+
+/* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
+ * calls are deterministic (as queue locks use scheduler suspension instead of
+ * critical sections). However, the SMP implementation is non-deterministic
+ * anyways, thus SMP can forego the use of queue locks (replaced with a critical
+ * sections) in exchange for better queue performance. */
+#if ( configNUMBER_OF_CORES > 1 )
+    #define queueUSE_LOCKS    0
+    #define queueUNLOCKED     ( ( int8_t ) 0 )
+#else /* configNUMBER_OF_CORES > 1 */
+    #define queueUSE_LOCKS             1
+    /* Constants used with the cRxLock and cTxLock structure members. */
+    #define queueUNLOCKED              ( ( int8_t ) -1 )
+    #define queueLOCKED_UNMODIFIED     ( ( int8_t ) 0 )
+    #define queueINT8_MAX              ( ( int8_t ) 127 )
+#endif /* configNUMBER_OF_CORES > 1 */
+
 /* When the Queue_t structure is used to represent a base queue its pcHead and
  * pcTail members are used as pointers into the queue storage area. When the
@@ -119,8 +181,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
     UBaseType_t uxLength;     /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
     UBaseType_t uxItemSize;   /*< The size of each items that the queue will hold. */
 
+    #if ( queueUSE_LOCKS == 1 )
         volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
         volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
+    #endif /* queueUSE_LOCKS == 1 */
 
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
@@ -134,6 +198,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
         UBaseType_t uxQueueNumber;
         uint8_t ucQueueType;
     #endif
 
+    portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */
 } xQUEUE;
 
 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@@ -167,8 +233,15 @@ typedef xQUEUE Queue_t;
  * array position being vacant. */
     PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
 
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Spinlock required in SMP when accessing the queue registry */
+        static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
+    #endif /* configNUMBER_OF_CORES > 1 */
+
 #endif /* configQUEUE_REGISTRY_SIZE */
 
+#if ( queueUSE_LOCKS == 1 )
+
 /*
  * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
  * prevent an ISR from adding or removing items to the queue, but does prevent
@@ -192,6 +265,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
  * @return pdTRUE if there is no space, otherwise pdFALSE;
  */
 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
+#endif /* queueUSE_LOCKS == 1 */
 
 /*
  * Copies an item into the queue, either at the front of the queue or the
@@ -248,12 +322,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
 #endif
 /*-----------------------------------------------------------*/
 
+#if ( queueUSE_LOCKS == 1 )
+
 /*
  * Macro to mark a queue as locked. Locking a queue prevents an ISR from
  * accessing the queue event lists.
  */
 #define prvLockQueue( pxQueue )                            \
-    taskENTER_CRITICAL();                                  \
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );        \
     {                                                      \
         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
         {                                                  \
@@ -264,7 +340,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
         }                                                  \
     }                                                      \
-    taskEXIT_CRITICAL()
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
 
 /*
  * Macro to increment cTxLock member of the queue data structure. It is
@@ -295,6 +371,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
         } \
     }
+#endif /* queueUSE_LOCKS == 1 */
 /*-----------------------------------------------------------*/
 
 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -305,19 +382,28 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
 
     configASSERT( pxQueue );
 
+    if( xNewQueue == pdTRUE )
+    {
+        portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
+    }
+
     if( ( pxQueue != NULL ) &&
         ( pxQueue->uxLength >= 1U ) &&
         /* Check for multiplication overflow. */
         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
             pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
             pxQueue->pcWriteTo = pxQueue->pcHead;
             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
+            #if ( queueUSE_LOCKS == 1 )
+            {
                 pxQueue->cRxLock = queueUNLOCKED;
                 pxQueue->cTxLock = queueUNLOCKED;
+            }
+            #endif /* queueUSE_LOCKS == 1 */
 
             if( xNewQueue == pdFALSE )
             {
@@ -349,7 +435,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
     }
     else
     {
@@ -606,6 +692,9 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
         /* In case this is a recursive mutex. */
         pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
 
+        /* Initialize the mutex's spinlock */
+        portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
+
         traceCREATE_MUTEX( pxNewQueue );
 
         /* Start with the semaphore in the expected state. */
@@ -671,7 +760,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
      * calling task is the mutex holder, but not a good way of determining the
      * identity of the mutex holder, as the holder may change between the
      * following critical section exiting and the function returning. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) );
         {
             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
             {
@@ -682,7 +771,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                 pxReturn = NULL;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) );
 
         return pxReturn;
     } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
@@ -908,7 +997,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
      * interest of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             /* Is there room on the queue now? The running task must be the
              * highest priority task wanting to access the queue. If the head item
@@ -1014,7 +1103,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 }
                 #endif /* configUSE_QUEUE_SETS */
 
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                 return pdPASS;
             }
             else
@@ -1023,7 +1112,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 {
                     /* The queue was full and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
                     /* Return to the original privilege level before exiting
                      * the function. */
@@ -1043,9 +1132,39 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-        }
-        taskEXIT_CRITICAL();
+
+            /* If queue locks ARE NOT being used:
+             * - At this point, the queue is full and entry time has been set
+             * - We simply check for a time out, block if not timed out, or
+             *   return an error if we have timed out. */
+            #if ( queueUSE_LOCKS == 0 )
+            {
+                /* Update the timeout state to see if it has expired yet. */
+                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+                {
+                    /* Not timed out yet. Block the current task. */
+                    traceBLOCKING_ON_QUEUE_SEND( pxQueue );
+                    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
+                    portYIELD_WITHIN_API();
+                }
+                else
+                {
+                    /* We have timed out. Return an error. */
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
+                    traceQUEUE_SEND_FAILED( pxQueue );
+                    return errQUEUE_FULL;
+                }
+            }
+            #endif /* queueUSE_LOCKS == 0 */
+        }
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
+        /* If queue locks ARE being used:
+         * - At this point, the queue is full and entry time has been set
+         * - We follow the original procedure of locking the queue before
+         *   attempting to block. */
+        #if ( queueUSE_LOCKS == 1 )
+        {
             /* Interrupts and other tasks can send to and receive from the queue
              * now the critical section has been exited. */
 
@@ -1093,6 +1212,8 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 traceQUEUE_SEND_FAILED( pxQueue );
                 return errQUEUE_FULL;
             }
+        }
+        #endif /* queueUSE_LOCKS == 1 */
     } /*lint -restore */
 }
 /*-----------------------------------------------------------*/
@@ -1131,11 +1252,16 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
      * read, instead return a flag to say whether a context switch is required or
      * not (i.e. has a task with a higher priority than us been woken by this
      * post). */
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
-            const int8_t cTxLock = pxQueue->cTxLock;
+            #if ( queueUSE_LOCKS == 1 )
+                const int8_t cTxLock = pxQueue->cTxLock;
+            #else
+                /* Queue locks not used, so we treat it as unlocked. */
+                const int8_t cTxLock = queueUNLOCKED;
+            #endif /* queueUSE_LOCKS == 1 */
             const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
 
             traceQUEUE_SEND_FROM_ISR( pxQueue );
@@ -1242,11 +1368,15 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             #endif /* configUSE_QUEUE_SETS */
         }
         else
         {
+            #if ( queueUSE_LOCKS == 1 )
+            {
                 /* Increment the lock count so the task that unlocks the queue
                  * knows that data was posted while it was locked. */
                 prvIncrementQueueTxLock( pxQueue, cTxLock );
+            }
+            #endif /* queueUSE_LOCKS == 1 */
         }
 
         xReturn = pdPASS;
     }
@@ -1256,7 +1386,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -1302,7 +1432,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1311,7 +1441,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
          * space'. */
         if( uxMessagesWaiting < pxQueue->uxLength )
         {
-            const int8_t cTxLock = pxQueue->cTxLock;
+            #if ( queueUSE_LOCKS == 1 )
+                const int8_t cTxLock = pxQueue->cTxLock;
+            #else
+                /* Queue locks not used, so we treat it as unlocked. */
+                const int8_t cTxLock = queueUNLOCKED;
+            #endif /* queueUSE_LOCKS == 1 */
 
             traceQUEUE_SEND_FROM_ISR( pxQueue );
 
@@ -1408,11 +1543,15 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
             #endif /* configUSE_QUEUE_SETS */
         }
         else
         {
+            #if ( queueUSE_LOCKS == 1 )
+            {
                 /* Increment the lock count so the task that unlocks the queue
                  * knows that data was posted while it was locked. */
                 prvIncrementQueueTxLock( pxQueue, cTxLock );
+            }
+            #endif /* queueUSE_LOCKS == 1 */
         }
 
         xReturn = pdPASS;
     }
@@ -1422,7 +1561,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
             xReturn = errQUEUE_FULL;
         }
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -1455,7 +1594,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
      * interest of execution time efficiency. */
     for( ; ; )
    {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
        {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1487,7 +1626,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                 return pdPASS;
             }
             else
@@ -1496,7 +1635,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1513,9 +1652,39 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-        }
-        taskEXIT_CRITICAL();
+
+            /* If queue locks ARE NOT being used:
+             * - At this point, the queue is empty and entry time has been set
+             * - We simply check for a time out, block if not timed out, or
+             *   return an error if we have timed out. */
+            #if ( queueUSE_LOCKS == 0 )
+            {
+                /* Update the timeout state to see if it has expired yet. */
+                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+                {
+                    /* Not timed out yet. Block the current task. */
+                    traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
+                    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
+                    portYIELD_WITHIN_API();
+                }
+                else
+                {
+                    /* We have timed out. Return an error. */
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
+                    traceQUEUE_RECEIVE_FAILED( pxQueue );
+                    return errQUEUE_EMPTY;
+                }
+            }
+            #endif /* queueUSE_LOCKS == 0 */
+        }
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
+        /* If queue locks ARE being used:
+         * - At this point, the queue is empty and entry time has been set
+         * - We follow the original procedure for locking the queue before
+         *   attempting to block. */
+        #if ( queueUSE_LOCKS == 1 )
+        {
             /* Interrupts and other tasks can send to and receive from the queue
              * now the critical section has been exited. */
 
@@ -1567,6 +1736,8 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+        }
+        #endif /* queueUSE_LOCKS == 1 */
     } /*lint -restore */
 }
 /*-----------------------------------------------------------*/
@@ -1601,7 +1772,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
      * of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             /* Semaphores are queues with an item size of 0, and where the
              * number of messages in the queue is the semaphore's count value. */
@@ -1650,7 +1821,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                 return pdPASS;
             }
             else
@@ -1659,7 +1830,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 {
                     /* The semaphore count was 0 and no block time is specified
                      * (or the block time has expired) so exit now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1676,9 +1847,65 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-        }
-        taskEXIT_CRITICAL();
+
+            /* If queue locks ARE NOT being used:
+             * - At this point, the semaphore/mutex is empty/held and entry time
+             *   has been set.
+             * - We simply check for a time out, inherit priority and block if
+             *   not timed out, or return an error if we have timed out. */
+            #if ( queueUSE_LOCKS == 0 )
+            {
+                /* Update the timeout state to see if it has expired yet. */
+                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+                {
+                    /* Not timed out yet. If this is a mutex, make the holder
+                     * inherit our priority, then block the current task. */
+                    traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
+                    #if ( configUSE_MUTEXES == 1 )
+                    {
+                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
+                        {
+                            xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    }
+                    #endif /* if ( configUSE_MUTEXES == 1 ) */
+                    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
+                    portYIELD_WITHIN_API();
+                }
+                else
+                {
+                    /* We have timed out. If this is a mutex, make the holder
+                     * disinherit our priority, then return an error. */
+                    #if ( configUSE_MUTEXES == 1 )
+                    {
+                        if( xInheritanceOccurred != pdFALSE )
+                        {
+                            UBaseType_t uxHighestWaitingPriority;
+                            uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
+                            vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
+                        }
+                    }
+                    #endif /* configUSE_MUTEXES */
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
+                    traceQUEUE_RECEIVE_FAILED( pxQueue );
+                    return errQUEUE_EMPTY;
+                }
+            }
+            #endif /* queueUSE_LOCKS == 0 */
+        }
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
+        /* If queue locks ARE being used:
+         * - At this point, the semaphore/mutex is empty/held and entry time
+         *   has been set.
+         * - We follow the original procedure for locking the queue, inheriting
+         *   priority, then attempting to block. */
+        #if ( queueUSE_LOCKS == 1 )
+        {
            /* Interrupts and other tasks can give to and take from the semaphore
             * now the critical section has been exited. */
 
@@ -1700,11 +1927,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 {
                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                     {
-                        taskENTER_CRITICAL();
+                        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
                         {
                             xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
                         }
-                        taskEXIT_CRITICAL();
+                        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     }
                     else
                     {
@@ -1752,7 +1979,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                      * test the mutex type again to check it is actually a mutex. */
                     if( xInheritanceOccurred != pdFALSE )
                     {
-                        taskENTER_CRITICAL();
+                        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
                         {
                             UBaseType_t uxHighestWaitingPriority;
 
@@ -1764,7 +1991,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                             uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                             vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                         }
-                        taskEXIT_CRITICAL();
+                        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     }
                 }
                 #endif /* configUSE_MUTEXES */
@@ -1777,6 +2004,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+        }
+        #endif /* queueUSE_LOCKS == 1 */
     } /*lint -restore */
 }
 /*-----------------------------------------------------------*/
@@ -1809,7 +2038,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
      * interest of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
@@ -1847,7 +2076,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
 
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                 return pdPASS;
             }
             else
@@ -1856,7 +2085,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     traceQUEUE_PEEK_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1874,9 +2103,39 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-        }
-        taskEXIT_CRITICAL();
+
+            /* If queue locks ARE NOT being used:
+             * - At this point, the queue is empty and entry time has been set
+             * - We simply check for a time out, block if not timed out, or
+             *   return an error if we have timed out. */
+            #if ( queueUSE_LOCKS == 0 )
+            {
+                /* Update the timeout state to see if it has expired yet. */
+                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+                {
+                    /* Not timed out yet. Block the current task. */
+                    traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
+                    vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
+                    portYIELD_WITHIN_API();
+                }
+                else
+                {
+                    /* We have timed out. Return an error. */
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
+                    traceQUEUE_PEEK_FAILED( pxQueue );
+                    return errQUEUE_EMPTY;
+                }
+            }
+            #endif /* queueUSE_LOCKS == 0 */
+        }
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
+        /* If queue locks ARE being used:
+         * - At this point, the queue is empty and entry time has been set
+         * - We follow the original procedure for locking the queue before
+         *   attempting to block. */
+        #if ( queueUSE_LOCKS == 1 )
+        {
             /* Interrupts and other tasks can send to and receive from the queue
              * now that the critical section has been exited. */
 
@@ -1928,6 +2187,8 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+        }
+        #endif /* queueUSE_LOCKS == 1 */
     } /*lint -restore */
 }
 /*-----------------------------------------------------------*/
@@ -1959,14 +2220,19 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 
         /* Cannot block in an ISR, so check there is data available. */
         if( uxMessagesWaiting > ( UBaseType_t ) 0 )
         {
-            const int8_t cRxLock = pxQueue->cRxLock;
+            #if ( queueUSE_LOCKS == 1 )
+                const int8_t cRxLock = pxQueue->cRxLock;
+            #else
+                /* Queue locks not used, so we treat it as unlocked. */
+                const int8_t cRxLock = queueUNLOCKED;
+            #endif /* queueUSE_LOCKS == 1 */
 
             traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
 
@@ -2005,11 +2271,15 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             }
         }
         else
         {
+            #if ( queueUSE_LOCKS == 1 )
+            {
                 /* Increment the lock count so the task that unlocks the queue
                  * knows that data was removed while it was locked. */
                 prvIncrementQueueRxLock( pxQueue, cRxLock );
+            }
+            #endif /* queueUSE_LOCKS == 1 */
         }
 
         xReturn = pdPASS;
     }
@@ -2019,7 +2289,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
         }
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -2053,7 +2323,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         /* Cannot block in an ISR, so check there is data available. */
         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -2074,7 +2344,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
         }
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
 
     return xReturn;
 }
@@ -2086,11 +2356,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
 
     configASSERT( xQueue );
 
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
     {
         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
 
     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2103,11 +2373,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
 
     configASSERT( pxQueue );
 
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2329,6 +2599,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
 }
 /*-----------------------------------------------------------*/
 
+#if ( queueUSE_LOCKS == 1 )
 static void prvUnlockQueue( Queue_t * const pxQueue )
 {
     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
@@ -2337,7 +2608,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
      * removed from the queue while the queue was locked. When a queue is
      * locked items can be added or removed, but the event lists cannot be
      * updated. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         int8_t cTxLock = pxQueue->cTxLock;
 
@@ -2415,10 +2686,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 
         pxQueue->cTxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 
     /* Do the same for the Rx lock. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         int8_t cRxLock = pxQueue->cRxLock;
 
@@ -2445,15 +2716,17 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 
         pxQueue->cRxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 }
+#endif /* queueUSE_LOCKS == 1 */
 /*-----------------------------------------------------------*/
 
+#if ( queueUSE_LOCKS == 1 )
 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
 
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
     {
         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
         {
@@ -2464,10 +2737,11 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
 
     return xReturn;
 }
+#endif /* queueUSE_LOCKS == 1 */
 /*-----------------------------------------------------------*/
 
 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
@@ -2490,11 +2764,12 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
 /*-----------------------------------------------------------*/
 
+#if ( queueUSE_LOCKS == 1 )
 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
 
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
     {
         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
         {
@@ -2505,10 +2780,11 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
 
     return xReturn;
 }
+#endif /* queueUSE_LOCKS == 1 */
 /*-----------------------------------------------------------*/
 
 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
@@ -2828,6 +3104,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
     configASSERT( xQueue );
 
+    /* For SMP, we need to take the queue registry lock in case another
+     * core updates the register simultaneously. */
+    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    {
         if( pcQueueName != NULL )
         {
             /* See if there is an empty space in the registry. A NULL name denotes
@@ -2861,6 +3141,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
             }
         }
+    }
+    /* Release the previously taken queue registry lock. */
+    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 }
 
 #endif /* configQUEUE_REGISTRY_SIZE */
 /*-----------------------------------------------------------*/
@@ -2874,6 +3157,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
     configASSERT( xQueue );
 
+    /* For SMP, we need to take the queue registry lock in case another
+     * core updates the register simultaneously. */
+    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    {
         /* Note there is nothing here to protect against another task adding or
          * removing entries from the registry while it is being searched. */
 
@@ -2889,6 +3176,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
+    }
+    /* Release the previously taken queue registry lock. */
+    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 
     return pcReturn;
 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -2904,6 +3194,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
     configASSERT( xQueue );
 
+    /* For SMP, we need to take the queue registry lock in case another
+     * core updates the register simultaneously. */
+    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    {
         /* See if the handle of the queue being unregistered in actually in the
          * registry. */
         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
@@ -2924,6 +3218,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 mtCOVERAGE_TEST_MARKER();
            }
        }
+    }
+    /* Release the previously taken queue registry lock. */
+    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
 
 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -2945,6 +3242,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
      * so it should be called with the scheduler locked and not from a critical
      * section. */
 
+    /* For SMP, we need to take the queue's xQueueLock as we are about to
+     * access the queue. */
+    taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+    {
+        #if ( queueUSE_LOCKS == 1 )
+        {
     /* Only do anything if there are no messages in the queue. This function
      * will not actually cause the task to block, just place it on a blocked
      * list. It will not block until the scheduler is unlocked - at which
@@ -2952,6 +3255,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
      * the queue is locked, and the calling task blocks on the queue, then the
      * calling task will be immediately unblocked when the queue is unlocked. */
             prvLockQueue( pxQueue );
+        }
+        #endif /* queueUSE_LOCKS == 1 */
 
         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
         {
@@ -2963,8 +3268,15 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             mtCOVERAGE_TEST_MARKER();
         }
 
+        #if ( queueUSE_LOCKS == 1 )
+        {
             prvUnlockQueue( pxQueue );
+        }
+        #endif /* queueUSE_LOCKS == 1 */
+    }
+    /* Release the previously taken xQueueLock. */
+    taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
 }
 
 #endif /* configUSE_TIMERS */
 /*-----------------------------------------------------------*/
@@ -2990,7 +3302,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     {
         BaseType_t xReturn;
 
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
         {
             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
             {
@@ -3009,7 +3321,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 xReturn = pdPASS;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
 
         return xReturn;
     }
@@ -3039,12 +3351,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
         else
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
            {
                 /* The queue is no longer contained in the set. */
                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
             xReturn = pdPASS;
         }
 
@@ -3096,9 +3408,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
 
+        /* In SMP, queue sets have their own xQueueLock. Thus we need to also
+         * acquire the queue set's xQueueLock before accessing it. */
+        taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+        {
             if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
             {
-                const int8_t cTxLock = pxQueueSetContainer->cTxLock;
+                #if ( queueUSE_LOCKS == 1 )
+                    const int8_t cTxLock = pxQueueSetContainer->cTxLock;
+                #else
+                    /* Queue locks not used, so we treat it as unlocked. */
+                    const int8_t cTxLock = queueUNLOCKED;
+                #endif /* queueUSE_LOCKS == 1 */
 
                 traceQUEUE_SET_SEND( pxQueueSetContainer );
 
@@ -3125,14 +3446,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                     }
                 }
                 else
                {
+                    #if ( queueUSE_LOCKS == 1 )
+                    {
                         prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
+                    }
+                    #endif /* queueUSE_LOCKS == 1 */
                 }
             }
             else
            {
                 mtCOVERAGE_TEST_MARKER();
            }
+        }
+        /* Release the previously acquired queue set's xQueueLock. */
+        taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
 
         return xReturn;
     }
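To summarize the `..._SMP_ONLY()` pattern that recurs through `queue.c` above: on an SMP build the macro takes the given spinlock, while on a single-core build it expands to nothing, so the argument is never evaluated and the guarded lock need not even exist. A minimal sketch with hypothetical names (`ulSharedData`, `xSharedDataLock`, `prvReadShared()`), patterned after how `xQueueRegistryLock` is guarded:

```c
/* Hypothetical module-local state, mirroring xQueueRegistry and
 * xQueueRegistryLock in queue.c. */
static uint32_t ulSharedData = 0;

#if ( configNUMBER_OF_CORES > 1 )
    /* The lock only exists on SMP builds; single-core callers never
     * reference it because the _SMP_ONLY macros expand to nothing. */
    static portMUX_TYPE xSharedDataLock = portMUX_INITIALIZER_UNLOCKED;
#endif

static uint32_t prvReadShared( void )
{
    uint32_t ulCopy;

    taskENTER_CRITICAL_SMP_ONLY( &xSharedDataLock );
    {
        ulCopy = ulSharedData;
    }
    taskEXIT_CRITICAL_SMP_ONLY( &xSharedDataLock );

    return ulCopy;
}
```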
|
@@ -43,6 +43,8 @@
|
||||
#include "FreeRTOS.h"
|
||||
#include "task.h"
|
||||
#include "stream_buffer.h"
|
||||
/* Include private IDF API additions for critical thread safety macros */
|
||||
#include "esp_private/freertos_idf_additions_priv.h"
|
||||
|
||||
#if ( configUSE_TASK_NOTIFICATIONS != 1 )
|
||||
#error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
|
||||
@@ -64,7 +66,7 @@
|
||||
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
|
||||
#ifndef sbRECEIVE_COMPLETED
|
||||
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
|
||||
vTaskSuspendAll(); \
|
||||
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
|
||||
{ \
|
||||
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
|
||||
{ \
|
||||
@@ -74,7 +76,7 @@
|
||||
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
|
||||
} \
|
||||
} \
|
||||
( void ) xTaskResumeAll();
|
||||
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
|
||||
#endif /* sbRECEIVE_COMPLETED */
|
||||
|
||||
/* If user has provided a per-instance receive complete callback, then
|
||||
@@ -141,7 +143,7 @@
|
||||
*/
|
||||
#ifndef sbSEND_COMPLETED
|
||||
#define sbSEND_COMPLETED( pxStreamBuffer ) \
|
||||
vTaskSuspendAll(); \
|
||||
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
|
||||
{ \
|
||||
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
|
||||
{ \
|
||||
@@ -151,7 +153,7 @@
|
||||
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
|
||||
} \
|
||||
} \
|
||||
( void ) xTaskResumeAll();
|
||||
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
|
||||
#endif /* sbSEND_COMPLETED */
|
||||
|
||||
/* If user has provided a per-instance send completed callback, then
|
||||
@@ -243,6 +245,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
|
||||
StreamBufferCallbackFunction_t pxSendCompletedCallback; /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */
|
||||
StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */
|
||||
#endif
|
||||
|
||||
portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */
|
||||
} StreamBuffer_t;
|
||||
|
||||
/*
|
||||
@@ -385,6 +389,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
|
||||
pxSendCompletedCallback,
|
||||
pxReceiveCompletedCallback );
|
||||
|
||||
/* Initialize the stream buffer's spinlock separately, as
|
||||
* prvInitialiseNewStreamBuffer() is also called from
|
||||
* xStreamBufferReset(). */
|
||||
portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) );
|
||||
|
||||
traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
|
||||
}
|
||||
else
|
||||
@@ -463,6 +472,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
|
||||
* again. */
|
||||
pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;
|
||||
|
||||
/* Initialize the stream buffer's spinlock separately, as
|
||||
* prvInitialiseNewStreamBuffer() is also called from
|
||||
* xStreamBufferReset(). */
|
||||
portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) );
|
||||
|
||||
traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );
|
||||
|
||||
xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
|
||||
@@ -560,7 +574,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
    #endif

    /* Can only reset a message buffer if there are no tasks blocked on it. */
    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
    {
        if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) )
        {
@@ -590,7 +604,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
            xReturn = pdPASS;
        }
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

    return xReturn;
}
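
With the per-buffer lock, the two NULL checks and the reset itself are atomic with respect to the other core, so a task that blocks on the buffer mid-reset can no longer be missed. The caller-visible contract is unchanged; a minimal usage sketch (hypothetical helper, handle assumed created elsewhere):

#include "freertos/FreeRTOS.h"
#include "freertos/stream_buffer.h"

/* Hypothetical helper: discard unread data when it is safe to do so. */
static void vFlushIfIdle( StreamBufferHandle_t xBuf )
{
    if( xStreamBufferReset( xBuf ) != pdPASS )
    {
        /* A task is blocked sending to or receiving from the buffer,
         * so nothing was cleared. */
    }
}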

@@ -736,7 +750,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
        {
            /* Wait until the required number of bytes are free in the message
             * buffer. */
            taskENTER_CRITICAL();
            taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
            {
                xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );

@@ -751,11 +765,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
                }
                else
                {
                    taskEXIT_CRITICAL();
                    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
                    break;
                }
            }
            taskEXIT_CRITICAL();
            taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

            traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
            ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
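
Note the shape of the blocking path: the space check and the registration of the waiting task happen inside the critical section, while the blocking xTaskNotifyWait() happens outside it, since a task must never block while holding a spinlock. From the caller's side the API is unchanged (illustrative sketch; the buffer handle is assumed to be created elsewhere):

#include "freertos/FreeRTOS.h"
#include "freertos/stream_buffer.h"

/* Hypothetical sender: blocks for up to 100 ms waiting for free space. */
static size_t xSendHello( StreamBufferHandle_t xBuf )
{
    static const char pcMsg[] = "hello";

    return xStreamBufferSend( xBuf, pcMsg, sizeof( pcMsg ), pdMS_TO_TICKS( 100 ) );
}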

@@ -932,7 +946,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
    {
        /* Checking if there is data and clearing the notification state must be
         * performed atomically. */
        taskENTER_CRITICAL();
        taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
        {
            xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );

@@ -955,7 +969,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
        taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

        if( xBytesAvailable <= xBytesToStoreMessageLength )
        {
@@ -1409,7 +1423,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
    } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */
    #endif

    ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */
    /* This function could be called from xStreamBufferReset(), so we reset the
     * stream buffer fields manually in order to avoid clearing
     * xStreamBufferLock. The xStreamBufferLock is initialized separately on
     * stream buffer creation. */
    pxStreamBuffer->xTail = ( size_t ) 0;
    pxStreamBuffer->xHead = ( size_t ) 0;
    pxStreamBuffer->xTaskWaitingToReceive = ( TaskHandle_t ) 0;
    pxStreamBuffer->xTaskWaitingToSend = ( TaskHandle_t ) 0;
    #if ( configUSE_TRACE_FACILITY == 1 )
        pxStreamBuffer->uxStreamBufferNumber = ( UBaseType_t ) 0;
    #endif
    pxStreamBuffer->pucBuffer = pucBuffer;
    pxStreamBuffer->xLength = xBufferSizeBytes;
    pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
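
The replaced memset() would have wiped xStreamBufferLock along with everything else, which matters because xStreamBufferReset() calls this function while holding that very lock. Reduced to its essence (hypothetical type, same idea):

#include "freertos/FreeRTOS.h"

/* Hypothetical object mixing resettable state with a lock that must
 * survive a reset. */
typedef struct
{
    size_t xHead;
    size_t xTail;
    portMUX_TYPE xLock; /* May be held by the caller of the reset. */
} Example_t;

static void vExampleReset( Example_t * pxObj )
{
    /* memset( pxObj, 0x00, sizeof( *pxObj ) ) would clobber xLock while
     * it is held, so each field is cleared individually instead. */
    pxObj->xHead = 0;
    pxObj->xTail = 0;
}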

File diff suppressed because it is too large
@@ -42,6 +42,8 @@
#include "task.h"
#include "queue.h"
#include "timers.h"
/* Include private IDF API additions for critical thread safety macros */
#include "esp_private/freertos_idf_additions_priv.h"

#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 )
    #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
@@ -147,6 +149,10 @@
PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;

/* Spinlock required in SMP when accessing the timers. For now we use a single
 * lock. TODO: Each timer could possibly have its own lock for increased
 * granularity. */
PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;

/*lint -restore */

/*-----------------------------------------------------------*/
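
As the TODO suggests, a finer-grained (purely hypothetical) scheme would embed a lock in each timer rather than sharing xTimerLock, trading RAM for less cross-timer contention:

/* Hypothetical per-timer lock; not part of this commit. */
typedef struct xExampleTimer
{
    TickType_t xPeriodInTicks;    /* Example per-timer state. */
    portMUX_TYPE xInstanceLock;   /* Would guard only this timer. */
} ExampleTimer_t;

static void vExampleTimerInit( ExampleTimer_t * pxTimer, TickType_t xPeriod )
{
    pxTimer->xPeriodInTicks = xPeriod;
    portMUX_INITIALIZE( &( pxTimer->xInstanceLock ) );
}

Even then, the shared active-timer lists would still need a common lock, which is presumably why a single module lock was chosen for now.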

@@ -462,7 +468,7 @@
    Timer_t * pxTimer = xTimer;

    configASSERT( xTimer );
    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        if( xAutoReload != pdFALSE )
        {
@@ -473,7 +479,7 @@
            pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD );
        }
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/

@@ -483,7 +489,7 @@
    BaseType_t xReturn;

    configASSERT( xTimer );
    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
        {
@@ -496,7 +502,7 @@
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );

    return xReturn;
}
@@ -639,7 +645,7 @@
    TickType_t xTimeNow;
    BaseType_t xTimerListsWereSwitched;

    vTaskSuspendAll();
    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
    {
        /* Obtain the time now to make an assessment as to whether the timer
         * has expired or not. If obtaining the time causes the lists to switch
@@ -653,7 +659,7 @@
        /* The tick count has not overflowed, has the timer expired? */
        if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
        {
            ( void ) xTaskResumeAll();
            ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
            prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
        }
        else
@@ -673,7 +679,7 @@

            vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );

            if( xTaskResumeAll() == pdFALSE )
            if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
            {
                /* Yield to wait for either a command to arrive, or the
                 * block time to expire. If a command arrived between the
@@ -689,7 +695,7 @@
            }
            else
            {
                ( void ) xTaskResumeAll();
                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
            }
        }
    }
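
Note that the lock is always dropped before prvProcessExpiredTimer() runs, because expired-timer processing ends in the user's callback, and arbitrary user code must never execute inside a spinlock-based critical section (interrupts are masked and the other core may be spinning). The general shape, as an illustrative sketch:

/* Illustrative only: snapshot what is needed under the lock, then call
 * the user callback with the lock released. */
static void prvExampleInvokeCallback( Timer_t * pxTimer )
{
    TimerCallbackFunction_t pxCallback;

    taskENTER_CRITICAL( &xTimerLock );
    {
        pxCallback = pxTimer->pxCallbackFunction;
    }
    taskEXIT_CRITICAL( &xTimerLock );

    /* May take arbitrary time; must run lock-free. */
    pxCallback( ( TimerHandle_t ) pxTimer );
}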

@@ -967,7 +973,7 @@
    /* Check that the list from which active timers are referenced, and the
     * queue used to communicate with the timer service, have been
     * initialised. */
    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        if( xTimerQueue == NULL )
        {
@@ -1009,7 +1015,7 @@
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/

@@ -1021,7 +1027,7 @@
    configASSERT( xTimer );

    /* Is the timer in the list of active timers? */
    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
        {
@@ -1032,7 +1038,7 @@
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );

    return xReturn;
} /*lint !e818 Can't be pointer to const due to the typedef. */
@@ -1045,11 +1051,11 @@

    configASSERT( xTimer );

    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        pvReturn = pxTimer->pvTimerID;
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );

    return pvReturn;
}
@@ -1062,11 +1068,11 @@

    configASSERT( xTimer );

    taskENTER_CRITICAL();
    taskENTER_CRITICAL( &xTimerLock );
    {
        pxTimer->pvTimerID = pvNewID;
    }
    taskEXIT_CRITICAL();
    taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/

@@ -487,12 +487,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
             * access kernel data structures. For single core, a critical section is
             * not required as this is not called from an interrupt and the current
             * TCB will always be the same for any individual execution thread. */
            taskENTER_CRITICAL_SMP_ONLY();
            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                xReturn = pxCurrentTCBs[ xCoreID ];
            }
            /* Release the previously taken kernel lock. */
            taskEXIT_CRITICAL_SMP_ONLY();
            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        }
        #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
        {
@@ -532,12 +532,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )

            /* For SMP, we need to take the kernel lock here as we are about to
             * access kernel data structures. */
            taskENTER_CRITICAL_SMP_ONLY();
            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
            }
            /* Release the previously taken kernel lock. */
            taskEXIT_CRITICAL_SMP_ONLY();
            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

            return ulRunTimeCounter;
        }
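
The _SMP_ONLY variants now also take the lock. Their likely shape (an assumption for illustration; the actual macros live in the IDF additions and may differ) is to compile to a real critical section only when more than one core is configured:

/* Hypothetical sketch: a critical section on multi-core builds, a no-op
 * on single-core builds where, as the comments above note, none is needed. */
#if ( configNUM_CORES > 1 )
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    taskENTER_CRITICAL( pxLock )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     taskEXIT_CRITICAL( pxLock )
#else
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    ( ( void ) ( pxLock ) )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     ( ( void ) ( pxLock ) )
#endif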

@@ -564,12 +564,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
        {
            /* For SMP, we need to take the kernel lock here as we are about
             * to access kernel data structures. */
            taskENTER_CRITICAL_SMP_ONLY();
            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
            }
            /* Release the previously taken kernel lock. */
            taskEXIT_CRITICAL_SMP_ONLY();
            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        }
        else
        {