feat(freertos): Add SMP critical section changes to FreeRTOS v10.5.1

This commit adds the SMP critical section changes to the v10.5.1 kernel. These
changes are temporarily documented in the `idf_changes.md` document. This
commit...

- Adds granular spinlocks to each data group (e.g., kernel, queues, event_groups)
- Updates critical section macros to use those spinlocks
- Adds missing critical sections required in SMP
Author: Darian Leung
Date:   2023-09-08 00:29:57 +08:00
parent db64e51e53
commit 49af70506a
9 changed files with 1857 additions and 1200 deletions
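The pattern the bullets above describe is mechanical and repeats through every file below: the object embeds a `portMUX_TYPE`, initializes it at creation, and every critical section names the lock it takes. A minimal sketch of that shape (hypothetical `ExampleGroup_t`; not a type from this commit):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical data group illustrating the per-object lock pattern. */
typedef struct ExampleGroupDef_t
{
    uint32_t ulSharedState;    /* Data protected by the group's lock. */
    portMUX_TYPE xGroupLock;   /* Spinlock for SMP critical sections. */
} ExampleGroup_t;

static void vExampleGroupInit( ExampleGroup_t * pxGroup )
{
    pxGroup->ulSharedState = 0;
    portMUX_INITIALIZE( &( pxGroup->xGroupLock ) );
}

static void vExampleGroupSet( ExampleGroup_t * pxGroup, uint32_t ulValue )
{
    /* The critical section targets this object's lock, so two cores can
     * operate on two different objects without contending. */
    taskENTER_CRITICAL( &( pxGroup->xGroupLock ) );
    {
        pxGroup->ulSharedState = ulValue;
    }
    taskEXIT_CRITICAL( &( pxGroup->xGroupLock ) );
}
```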

event_groups.c

@@ -43,6 +43,8 @@
 #include "task.h"
 #include "timers.h"
 #include "event_groups.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"
 /* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
  * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
@@ -77,6 +79,8 @@ typedef struct EventGroupDef_t
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif
+
+    portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
 } EventGroup_t;
 /*-----------------------------------------------------------*/
@@ -131,6 +135,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
             }
             #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+            /* Initialize the event group's spinlock. */
+            portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
             traceEVENT_GROUP_CREATE( pxEventBits );
         }
         else
@@ -182,6 +189,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
             }
             #endif /* configSUPPORT_STATIC_ALLOCATION */
+
+            /* Initialize the event group's spinlock. */
+            portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
             traceEVENT_GROUP_CREATE( pxEventBits );
         }
         else
@@ -213,7 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
     }
     #endif
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -256,7 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             }
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -278,7 +288,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
             /* The task timed out, just return the current event bit value. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 uxReturn = pxEventBits->uxEventBits;
@@ -295,7 +305,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
         xTimeoutOccurred = pdTRUE;
     }
@@ -340,7 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
     }
     #endif
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -408,7 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
             traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -429,7 +439,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 /* The task timed out, just return the current event bit value. */
                 uxReturn = pxEventBits->uxEventBits;
@@ -454,7 +464,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                 xTimeoutOccurred = pdTRUE;
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
         }
         else
         {
@@ -485,7 +495,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -496,7 +506,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     return uxReturn;
 }
@@ -552,7 +562,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -624,7 +641,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
          * bit was set in the control word. */
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
     return pxEventBits->uxEventBits;
 }
@@ -639,7 +661,13 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
     pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
@@ -651,7 +679,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
     }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
     {
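`prvENTER_CRITICAL_OR_SUSPEND_ALL()` / `prvEXIT_CRITICAL_OR_RESUME_ALL()` come from the newly included `esp_private/freertos_idf_additions_priv.h`. Their role, as used above, is to replace the vanilla suspend/resume scheme with a spinlock critical section on multi-core builds while leaving single-core behavior unchanged. A hedged sketch of that behavior, assuming GCC statement expressions (the authoritative definitions live in the private header):

```c
#if ( configNUMBER_OF_CORES > 1 )
    /* SMP: a real critical section on the object's own spinlock. Nothing is
     * suspended, so the "resume" reports that no yield is pending. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
#else
    /* Single core: identical to the vanilla kernel's suspend/resume. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { ( void ) ( pxLock ); vTaskSuspendAll(); } )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { ( void ) ( pxLock ); xTaskResumeAll(); } )
#endif
```

This is why `xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( ... )` type-checks in both configurations, and why `xEventGroupSetBits()` and `vEventGroupDelete()` must additionally take the kernel lock via `prvTakeKernelLock()` before walking a task list: the event group's own lock no longer protects kernel data structures.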

idf_changes.md

@@ -74,8 +74,6 @@ The following APIs have been added to support SMP
 ### API Modifications
-#### SMP Modifications
 Added the following macros that abstract away single-core and SMP differences:
 - `taskYIELD_CORE()` triggers a particular core to yield
@@ -122,9 +120,26 @@ The following functions were modified to accommodate SMP behavior:
 - `prvAddCurrentTaskToDelayedList()`
   - Added extra check to see if current blocking task has already been deleted by the other core.
-#### Single-Core Modifications
+### Critical Section Changes
+- Granular Locks: The following objects are now given their own spinlocks
+  - Kernel objects (i.e., `tasks.c`): `xKernelLock`
+  - Each queue: `xQueueLock`
+  - Queue Registry: `xQueueRegistryLock`
+  - Each event group: `xEventGroupLock`
+  - Each stream buffer: `xStreamBufferLock`
+  - All timers: `xTimerLock`
+- Critical sections now target the appropriate spinlocks
+- Added missing critical sections for SMP (see `..._SMP_ONLY()` critical section calls)
+- Queues no longer use queue locks (see `queueUSE_LOCKS`)
+  - Queues now just use critical sections and skip queue locking
+  - Queue functions can now execute within a single critical section block
 ## Single Core Differences
 List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `configNUMBER_OF_CORES == 1`.
+- `prvAddNewTaskToReadyList()`
+  - Extended critical section so that SMP can check for yields while still inside the critical section
+- `vTaskStepTick()`
+  - Extended critical section so that SMP can access `xTickCount` while still inside the critical section
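The practical effect of the granular locks listed above is that operations on unrelated objects stop contending with each other. A small illustration under assumed handles (`xQueueA` and `xQueueB` are hypothetical):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

static QueueHandle_t xQueueA;   /* protected by its own xQueueLock */
static QueueHandle_t xQueueB;   /* protected by its own xQueueLock */

/* Running on core 0: contends only on xQueueA's spinlock. */
void vProducerA( void )
{
    uint32_t ulItem = 0;
    ( void ) xQueueSend( xQueueA, &ulItem, 0 );
}

/* Running on core 1: contends only on xQueueB's spinlock, so it can
 * proceed in parallel with vProducerA(). */
void vProducerB( void )
{
    uint32_t ulItem = 1;
    ( void ) xQueueSend( xQueueB, &ulItem, 0 );
}
```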

FreeRTOS.h

@@ -1371,6 +1371,7 @@ typedef struct xSTATIC_QUEUE
         UBaseType_t uxDummy8;
         uint8_t ucDummy9;
     #endif
+    portMUX_TYPE xDummyQueueLock;
 } StaticQueue_t;
 typedef StaticQueue_t StaticSemaphore_t;
@@ -1400,6 +1401,7 @@ typedef struct xSTATIC_EVENT_GROUP
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucDummy4;
     #endif
+    portMUX_TYPE xDummyEventGroupLock;
 } StaticEventGroup_t;
 /*
@@ -1454,6 +1456,7 @@ typedef struct xSTATIC_STREAM_BUFFER
     #if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
         void * pvDummy5[ 2 ];
     #endif
+    portMUX_TYPE xDummyStreamBufferLock;
 } StaticStreamBuffer_t;
 /* Message buffers are built on stream buffers. */
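The new `xDummy...Lock` members are not functional; they only keep the opaque `Static...` types byte-for-byte the same size as the real kernel objects, which now embed a spinlock. FreeRTOS verifies that equivalence at creation time with a pattern like the following (sketch of the standard check in, e.g., `xEventGroupCreateStatic()`):

```c
#if ( configASSERT_DEFINED == 1 )
{
    /* Sanity check that the size of the structure used to declare a
     * variable of type StaticEventGroup_t equals the size of the real
     * event group structure. */
    volatile size_t xSize = sizeof( StaticEventGroup_t );
    configASSERT( xSize == sizeof( EventGroup_t ) );
}
#endif
```

Without the dummy lock members, that assert would fire on SMP builds.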

task.h

@@ -220,8 +220,9 @@ typedef enum
 * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskENTER_CRITICAL()               portENTER_CRITICAL()
+#define taskENTER_CRITICAL( x )            portENTER_CRITICAL( x )
 #define taskENTER_CRITICAL_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
+#define taskENTER_CRITICAL_ISR( x )        portENTER_CRITICAL_ISR( x )
 /**
 * task. h
@@ -235,8 +236,9 @@ typedef enum
 * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskEXIT_CRITICAL()                portEXIT_CRITICAL()
+#define taskEXIT_CRITICAL( x )             portEXIT_CRITICAL( x )
 #define taskEXIT_CRITICAL_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
+#define taskEXIT_CRITICAL_ISR( x )         portEXIT_CRITICAL_ISR( x )
 /**
 * task. h
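Under the new signatures every caller names the lock it takes, and the `_ISR` variants map to the port's interrupt-safe critical sections. A minimal usage sketch (hypothetical `xMyLock` and counter):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xMyLock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t ulSharedCounter = 0;

void vIncrementFromTask( void )
{
    taskENTER_CRITICAL( &xMyLock );    /* was taskENTER_CRITICAL() in vanilla */
    ulSharedCounter++;
    taskEXIT_CRITICAL( &xMyLock );     /* was taskEXIT_CRITICAL() in vanilla */
}

void vIncrementFromISR( void )
{
    taskENTER_CRITICAL_ISR( &xMyLock );
    ulSharedCounter++;
    taskEXIT_CRITICAL_ISR( &xMyLock );
}
```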

(File diff suppressed because it is too large.)

stream_buffer.c

@@ -43,6 +43,8 @@
 #include "FreeRTOS.h"
 #include "task.h"
 #include "stream_buffer.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"
 #if ( configUSE_TASK_NOTIFICATIONS != 1 )
     #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
@@ -63,18 +65,18 @@
 * that uses task notifications. */
 /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
 #ifndef sbRECEIVE_COMPLETED
     #define sbRECEIVE_COMPLETED( pxStreamBuffer )                                   \
-    vTaskSuspendAll();                                                              \
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) );     \
     {                                                                               \
         if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL )                        \
         {                                                                           \
             ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend,           \
                                   ( uint32_t ) 0,                                   \
                                   eNoAction );                                      \
             ( pxStreamBuffer )->xTaskWaitingToSend = NULL;                          \
         }                                                                           \
     }                                                                               \
-    ( void ) xTaskResumeAll();
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbRECEIVE_COMPLETED */
 /* If user has provided a per-instance receive complete callback, then
@@ -140,18 +142,18 @@
 * implementation that uses task notifications.
 */
 #ifndef sbSEND_COMPLETED
     #define sbSEND_COMPLETED( pxStreamBuffer )                                      \
-    vTaskSuspendAll();                                                              \
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) );     \
     {                                                                               \
         if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL )                     \
         {                                                                           \
             ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive,        \
                                   ( uint32_t ) 0,                                   \
                                   eNoAction );                                      \
             ( pxStreamBuffer )->xTaskWaitingToReceive = NULL;                       \
         }                                                                           \
     }                                                                               \
-    ( void ) xTaskResumeAll();
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbSEND_COMPLETED */
 /* If user has provided a per-instance send completed callback, then
@@ -243,6 +245,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
         StreamBufferCallbackFunction_t pxSendCompletedCallback;    /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */
         StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */
     #endif
+
+    portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */
 } StreamBuffer_t;
 /*
@@ -385,6 +389,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
                                           pxSendCompletedCallback,
                                           pxReceiveCompletedCallback );
+
+            /* Initialize the stream buffer's spinlock separately, as
+             * prvInitialiseNewStreamBuffer() is also called from
+             * xStreamBufferReset(). */
+            portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) );
             traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
         }
         else
@@ -463,6 +472,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
             * again. */
             pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;
+
+            /* Initialize the stream buffer's spinlock separately, as
+             * prvInitialiseNewStreamBuffer() is also called from
+             * xStreamBufferReset(). */
+            portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) );
             traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );
             xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
@@ -560,7 +574,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
     #endif
     /* Can only reset a message buffer if there are no tasks blocked on it. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
     {
         if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) )
         {
@@ -590,7 +604,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
             xReturn = pdPASS;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
     return xReturn;
 }
@@ -736,7 +750,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
         {
             /* Wait until the required number of bytes are free in the message
              * buffer. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
             {
                 xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
@@ -751,11 +765,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
                 }
                 else
                 {
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
                     break;
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
             traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
             ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@@ -932,7 +946,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
     {
         /* Checking if there is data and clearing the notification state must be
          * performed atomically. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
         {
             xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
@@ -955,7 +969,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
         if( xBytesAvailable <= xBytesToStoreMessageLength )
         {
@@ -1409,7 +1423,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
     } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */
     #endif
-    ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */
+    /* This function could be called from xStreamBufferReset(), so we reset the
+     * stream buffer fields manually in order to avoid clearing
+     * xStreamBufferLock. The xStreamBufferLock is initialized separately on
+     * stream buffer creation. */
+    pxStreamBuffer->xTail = ( size_t ) 0;
+    pxStreamBuffer->xHead = ( size_t ) 0;
+    pxStreamBuffer->xTaskWaitingToReceive = ( TaskHandle_t ) 0;
+    pxStreamBuffer->xTaskWaitingToSend = ( TaskHandle_t ) 0;
+    #if ( configUSE_TRACE_FACILITY == 1 )
+        pxStreamBuffer->uxStreamBufferNumber = ( UBaseType_t ) 0;
+    #endif
     pxStreamBuffer->pucBuffer = pucBuffer;
     pxStreamBuffer->xLength = xBufferSizeBytes;
     pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
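The tail of this diff is the subtle part: `prvInitialiseNewStreamBuffer()` used to `memset()` the whole structure, but it is also called from `xStreamBufferReset()`, which at that point is already inside a critical section on the buffer's own `xStreamBufferLock`. Zeroing the structure would wipe the very lock being held, so the fields are reset one by one instead. A simplified sketch of the constraint:

```c
/* Simplified; assumes the StreamBuffer_t layout shown earlier in the diff. */
static void prvResetBufferFields( StreamBuffer_t * pxStreamBuffer )
{
    /* Field-by-field reset, as in the diff above. */
    pxStreamBuffer->xTail = ( size_t ) 0;
    pxStreamBuffer->xHead = ( size_t ) 0;
    pxStreamBuffer->xTaskWaitingToReceive = NULL;
    pxStreamBuffer->xTaskWaitingToSend = NULL;

    /* Deliberately NOT:
     *   ( void ) memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) );
     * The caller may hold pxStreamBuffer->xStreamBufferLock, and zeroing a
     * held spinlock would corrupt the critical section state. */
}
```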

(File diff suppressed because it is too large.)

timers.c

@@ -42,6 +42,8 @@
 #include "task.h"
 #include "queue.h"
 #include "timers.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"
 #if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 )
     #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
@@ -147,6 +149,10 @@
 PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
 PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
+
+/* Spinlock required in SMP when accessing the timers. For now we use a single lock.
+ * Todo: Each timer could possibly have its own lock for increased granularity. */
+PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;
 /*lint -restore */
 /*-----------------------------------------------------------*/
@@ -462,7 +468,7 @@
     Timer_t * pxTimer = xTimer;
     configASSERT( xTimer );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( xAutoReload != pdFALSE )
         {
@@ -473,7 +479,7 @@
             pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD );
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
 }
 /*-----------------------------------------------------------*/
@@ -483,7 +489,7 @@
     BaseType_t xReturn;
     configASSERT( xTimer );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
         {
@@ -496,7 +502,7 @@
             xReturn = pdTRUE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
     return xReturn;
 }
@@ -639,7 +645,7 @@
     TickType_t xTimeNow;
     BaseType_t xTimerListsWereSwitched;
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
     {
         /* Obtain the time now to make an assessment as to whether the timer
          * has expired or not. If obtaining the time causes the lists to switch
@@ -653,7 +659,7 @@
             /* The tick count has not overflowed, has the timer expired? */
             if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
             {
-                ( void ) xTaskResumeAll();
+                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
                 prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
             }
             else
@@ -673,7 +679,7 @@
                 vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
-                if( xTaskResumeAll() == pdFALSE )
+                if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
                 {
                     /* Yield to wait for either a command to arrive, or the
                      * block time to expire. If a command arrived between the
@@ -689,7 +695,7 @@
         }
         else
         {
-            ( void ) xTaskResumeAll();
+            ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
         }
     }
 }
@@ -967,7 +973,7 @@
     /* Check that the list from which active timers are referenced, and the
      * queue used to communicate with the timer service, have been
      * initialised. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( xTimerQueue == NULL )
         {
@@ -1009,7 +1015,7 @@
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
 }
 /*-----------------------------------------------------------*/
@@ -1021,7 +1027,7 @@
     configASSERT( xTimer );
     /* Is the timer in the list of active timers? */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
         {
@@ -1032,7 +1038,7 @@
             xReturn = pdTRUE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
     return xReturn;
 } /*lint !e818 Can't be pointer to const due to the typedef. */
@@ -1045,11 +1051,11 @@
     configASSERT( xTimer );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         pvReturn = pxTimer->pvTimerID;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
     return pvReturn;
 }
@@ -1062,11 +1068,11 @@
     configASSERT( xTimer );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         pxTimer->pvTimerID = pvNewID;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
 }
 /*-----------------------------------------------------------*/
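Note the Todo above: timers share one `xTimerLock`, so timer APIs serialize across all timers, trading granularity for simplicity. An illustration using real timer APIs with hypothetical handles:

```c
#include "freertos/FreeRTOS.h"
#include "freertos/timers.h"

/* All four calls below take the same global xTimerLock internally, so they
 * serialize even when xTimer1 and xTimer2 are unrelated timers being
 * accessed from different cores. */
void vSwapTimerIDs( TimerHandle_t xTimer1, TimerHandle_t xTimer2 )
{
    void * pvId1 = pvTimerGetTimerID( xTimer1 );
    void * pvId2 = pvTimerGetTimerID( xTimer2 );

    vTimerSetTimerID( xTimer1, pvId2 );
    vTimerSetTimerID( xTimer2, pvId1 );
}
```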

freertos_tasks_c_additions.h

@@ -487,12 +487,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
             * access kernel data structures. For single core, a critical section is
             * not required as this is not called from an interrupt and the current
             * TCB will always be the same for any individual execution thread. */
-            taskENTER_CRITICAL_SMP_ONLY();
+            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                xReturn = pxCurrentTCBs[ xCoreID ];
            }
            /* Release the previously taken kernel lock. */
-            taskEXIT_CRITICAL_SMP_ONLY();
+            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        }
    #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
        {
@@ -532,12 +532,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY();
+        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
        }
        /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY();
+        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        return ulRunTimeCounter;
    }
@@ -564,12 +564,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
        {
            /* For SMP, we need to take the kernel lock here as we are about
             * to access kernel data structures. */
-            taskENTER_CRITICAL_SMP_ONLY();
+            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
            }
            /* Release the previously taken kernel lock. */
-            taskEXIT_CRITICAL_SMP_ONLY();
+            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        }
        else
        {
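`taskENTER_CRITICAL_SMP_ONLY()` / `taskEXIT_CRITICAL_SMP_ONLY()` belong to the "added missing critical sections for SMP" changes noted in `idf_changes.md`: they compile to a real critical section only on multi-core builds, so single-core builds keep vanilla behavior, where these code paths need no lock at all. A sketch of the intent (not the authoritative definition, which lives in the IDF additions):

```c
#if ( configNUMBER_OF_CORES > 1 )
    /* SMP: take the named spinlock (here, the kernel lock). */
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    taskENTER_CRITICAL( pxLock )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     taskEXIT_CRITICAL( pxLock )
#else
    /* Single core: no lock needed; evaluate the argument to avoid warnings. */
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    ( ( void ) ( pxLock ) )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     ( ( void ) ( pxLock ) )
#endif
```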