forked from espressif/esp-idf
freertos: Uncrustify IDF FreeRTOS
This commit uncrustifies IDF FreeRTOS source files using the upstream-provided uncrustify.cfg files.
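The reformatting is mechanical, as the hunks below show: `//` comments (for example the `// IDF-3755` tracking tags) become `/* */` block comments, boolean operators in wrapped conditions move from the start of the continuation line to the end of the previous line, and long prototypes are wrapped one parameter per line. A minimal sketch of the resulting style, using made-up names and assuming only those three rules from the upstream uncrustify.cfg:

    /* Illustrative sketch only; prvBothReady() is a made-up example, not code from this commit. */
    #include <stdbool.h>

    /* Prototypes are wrapped one parameter per line. */
    static bool prvBothReady( bool xFirst,
                              bool xSecond );

    static bool prvBothReady( bool xFirst,
                              bool xSecond )
    {
        /* Block comments only; tags such as "IDF-3755" are written as C comments,
         * not C++ "//" comments. */
        if( ( xFirst == true ) &&  /* the "&&" ends the line ... */
            ( xSecond == true ) )  /* ... instead of starting the continuation line */
        {
            return true;
        }

        return false;
    }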
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -224,7 +225,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 }
 #endif

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 #else
 vTaskSuspendAll();
@@ -271,7 +272,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 }
 }
 }
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 xAlreadyYielded = pdFALSE;
 #else
@@ -360,7 +361,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 }
 #endif

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 #else
 vTaskSuspendAll();
@@ -432,7 +433,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
 }
 }
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 xAlreadyYielded = pdFALSE;
 #else
@@ -580,8 +581,9 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,

 pxList = &( pxEventBits->xTasksWaitingForBits );
 pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
+
 /* We are about to traverse a task list which is a kernel data structure.
  * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
 vTaskTakeKernelLock();
@@ -659,7 +661,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
 * bit was set in the control word. */
 pxEventBits->uxEventBits &= ~uxBitsToClear;
 }
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 /* Release the previously taken kernel lock, then release the event group spinlock. */
 vTaskReleaseKernelLock();
 taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
@@ -676,16 +678,18 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 EventGroup_t * pxEventBits = xEventGroup;
 const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

+{
 traceEVENT_GROUP_DELETE( xEventGroup );

-// IDF-3755
+/* IDF-3755 */
 taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
 #ifdef ESP_PLATFORM
+
 /* We are about to traverse a task list which is a kernel data structure.
  * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
 vTaskTakeKernelLock();
 #endif
-{
+
 while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
 {
 /* Unblock the task, returning 0 as the event list is being deleted
@@ -693,7 +697,7 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
 vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
 }
-}
+
 #ifdef ESP_PLATFORM
 /* Release the previously taken kernel lock. */
 vTaskReleaseKernelLock();
@@ -721,6 +725,7 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 }
 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
 }
+}
 /*-----------------------------------------------------------*/

 /* For internal use only - execute a 'set bits' command that was pended from
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -756,7 +757,7 @@
 #endif

 /*
-Default values for trace macros added by ESP-IDF and are not part of Vanilla FreeRTOS
+ * Default values for trace macros added by ESP-IDF and are not part of Vanilla FreeRTOS
 */
 #ifdef ESP_PLATFORM
 #ifndef traceISR_EXIT_TO_SCHEDULER
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -59,13 +59,14 @@
  * \ingroup FreeRTOSIntro
  */

-#ifndef INC_FREERTOS_H
-#error "FreeRTOS.h must be included before list.h"
-#endif

 #ifndef LIST_H
 #define LIST_H

+#ifndef INC_FREERTOS_H
+#error "FreeRTOS.h must be included before list.h"
+#endif
+
 /*
  * The list structure members are modified from within interrupts, and therefore
  * by rights should be declared volatile. However, they are only modified in a
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -76,8 +77,8 @@
 #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2
 #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag
 #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag
-// #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer
-// #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer
+/* #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer */
+/* #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer */
 #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook
 #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle
 #define uxTaskGetSystemState MPU_uxTaskGetSystemState
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -46,11 +47,9 @@ typedef void (* TaskFunction_t)( void * );
 #ifndef pdMS_TO_TICKS
 #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000U ) )
 #endif
-#ifdef ESP_PLATFORM
 #ifndef pdTICKS_TO_MS
 #define pdTICKS_TO_MS( xTicks ) ( ( TickType_t ) ( ( uint64_t ) ( xTicks ) * 1000 / configTICK_RATE_HZ ) )
 #endif
-#endif // ESP_PLATFORM

 #define pdFALSE ( ( BaseType_t ) 0 )
 #define pdTRUE ( ( BaseType_t ) 1 )
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -1321,6 +1322,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

 /** @cond !DOC_EXCLUDE_HEADER_SECTION */
 /**@{*/
+
 /**
  * @cond !DOC_EXCLUDE_HEADER_SECTION
  * queue. h
@@ -1405,6 +1407,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
 const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
 BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
 /**
  * @cond !DOC_EXCLUDE_HEADER_SECTION
  * queue. h
@@ -1509,6 +1512,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUN
 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

 /** @cond !DOC_EXCLUDE_HEADER_SECTION */
+
 /*
  * The functions defined above are for passing data to and from tasks. The
  * functions below are the equivalents for passing data to and from
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -47,6 +48,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
 #define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U )

 /** @cond !DOC_EXCLUDE_HEADER_SECTION */
+
 /**
  * semphr. h
  * @code{c}
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -62,7 +63,7 @@
 #if ( configCHECK_FOR_STACK_OVERFLOW == 1 )

 /* FreeRTOSConfig.h is only set to use the first method of
-overflow checking. */
+ * overflow checking. */
 #define taskSECOND_CHECK_FOR_STACK_OVERFLOW()

 #endif
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -809,9 +810,9 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 #endif

 #if ( configUSE_MUTEXES == 1 && configCHECK_MUTEX_GIVEN_BY_OWNER == 1 )
-configASSERT(pxQueue->uxQueueType != queueQUEUE_IS_MUTEX
-|| pxQueue->u.xSemaphore.xMutexHolder == NULL
-|| pxQueue->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle());
+configASSERT( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX ||
+pxQueue->u.xSemaphore.xMutexHolder == NULL ||
+pxQueue->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() );
 #endif

 /*lint -save -e904 This function relaxes the coding standard somewhat to
@@ -960,7 +961,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 /* Interrupts and other tasks can send to and receive from the queue
  * now the critical section has been exited. */

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 vTaskSuspendAll();
@@ -987,7 +988,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 * task is already in the ready list before it yields - in which
 * case the yield will not cause a context switch unless there
 * is also a higher priority task in the pending ready list. */
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 if( xTaskResumeAll() == pdFALSE )
@@ -995,13 +996,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 {
 portYIELD_WITHIN_API();
 }
-
 }
 else
 {
 /* Try again. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1012,7 +1012,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 {
 /* The timeout has expired. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1357,6 +1357,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
 traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
 xReturn = errQUEUE_FULL;
 }
+
 taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
 }
 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1456,7 +1457,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 /* Interrupts and other tasks can send to and receive from the queue
  * now the critical section has been exited. */

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 vTaskSuspendAll();
@@ -1473,7 +1474,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 if( xTaskResumeAll() == pdFALSE )
@@ -1481,6 +1482,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 {
 portYIELD_WITHIN_API();
 }
+
 #ifndef ESP_PLATFORM
 else
 {
@@ -1493,7 +1495,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 /* The queue contains data again. Loop back to try and read the
  * data. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1505,7 +1507,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
 /* Timed out. If there is no data in the queue exit, otherwise loop
  * back and attempt to read the data. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1645,7 +1647,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
 /* Interrupts and other tasks can give to and take from the semaphore
  * now the critical section has been exited. */

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 vTaskSuspendAll();
@@ -1682,7 +1684,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,

 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 if( xTaskResumeAll() == pdFALSE )
@@ -1690,6 +1692,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
 {
 portYIELD_WITHIN_API();
 }
+
 #ifndef ESP_PLATFORM
 else
 {
@@ -1702,7 +1705,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
 /* There was no timeout and the semaphore count was not 0, so
  * attempt to take the semaphore again. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1713,7 +1716,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
 {
 /* Timed out. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1860,7 +1863,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 /* Interrupts and other tasks can send to and receive from the queue
  * now the critical section has been exited. */

-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 vTaskSuspendAll();
@@ -1877,7 +1880,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 if( xTaskResumeAll() == pdFALSE )
@@ -1885,6 +1888,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 {
 portYIELD_WITHIN_API();
 }
+
 #ifndef ESP_PLATFORM
 else
 {
@@ -1897,7 +1901,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 /* There is data in the queue now, so don't enter the blocked
  * state, instead return to try and obtain the data. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -1909,7 +1913,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
 /* The timeout has expired. If there is still no data in the queue
  * exit, otherwise go back and try to read the data again. */
 prvUnlockQueue( pxQueue );
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 #else
 ( void ) xTaskResumeAll();
@@ -2019,6 +2023,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
 xReturn = pdFAIL;
 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
 }
+
 taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
 }
 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -2456,6 +2461,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
 BaseType_t xReturn;
+
 taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
 {
 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
@@ -2833,6 +2839,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 UBaseType_t ux;

 taskENTER_CRITICAL( &xQueueRegistryLock );
+
 /* See if there is an empty space in the registry. A NULL name denotes
  * a free slot. */
 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
@@ -2851,6 +2858,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 mtCOVERAGE_TEST_MARKER();
 }
 }
+
 taskEXIT_CRITICAL( &xQueueRegistryLock );
 }

@@ -2881,6 +2889,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 mtCOVERAGE_TEST_MARKER();
 }
 }
+
 taskEXIT_CRITICAL( &xQueueRegistryLock );

 return pcReturn;
@@ -2917,8 +2926,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 mtCOVERAGE_TEST_MARKER();
 }
 }
-taskEXIT_CRITICAL( &xQueueRegistryLock );

+taskEXIT_CRITICAL( &xQueueRegistryLock );
 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

 #endif /* configQUEUE_REGISTRY_SIZE */
|
@@ -5,6 +5,7 @@
  *
  * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
  */
+
 /*
  * FreeRTOS Kernel V10.4.3
  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -61,7 +62,7 @@
 /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */

 #ifndef sbRECEIVE_COMPLETED
-#ifdef ESP_PLATFORM // IDF-3775
+#ifdef ESP_PLATFORM /* IDF-3775 */
 #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
 taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
 { \
@@ -74,7 +75,7 @@
 } \
 } \
 taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-#else
+#else /* ifdef ESP_PLATFORM */
 #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
 vTaskSuspendAll(); \
 { \
@@ -115,7 +116,7 @@
 * or #defined the notification macro away, them provide a default implementation
 * that uses task notifications. */
 #ifndef sbSEND_COMPLETED
-#ifdef ESP_PLATFORM // IDF-3755
+#ifdef ESP_PLATFORM /* IDF-3755 */
 #define sbSEND_COMPLETED( pxStreamBuffer ) \
 taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
 { \
@@ -128,7 +129,7 @@
 } \
 } \
 taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-#else
+#else /* ifdef ESP_PLATFORM */
 #define sbSEND_COMPLETED( pxStreamBuffer ) \
 vTaskSuspendAll(); \
 { \
|
@@ -5,6 +5,7 @@
|
|||||||
*
|
*
|
||||||
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
|
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* FreeRTOS Kernel V10.4.3
|
* FreeRTOS Kernel V10.4.3
|
||||||
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
@@ -403,6 +404,7 @@ PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Poi
|
|||||||
PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
|
PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
#ifdef ESP_PLATFORM
|
||||||
|
|
||||||
/* Spinlock required for SMP critical sections. This lock protects all of the
|
/* Spinlock required for SMP critical sections. This lock protects all of the
|
||||||
* kernel's data structures such as various tasks lists, flags, and tick counts. */
|
* kernel's data structures such as various tasks lists, flags, and tick counts. */
|
||||||
PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
|
PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
|
||||||
@@ -458,9 +460,9 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[configNUM_CORES
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
|
/* per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules */
|
||||||
// in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
|
/* in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting */
|
||||||
// for locks to be free or for host to read full trace buffer
|
/* for locks to be free or for host to read full trace buffer */
|
||||||
PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ configNUM_CORES ] = { pdFALSE };
|
PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ configNUM_CORES ] = { pdFALSE };
|
||||||
|
|
||||||
/*lint -restore */
|
/*lint -restore */
|
||||||
@@ -470,7 +472,8 @@ PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ configNUM_CORES ]
|
|||||||
/* Callback function prototypes. --------------------------*/
|
/* Callback function prototypes. --------------------------*/
|
||||||
#if ( configCHECK_FOR_STACK_OVERFLOW > 0 )
|
#if ( configCHECK_FOR_STACK_OVERFLOW > 0 )
|
||||||
|
|
||||||
extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
|
extern void vApplicationStackOverflowHook( TaskHandle_t xTask,
|
||||||
|
char * pcTaskName );
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -482,7 +485,9 @@ PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ configNUM_CORES ]
|
|||||||
|
|
||||||
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
|
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
|
||||||
|
|
||||||
extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
|
extern void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
|
||||||
|
StackType_t ** ppxIdleTaskStackBuffer,
|
||||||
|
uint32_t * pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -1129,7 +1134,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
|
|||||||
#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 )
|
#if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 )
|
||||||
pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
|
pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@@ -1262,17 +1266,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
* so far. */
|
* so far. */
|
||||||
if( xSchedulerRunning == pdFALSE )
|
if( xSchedulerRunning == pdFALSE )
|
||||||
{
|
{
|
||||||
if( ( pxCurrentTCB[ 0 ] != NULL )
|
if( ( pxCurrentTCB[ 0 ] != NULL ) &&
|
||||||
&& ( taskCAN_RUN_ON_CORE( 0, pxNewTCB->xCoreID ) == pdTRUE )
|
( taskCAN_RUN_ON_CORE( 0, pxNewTCB->xCoreID ) == pdTRUE ) &&
|
||||||
&& ( pxCurrentTCB[ 0 ]->uxPriority <= pxNewTCB->uxPriority ) )
|
( pxCurrentTCB[ 0 ]->uxPriority <= pxNewTCB->uxPriority ) )
|
||||||
{
|
{
|
||||||
pxCurrentTCB[ 0 ] = pxNewTCB;
|
pxCurrentTCB[ 0 ] = pxNewTCB;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if ( configNUM_CORES > 1 )
|
#if ( configNUM_CORES > 1 )
|
||||||
else if( ( pxCurrentTCB[ 1 ] != NULL )
|
else if( ( pxCurrentTCB[ 1 ] != NULL ) &&
|
||||||
&& ( taskCAN_RUN_ON_CORE( 1, pxNewTCB->xCoreID ) == pdTRUE )
|
( taskCAN_RUN_ON_CORE( 1, pxNewTCB->xCoreID ) == pdTRUE ) &&
|
||||||
&& ( pxCurrentTCB[ 1 ]->uxPriority <= pxNewTCB->uxPriority ) )
|
( pxCurrentTCB[ 1 ]->uxPriority <= pxNewTCB->uxPriority ) )
|
||||||
{
|
{
|
||||||
pxCurrentTCB[ 1 ] = pxNewTCB;
|
pxCurrentTCB[ 1 ] = pxNewTCB;
|
||||||
}
|
}
|
||||||
@@ -1320,7 +1324,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
{
|
{
|
||||||
mtCOVERAGE_TEST_MARKER();
|
mtCOVERAGE_TEST_MARKER();
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
}
|
}
|
||||||
@@ -1349,9 +1352,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
xYieldRequiredCurrentCore = pdTRUE;
|
xYieldRequiredCurrentCore = pdTRUE;
|
||||||
}
|
}
|
||||||
/* If the target task can run on the other core, and has a higher priority then the other core, and the other core has not suspended scheduling, the yield the other core */
|
/* If the target task can run on the other core, and has a higher priority then the other core, and the other core has not suspended scheduling, the yield the other core */
|
||||||
else if( ( ( xTaskCoreID == !xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) )
|
else if( ( ( xTaskCoreID == !xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) ) &&
|
||||||
&& ( uxTaskPriority > pxCurrentTCB[ !xCurCoreID ]->uxPriority )
|
( uxTaskPriority > pxCurrentTCB[ !xCurCoreID ]->uxPriority ) &&
|
||||||
&& ( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) pdFALSE ) )
|
( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) pdFALSE ) )
|
||||||
{
|
{
|
||||||
/* Signal the other core to yield */
|
/* Signal the other core to yield */
|
||||||
vPortYieldOtherCore( !xCurCoreID );
|
vPortYieldOtherCore( !xCurCoreID );
|
||||||
@@ -1456,15 +1459,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) )
|
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) )
|
||||||
{
|
{
|
||||||
/* SMP case of deleting a task running on a different core. Same issue
|
/* SMP case of deleting a task running on a different core. Same issue
|
||||||
as a task deleting itself, but we need to send a yield to this task now
|
* as a task deleting itself, but we need to send a yield to this task now
|
||||||
before we release xKernelLock.
|
* before we release xKernelLock.
|
||||||
|
*
|
||||||
Specifically there is a case where the other core may already be spinning on
|
* Specifically there is a case where the other core may already be spinning on
|
||||||
xKernelLock waiting to go into a blocked state. A check is added in
|
* xKernelLock waiting to go into a blocked state. A check is added in
|
||||||
prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
|
* prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
|
||||||
xTasksWaitingTermination list in this case (instead it will immediately
|
* xTasksWaitingTermination list in this case (instead it will immediately
|
||||||
release xKernelLock again and be yielded before the FreeRTOS function
|
* release xKernelLock again and be yielded before the FreeRTOS function
|
||||||
returns.) */
|
* returns.) */
|
||||||
vPortYieldOtherCore( !xCurCoreID );
|
vPortYieldOtherCore( !xCurCoreID );
|
||||||
}
|
}
|
||||||
#endif /* configNUM_CORES > 1 */
|
#endif /* configNUM_CORES > 1 */
|
||||||
@@ -1481,7 +1484,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
}
|
}
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
|
|
||||||
if( xFreeNow == pdTRUE ) {
|
if( xFreeNow == pdTRUE )
|
||||||
|
{
|
||||||
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
|
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
|
||||||
prvDeleteTLS( pxTCB );
|
prvDeleteTLS( pxTCB );
|
||||||
#endif
|
#endif
|
||||||
@@ -1494,6 +1498,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
if( xSchedulerRunning != pdFALSE )
|
if( xSchedulerRunning != pdFALSE )
|
||||||
{
|
{
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
|
|
||||||
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
|
if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
|
||||||
{
|
{
|
||||||
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
||||||
@@ -1503,6 +1508,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
{
|
{
|
||||||
mtCOVERAGE_TEST_MARKER();
|
mtCOVERAGE_TEST_MARKER();
|
||||||
}
|
}
|
||||||
|
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1512,7 +1518,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
|
|
||||||
#if ( INCLUDE_xTaskDelayUntil == 1 )
|
#if ( INCLUDE_xTaskDelayUntil == 1 )
|
||||||
#ifdef ESP_PLATFORM
|
#ifdef ESP_PLATFORM
|
||||||
// backward binary compatibility - remove later
|
/* backward binary compatibility - remove later */
|
||||||
#undef vTaskDelayUntil
|
#undef vTaskDelayUntil
|
||||||
void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
|
void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
|
||||||
const TickType_t xTimeIncrement )
|
const TickType_t xTimeIncrement )
|
||||||
@@ -1531,7 +1537,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
configASSERT( ( xTimeIncrement > 0U ) );
|
configASSERT( ( xTimeIncrement > 0U ) );
|
||||||
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM // IDF-3755
|
#ifdef ESP_PLATFORM /* IDF-3755 */
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
#else
|
#else
|
||||||
vTaskSuspendAll();
|
vTaskSuspendAll();
|
||||||
@@ -1591,7 +1597,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
mtCOVERAGE_TEST_MARKER();
|
mtCOVERAGE_TEST_MARKER();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#ifdef ESP_PLATFORM // IDF-3755
|
#ifdef ESP_PLATFORM /* IDF-3755 */
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
xAlreadyYielded = pdFALSE;
|
xAlreadyYielded = pdFALSE;
|
||||||
#else
|
#else
|
||||||
@@ -1625,7 +1631,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
if( xTicksToDelay > ( TickType_t ) 0U )
|
if( xTicksToDelay > ( TickType_t ) 0U )
|
||||||
{
|
{
|
||||||
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
|
||||||
#ifdef ESP_PLATFORM // IDF-3755
|
#ifdef ESP_PLATFORM /* IDF-3755 */
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
#else
|
#else
|
||||||
vTaskSuspendAll();
|
vTaskSuspendAll();
|
||||||
@@ -1642,7 +1648,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
* executing task. */
|
* executing task. */
|
||||||
prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
|
prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
|
||||||
}
|
}
|
||||||
#ifdef ESP_PLATFORM // IDF-3755
|
#ifdef ESP_PLATFORM /* IDF-3755 */
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
xAlreadyYielded = pdFALSE;
|
xAlreadyYielded = pdFALSE;
|
||||||
#else
|
#else
|
||||||
@@ -1679,7 +1685,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
|
|
||||||
configASSERT( pxTCB );
|
configASSERT( pxTCB );
|
||||||
|
|
||||||
taskENTER_CRITICAL( &xKernelLock ); //Need critical section incase either core context switches in between
|
taskENTER_CRITICAL( &xKernelLock ); /*Need critical section incase either core context switches in between */
|
||||||
|
|
||||||
if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
|
if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
|
||||||
{
|
{
|
||||||
/* The task calling this function is querying its own state. */
|
/* The task calling this function is querying its own state. */
|
||||||
@@ -1756,6 +1763,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
|
|||||||
eReturn = eReady;
|
eReturn = eReady;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
|
|
||||||
return eReturn;
|
     return eReturn;
@@ -1899,6 +1907,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                      * is ready to execute. */
                     xYieldRequired = pdTRUE;
                 }
+
                 #if ( configNUM_CORES > 1 )
                     else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 1 ) )
                     {
@@ -2092,6 +2101,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 }
             }
         }
+
         #if ( configNUM_CORES > 1 )
             else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xPortGetCoreID() ) )
             {
@@ -2140,8 +2150,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         {
             /* Has the task already been resumed from within an ISR? */
             #if ( configNUM_CORES > 1 )
-                if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
-                    && ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) )
+                if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) &&
+                    ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) )
             #else
                 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
             #endif
@@ -2399,7 +2409,7 @@ void vTaskStartScheduler( void )
              * structure specific to the task that will run first.
              * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
              * for additional information. */
-            // _impure_ptr = &( pxCurrentTCB[xPortGetCoreID()]->xNewLib_reent );
+            /* _impure_ptr = &( pxCurrentTCB[xPortGetCoreID()]->xNewLib_reent ); */
         }
     #endif /* configUSE_NEWLIB_REENTRANT */

@@ -2462,6 +2472,7 @@ void vTaskSuspendAll( void )
          * https://goo.gl/wu4acr */

         #if ( configNUM_CORES > 1 )
+
             /* For SMP, although each core has their own uxSchedulerSuspended, we still
              * need enter a critical section when accessing. */
             taskENTER_CRITICAL( &xKernelLock );
@@ -2614,6 +2625,7 @@ BaseType_t xTaskResumeAll( void )
                  * they should be processed now. This ensures the tick count does
                  * not slip, and that any delayed tasks are resumed at the correct
                  * time. */
+
                 /* Core 0 is solely responsible for managing tick count, thus it
                  * must be the only core to unwind the pended ticks */
                 if( xCoreID == 0 )
@@ -2824,7 +2836,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
         /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
         configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

-        #ifdef ESP_PLATFORM // IDF-3755
+        #ifdef ESP_PLATFORM /* IDF-3755 */
             taskENTER_CRITICAL( &xKernelLock );
         #else
             vTaskSuspendAll();
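
The ESP_PLATFORM / IDF-3755 blocks that recur through these hunks all follow one pattern: the dual-core IDF port protects short accesses to kernel data with a spinlock-backed critical section, while vanilla FreeRTOS suspends the local scheduler instead. A minimal sketch of the two paths, assembled only from names visible in this diff (illustration, not part of the commit):

    #ifdef ESP_PLATFORM                          /* IDF FreeRTOS (SMP) */
        taskENTER_CRITICAL( &xKernelLock );      /* spinlock: excludes the other core as well as interrupts */
        /* ... walk or update kernel lists ... */
        taskEXIT_CRITICAL( &xKernelLock );
    #else                                        /* Vanilla single-core FreeRTOS */
        vTaskSuspendAll();                       /* only stops the scheduler on this core */
        /* ... walk or update kernel lists ... */
        ( void ) xTaskResumeAll();
    #endif
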
@@ -2874,7 +2886,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
             }
         #endif
     }
-    #ifdef ESP_PLATFORM // IDF-3755
+    #ifdef ESP_PLATFORM /* IDF-3755 */
         taskEXIT_CRITICAL( &xKernelLock );
     #else
         ( void ) xTaskResumeAll();
@@ -2894,7 +2906,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
     {
         UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

-        #ifdef ESP_PLATFORM // IDF-3755
+        #ifdef ESP_PLATFORM /* IDF-3755 */
             taskENTER_CRITICAL( &xKernelLock );
         #else
             vTaskSuspendAll();
@@ -2957,7 +2969,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #ifdef ESP_PLATFORM // IDF-3755
+        #ifdef ESP_PLATFORM /* IDF-3755 */
             taskEXIT_CRITICAL( &xKernelLock );
         #else
             ( void ) xTaskResumeAll();
@@ -3000,6 +3012,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
            /* For SMP, we require a critical section to access xTickCount */
            taskENTER_CRITICAL( &xKernelLock );
        #endif
+
        /* Correct the tick count value after a period during which the tick
         * was suppressed. Note this does *not* call the tick hook function for
         * each stepped tick. */
@@ -3030,6 +3043,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
      * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
     vTaskSuspendAll();
     #ifdef ESP_PLATFORM
+
         /* For SMP, we still require a critical section to access xPendedTicks even
          * if the scheduler is disabled. */
         taskENTER_CRITICAL( &xKernelLock );
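
Where the hunk above mentions xPendedTicks, the public entry point is xTaskCatchUpTicks(), which queues suppressed ticks while the scheduler is suspended so they are processed in xTaskResumeAll(). A hypothetical caller sketch (the helper name and the microsecond-to-tick conversion are assumptions, not from the source):

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static void example_after_tick_suppression( uint32_t ulSuppressedUs )
    {
        /* Convert the time spent with the tick interrupt off into ticks. */
        TickType_t xTicks = ( TickType_t ) ( ( ( uint64_t ) ulSuppressedUs * configTICK_RATE_HZ ) / 1000000ULL );

        /* Adds the ticks to xPendedTicks; they are unwound in xTaskResumeAll(). */
        ( void ) xTaskCatchUpTicks( xTicks );
    }
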
@@ -3053,7 +3067,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )

     configASSERT( pxTCB );

-    #ifdef ESP_PLATFORM // IDF-3755
+    #ifdef ESP_PLATFORM /* IDF-3755 */
         taskENTER_CRITICAL( &xKernelLock );
     #else
         vTaskSuspendAll();
@@ -3120,7 +3134,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
            xReturn = pdFAIL;
        }
    }
-    #ifdef ESP_PLATFORM // IDF-3755
+    #ifdef ESP_PLATFORM /* IDF-3755 */
        taskEXIT_CRITICAL( &xKernelLock );
    #else
        ( void ) xTaskResumeAll();
@@ -3152,6 +3166,7 @@ BaseType_t xTaskIncrementTick( void )
    traceTASK_INCREMENT_TICK( xTickCount );

    #ifdef ESP_PLATFORM
+
        /* We need a critical section here as we are about to access kernel data
         * structures:
         * - Other cores could be accessing them simultaneously
@@ -3248,10 +3263,11 @@ BaseType_t xTaskIncrementTick( void )
                         * priority that is equal to or higher than the
                         * currently executing task. */
                        #if defined( ESP_PLATFORM ) && ( configNUM_CORES > 1 )
+
                            /* Since this function is only run on core 0, we
                             * only need to switch contexts if the unblocked task
                             * can run on core 0. */
-                            if( ( pxTCB->xCoreID == 0 || pxTCB->xCoreID == tskNO_AFFINITY ) && (pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority) )
+                            if( ( ( pxTCB->xCoreID == 0 ) || ( pxTCB->xCoreID == tskNO_AFFINITY ) ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
                        #else
                            if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
                        #endif
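
The rewritten condition above shows a rule applied throughout the commit: every operand of a mixed && / || expression gets explicit parentheses. A hypothetical helper written in that style (prvShouldPreemptCore0 does not exist in the source; it only mirrors the parenthesisation):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static BaseType_t prvShouldPreemptCore0( BaseType_t xCoreID,
                                             UBaseType_t uxTaskPriority,
                                             UBaseType_t uxRunningPriority )
    {
        BaseType_t xResult;

        /* Each comparison is parenthesised so the && / || precedence is explicit. */
        if( ( ( xCoreID == 0 ) || ( xCoreID == tskNO_AFFINITY ) ) &&
            ( uxTaskPriority >= uxRunningPriority ) )
        {
            xResult = pdTRUE;
        }
        else
        {
            xResult = pdFALSE;
        }

        return xResult;
    }
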
@@ -3352,6 +3368,7 @@ BaseType_t xTaskIncrementTick( void )
         * execution */
        BaseType_t xCoreID = xPortGetCoreID();
        BaseType_t xSwitchRequired = pdFALSE;
+
        /* This function should never be called by Core 0. */
        configASSERT( xCoreID != 0 );

@@ -3379,11 +3396,13 @@ BaseType_t xTaskIncrementTick( void )
                if( uxTopReadyPriority > pxCurrentTCB[ xCoreID ]->uxPriority )
                {
                    xSwitchRequired = pdTRUE;
-                } else {
+                }
+                else
+                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
-        #endif
+        #endif /* if ( configUSE_PREEMPTION == 1 ) */

        /* Tasks of equal priority to the currently running task will share
         * processing time (time slice) if preemption is on, and the application
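
Two purely mechanical rewrites appear in the hunk above: "} else {" is expanded so each brace sits on its own line, and closing #endif directives are tagged with the condition they terminate. A minimal sketch of the resulting style (illustration only; the helper is hypothetical):

    #include "freertos/FreeRTOS.h"

    static UBaseType_t prvPickPriority( UBaseType_t uxReadyPriority, UBaseType_t uxCurrentPriority )
    {
        UBaseType_t uxResult;

        #if ( configUSE_PREEMPTION == 1 )
            if( uxReadyPriority > uxCurrentPriority )
            {
                uxResult = uxReadyPriority;
            }
            else
            {
                uxResult = uxCurrentPriority;
            }
        #else
            uxResult = uxCurrentPriority;
        #endif /* if ( configUSE_PREEMPTION == 1 ) */

        return uxResult;
    }
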
@@ -3433,7 +3452,8 @@ BaseType_t xTaskIncrementTick( void )

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )

-    void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
+    void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
+                                     TaskHookFunction_t pxHookFunction )
    {
        TCB_t * xTCB;

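
The reflowed prototype above belongs to the standard application task tag API. A usage sketch, assuming configUSE_APPLICATION_TASK_TAG is set to 1 in the project's FreeRTOSConfig.h (the hook and helper names are hypothetical):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* TaskHookFunction_t has the shape BaseType_t ( * )( void * ). */
    static BaseType_t prvExampleHook( void * pvParameter )
    {
        ( void ) pvParameter;
        /* ... application specific work ... */
        return pdPASS;
    }

    static void prvTagCurrentTask( void )
    {
        /* Passing NULL tags the calling task; the hook can later be invoked
         * with xTaskCallApplicationTaskHook(). */
        vTaskSetApplicationTaskTag( NULL, prvExampleHook );
    }
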
@@ -3562,6 +3582,7 @@ static void taskSelectHighestPriorityTaskSMP( void )

    /* Search for tasks, starting form the highest ready priority. If nothing is
     * found, we eventually default to the IDLE tasks at priority 0 */
+
    for( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- )
    {
        /* Check if current priority has one or more ready tasks. Skip if none */
@@ -3588,6 +3609,7 @@ static void taskSelectHighestPriorityTaskSMP( void )
            TCB_t * pxTCBFirst;
            listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
            pxTCBFirst = pxTCBCur;
+
            do
            {
                /* Check if the current task is currently being executed. However, if
@@ -3595,6 +3617,7 @@ static void taskSelectHighestPriorityTaskSMP( void )
                 * Todo: Each task can store a xTaskRunState, instead of needing to
                 * check each core */
                UBaseType_t ux;
+
                for( ux = 0; ux < ( UBaseType_t ) configNUM_CORES; ux++ )
                {
                    if( ux == xCoreID )
@@ -3640,12 +3663,14 @@ get_next_task:
 void vTaskSwitchContext( void )
 {
    #ifdef ESP_PLATFORM
+
        /* vTaskSwitchContext is called either from:
         * - ISR dispatcher when return from an ISR (interrupts will already be disabled)
         * - vTaskSuspend() which is not in a critical section
         * Therefore, we enter a critical section ISR version to ensure safety */
        taskENTER_CRITICAL_ISR( &xKernelLock );
    #endif // ESP_PLATFORM
+
    if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
    {
        /* The scheduler is currently suspended - do not allow a context
@@ -3732,6 +3757,7 @@ void vTaskSwitchContext( void )
        #endif /* configUSE_NEWLIB_REENTRANT */
    #endif // ESP_PLATFORM
    }
+
    #ifdef ESP_PLATFORM
        /* Exit the critical section previously entered */
        taskEXIT_CRITICAL_ISR( &xKernelLock );
@@ -3773,6 +3799,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
     * the event groups implementation. */
+
    /* Note. We currently don't always suspend the scheduler. Todo: IDF-3755
     * configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 ); */

@@ -3796,7 +3823,9 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,

    #if ( configUSE_TIMERS == 1 )

-    void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
+    void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
+                                          TickType_t xTicksToWait,
+                                          const BaseType_t xWaitIndefinitely )
    {
        configASSERT( pxEventList );

@@ -3871,8 +3900,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
     * - The task is pinned, and the pinned core's scheduler is running
     * - The task is unpinned, and at least one of the core's scheduler is running */
    #if ( configNUM_CORES > 1 )
-        if( ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 0, pxUnblockedTCB->xCoreID) == pdTRUE ) )
-            || ( ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 1, pxUnblockedTCB->xCoreID) == pdTRUE ) ) )
+        if( ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 0, pxUnblockedTCB->xCoreID ) == pdTRUE ) ) ||
+            ( ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 1, pxUnblockedTCB->xCoreID ) == pdTRUE ) ) )
    #else
        if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
    #endif /* configNUM_CORES > 1 */
@@ -3967,9 +3996,10 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
    /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
     * It is used by the event flags implementation, thus those functions
     * should call vTaskTakeKernelLock() before calling this function. */
+
    /*
-    Todo: IDF-5785
-    configASSERT( uxSchedulerSuspended[ xCurCoreID ] != pdFALSE );
+     * Todo: IDF-5785
+     * configASSERT( uxSchedulerSuspended[ xCurCoreID ] != pdFALSE );
     */

    /* Store the new item value in the event list. */
@@ -4260,7 +4290,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )

                if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                {
-                    #ifdef ESP_PLATFORM // IDF-3755
+                    #ifdef ESP_PLATFORM /* IDF-3755 */
                        taskENTER_CRITICAL( &xKernelLock );
                    #else
                        vTaskSuspendAll();
@@ -4288,7 +4318,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
-                    #ifdef ESP_PLATFORM // IDF-3755
+                    #ifdef ESP_PLATFORM /* IDF-3755 */
                        taskEXIT_CRITICAL( &xKernelLock );
                    #else
                        ( void ) xTaskResumeAll();
@@ -4313,6 +4343,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
        eSleepModeStatus eReturn = eStandardSleep;

        taskENTER_CRITICAL( &xKernelLock );
+
        if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
        {
            /* A task was made ready while the scheduler was suspended. */
@@ -4338,6 +4369,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
                mtCOVERAGE_TEST_MARKER();
            }
        }
+
        taskEXIT_CRITICAL( &xKernelLock );

        return eReturn;
@@ -4350,7 +4382,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )

    #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )

-    void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
+    void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet,
+                                                          BaseType_t xIndex,
+                                                          void * pvValue,
+                                                          TlsDeleteCallbackFunction_t xDelCallback )
    {
        TCB_t * pxTCB;

@@ -4364,13 +4399,15 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
        }
    }

-    void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
+    void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
+                                            BaseType_t xIndex,
+                                            void * pvValue )
    {
        vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, ( TlsDeleteCallbackFunction_t ) NULL );
    }


-    #else
+    #else /* if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
        void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
                                                BaseType_t xIndex,
                                                void * pvValue )
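
The wrapped prototypes above are the ESP-IDF extension that pairs a thread-local-storage pointer with a deletion callback; prvDeleteTLS(), later in this diff, invokes the callback as callback( index, pointer ) when the owning task is deleted. A usage sketch, assuming configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS is enabled and TLS index 0 is unused (helper names are hypothetical):

    #include <stdlib.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static void prvTlsCleanup( int xIndex, void * pvValue )
    {
        ( void ) xIndex;
        free( pvValue );    /* runs from prvDeleteTLS() when the task is deleted */
    }

    static void prvAttachPerTaskBuffer( void )
    {
        void * pvBuffer = malloc( 128 );

        if( pvBuffer != NULL )
        {
            vTaskSetThreadLocalStoragePointerAndDelCallback( NULL,          /* calling task */
                                                             0,             /* TLS index    */
                                                             pvBuffer,
                                                             prvTlsCleanup );
        }
    }
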
@@ -4485,33 +4522,43 @@ static void prvCheckTasksWaitingTermination( void )
        taskENTER_CRITICAL( &xKernelLock );
        {
            xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );

            if( xListIsEmpty == pdFALSE )
            {
                /* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
-                be called on the core the process is pinned on, if any */
+                 * be called on the core the process is pinned on, if any */
                ListItem_t * target = listGET_HEAD_ENTRY( &xTasksWaitingTermination );
-                for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){ //Walk the list
+
+                for( ; target != listGET_END_MARKER( &xTasksWaitingTermination ); target = listGET_NEXT( target ) ) /*Walk the list */
+                {
                    TCB_t * tgt_tcb = ( TCB_t * ) listGET_LIST_ITEM_OWNER( target );
                    int affinity = tgt_tcb->xCoreID;
-                    //Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
-                    if( pxCurrentTCB[core] == tgt_tcb || (configNUM_CORES > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
-                        continue; //Can't free memory of task that is still running
+
+                    /*Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running */
+                    if( ( pxCurrentTCB[ core ] == tgt_tcb ) || ( ( configNUM_CORES > 1 ) && ( pxCurrentTCB[ !core ] == tgt_tcb ) ) )
+                    {
+                        continue; /*Can't free memory of task that is still running */
                    }
-                    if(affinity == core || affinity == tskNO_AFFINITY){ //Find first item not pinned to other core
+
+                    if( ( affinity == core ) || ( affinity == tskNO_AFFINITY ) ) /*Find first item not pinned to other core */
+                    {
                        pxTCB = tgt_tcb;
                        break;
                    }
                }
-                if(pxTCB != NULL){
-                    ( void ) uxListRemove( target ); //Remove list item from list
+
+                if( pxTCB != NULL )
+                {
+                    ( void ) uxListRemove( target ); /*Remove list item from list */
                    --uxCurrentNumberOfTasks;
                    --uxDeletedTasksWaitingCleanUp;
                }
            }
        }
-        taskEXIT_CRITICAL( &xKernelLock ); //Need to call deletion callbacks outside critical section
+        taskEXIT_CRITICAL( &xKernelLock ); /*Need to call deletion callbacks outside critical section */
+
-        if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
+        if( pxTCB != NULL ) /*Call deletion callbacks and free TCB memory */
+        {
            #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
                prvDeleteTLS( pxTCB );
            #endif
@@ -4520,7 +4567,7 @@ static void prvCheckTasksWaitingTermination( void )
            else
            {
                mtCOVERAGE_TEST_MARKER();
-                break; //No TCB found that could be freed by this core, break out of loop
+                break; /*No TCB found that could be freed by this core, break out of loop */
            }
        }
    }
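
For context on the list walk above: a task that deletes itself cannot free its own TCB and stack, so it is parked on xTasksWaitingTermination and reclaimed later from the idle task by prvCheckTasksWaitingTermination(), on a core that is allowed to clean it up. A minimal sketch of such a self-deleting task (illustration only; the task name is hypothetical):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static void prvOneShotWorker( void * pvParameters )
    {
        ( void ) pvParameters;

        /* ... perform the one-off job ... */

        /* Self delete: the TCB and stack are freed later by the idle task, not here. */
        vTaskDelete( NULL );
    }
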
@@ -4589,7 +4636,7 @@ static void prvCheckTasksWaitingTermination( void )
             * it should be reported as being in the Blocked state. */
            if( eState == eSuspended )
            {
-                #ifdef ESP_PLATFORM // IDF-3755
+                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskENTER_CRITICAL( &xKernelLock );
                #else
                    vTaskSuspendAll();
@@ -4600,7 +4647,7 @@ static void prvCheckTasksWaitingTermination( void )
                        pxTaskStatus->eCurrentState = eBlocked;
                    }
                }
-                #ifdef ESP_PLATFORM // IDF-3755
+                #ifdef ESP_PLATFORM /* IDF-3755 */
                    taskEXIT_CRITICAL( &xKernelLock );
                #else
                    ( void ) xTaskResumeAll();
@@ -4852,11 +4899,12 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
 static void prvDeleteTLS( TCB_t * pxTCB )
 {
     configASSERT( pxTCB );
+
     for( int x = 0; x < configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
     {
-        if (pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL) //If del cb is set
+        if( pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] != NULL ) /*If del cb is set */
         {
-            pxTCB->pvThreadLocalStoragePointersDelCallback[ x ](x, pxTCB->pvThreadLocalStoragePointers[ x ]); //Call del cb
+            pxTCB->pvThreadLocalStoragePointersDelCallback[ x ]( x, pxTCB->pvThreadLocalStoragePointers[ x ] ); /*Call del cb */
        }
    }
 }
@@ -4907,8 +4955,9 @@ static void prvResetNextTaskUnblockTime( void )
 {
    TaskHandle_t xReturn = NULL;

-    //Xtensa-specific: the pxCurrentPCB pointer is atomic so we shouldn't need a lock.
-    if (cpuid < configNUM_CORES) {
+    /*Xtensa-specific: the pxCurrentPCB pointer is atomic so we shouldn't need a lock. */
+    if( cpuid < configNUM_CORES )
+    {
        xReturn = pxCurrentTCB[ cpuid ];
    }

@@ -4927,6 +4976,7 @@ static void prvResetNextTaskUnblockTime( void )

    /* Known issue. This should use critical sections. See IDF-5889 */
    state = portSET_INTERRUPT_MASK_FROM_ISR();
+
    if( xSchedulerRunning == pdFALSE )
    {
        xReturn = taskSCHEDULER_NOT_STARTED;
@@ -4942,6 +4992,7 @@ static void prvResetNextTaskUnblockTime( void )
            xReturn = taskSCHEDULER_SUSPENDED;
        }
    }
+
    portCLEAR_INTERRUPT_MASK_FROM_ISR( state );

    return xReturn;
@@ -4958,6 +5009,7 @@ static void prvResetNextTaskUnblockTime( void )
    BaseType_t xReturn = pdFALSE;

    taskENTER_CRITICAL( &xKernelLock );
+
    /* If the mutex was given back by an interrupt while the queue was
     * locked then the mutex holder might now be NULL. _RB_ Is this still
     * needed as interrupts can no longer use mutexes? */
@@ -5034,6 +5086,7 @@ static void prvResetNextTaskUnblockTime( void )
        {
            mtCOVERAGE_TEST_MARKER();
        }
+
        taskEXIT_CRITICAL( &xKernelLock );

        return xReturn;
@@ -5050,6 +5103,7 @@ static void prvResetNextTaskUnblockTime( void )
    BaseType_t xReturn = pdFALSE;

    taskENTER_CRITICAL( &xKernelLock );
+
    if( pxMutexHolder != NULL )
    {
        /* A task can only have an inherited priority if it holds the mutex.
@@ -5116,6 +5170,7 @@ static void prvResetNextTaskUnblockTime( void )
        {
            mtCOVERAGE_TEST_MARKER();
        }
+
        taskEXIT_CRITICAL( &xKernelLock );

        return xReturn;
@@ -5134,6 +5189,7 @@ static void prvResetNextTaskUnblockTime( void )
    const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

    taskENTER_CRITICAL( &xKernelLock );
+
    if( pxMutexHolder != NULL )
    {
        /* If pxMutexHolder is not NULL then the holder must hold at least
@@ -5226,6 +5282,7 @@ static void prvResetNextTaskUnblockTime( void )
        {
            mtCOVERAGE_TEST_MARKER();
        }
+
        taskEXIT_CRITICAL( &xKernelLock );
    }

@@ -5586,10 +5643,12 @@ TickType_t uxTaskResetEventItemValue( void )
    /* If xSemaphoreCreateMutex() is called before any tasks have been created
     * then pxCurrentTCB will be NULL. */
    taskENTER_CRITICAL( &xKernelLock );
+
    if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
    {
        ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
    }
+
    curTCB = pxCurrentTCB[ xPortGetCoreID() ];
    taskEXIT_CRITICAL( &xKernelLock );

@@ -5601,8 +5660,8 @@ TickType_t uxTaskResetEventItemValue( void )

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )

-    #ifdef ESP_PLATFORM // IDF-3851
-    // included here for backward binary compatibility
+    #ifdef ESP_PLATFORM /* IDF-3851 */
+    /* included here for backward binary compatibility */
    #undef ulTaskNotifyTake
    uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit,
                               TickType_t xTicksToWait )
@@ -5683,8 +5742,8 @@ TickType_t uxTaskResetEventItemValue( void )

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )

-    #ifdef ESP_PLATFORM // IDF-3851
-    // included for backward compatibility
+    #ifdef ESP_PLATFORM /* IDF-3851 */
+    /* included for backward compatibility */
    #undef xTaskNotifyWait
    BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry,
                                uint32_t ulBitsToClearOnExit,
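
The #undef wrappers above only preserve binary compatibility for the notification API (IDF-3851); the call signatures themselves are the stock FreeRTOS ones. A usage sketch of the take/give pair acting as a lightweight binary semaphore (task and helper names are hypothetical):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static TaskHandle_t xWorkerHandle = NULL;   /* assumed to be set when the worker task is created */

    static void prvWorkerTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            /* Block until notified; clear the count on exit so it behaves like a binary semaphore. */
            if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) > 0 )
            {
                /* ... handle the event ... */
            }
        }
    }

    static void prvNotifyWorker( void )
    {
        xTaskNotifyGive( xWorkerHandle );
    }
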
@@ -6202,7 +6261,7 @@ TickType_t uxTaskResetEventItemValue( void )
            return pxTCB->ulRunTimeCounter;
        }

-    #endif
+    #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
 /*-----------------------------------------------------------*/

 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
@@ -6216,8 +6275,8 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
        if( listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCB[ xCurCoreID ]->xStateListItem ) ) == pdTRUE )
        {
            /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
-            so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
-            be suspended permanently. Todo: IDF-5844. */
+             * so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
+             * be suspended permanently. Todo: IDF-5844. */
            return;
        }
    #endif

@@ -5,6 +5,7 @@
 *
 * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
 */
+
 /*
 * FreeRTOS Kernel V10.4.3
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
@@ -148,6 +149,7 @@
 PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;

 #ifdef ESP_PLATFORM
+
 /* Spinlock required in SMP when accessing the timers. For now we use a single lock
  * Todo: Each timer could possible have its own lock for increased granularity. */
 PRIVILEGED_DATA portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;
@@ -646,7 +648,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;

        vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );

-        #ifdef ESP_PLATFORM // IDF-3755
+        #ifdef ESP_PLATFORM /* IDF-3755 */
            taskEXIT_CRITICAL( &xTimerLock );
        #else
            if( xTaskResumeAll() == pdFALSE )
@@ -658,7 +660,8 @@ PRIVILEGED_DATA portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;
                 * will not cause the task to block. */
                portYIELD_WITHIN_API();
            }
-            #ifndef ESP_PLATFORM // IDF-3755
+
+            #ifndef ESP_PLATFORM /* IDF-3755 */
            else
            {
                mtCOVERAGE_TEST_MARKER();
@@ -668,7 +671,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;
        }
        else
        {
-            #ifdef ESP_PLATFORM // IDF-3755
+            #ifdef ESP_PLATFORM /* IDF-3755 */
                taskEXIT_CRITICAL( &xTimerLock );
            #else
                ( void ) xTaskResumeAll();
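
The timer hunks above (from the timer module, identified by xTimerLock and xTimerTaskHandle) only adjust locking inside the timer service task; the public API is unchanged. A usage sketch that exercises that service task with a periodic software timer (names and period are assumptions):

    #include "freertos/FreeRTOS.h"
    #include "freertos/timers.h"

    static void prvPeriodicCallback( TimerHandle_t xTimer )
    {
        ( void ) xTimer;
        /* Runs in the timer service task; keep it short and non-blocking. */
    }

    static void prvStartPeriodicTimer( void )
    {
        TimerHandle_t xTimer = xTimerCreate( "periodic",             /* name        */
                                             pdMS_TO_TICKS( 500 ),   /* period      */
                                             pdTRUE,                 /* auto-reload */
                                             NULL,                   /* timer ID    */
                                             prvPeriodicCallback );

        if( xTimer != NULL )
        {
            ( void ) xTimerStart( xTimer, 0 );
        }
    }
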