Mirror of https://github.com/espressif/esp-idf.git, synced 2025-11-04 00:51:42 +01:00
	refactor(freertos/idf): Update thread safety convenience macros
This commit refactors the thread safety convenience macros: the repeated definitions are removed and the macros are now kept in a single place, "freertos_idf_additions_priv.h".
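The prv-prefixed wrappers that the updated call sites use (prvENTER_CRITICAL_SMP_ONLY(), prvDISABLE_INTERRUPTS_ISR_SMP_ONLY(), and so on) live in "freertos_idf_additions_priv.h", which is not part of the diff below. As orientation only, here is a minimal sketch of what those definitions could look like, inferred from the task*-prefixed block this commit removes from tasks.c; the header layout and the exact definitions are assumptions, not the actual file contents:

    /* Sketch only: "freertos_idf_additions_priv.h" is not shown in this diff.
     * The mappings below mirror the removed task*_SMP_ONLY block, renamed to
     * the prv prefix that the updated call sites reference. */
    #if ( configNUMBER_OF_CORES > 1 )
        /* Multi-core build: the SMP-only wrappers expand to real critical sections. */
        #define prvENTER_CRITICAL_SMP_ONLY( pxLock )             taskENTER_CRITICAL( pxLock )
        #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )              taskEXIT_CRITICAL( pxLock )
        #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )         taskENTER_CRITICAL_ISR( pxLock )
        #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )          taskEXIT_CRITICAL_ISR( pxLock )
        #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
        #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
    #else /* configNUMBER_OF_CORES > 1 */
        /* Single-core build: the wrappers compile away, matching the removed #else branch. */
        #define prvENTER_CRITICAL_SMP_ONLY( pxLock )
        #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )
        #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
        #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
        #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
        #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
    #endif /* configNUMBER_OF_CORES > 1 */

The SAFE and single-core-only variants (prvENTER_CRITICAL_SAFE_SMP_ONLY(), prvENTER_CRITICAL_SC_ONLY(), and their exit counterparts) would presumably follow the same pattern, along with the prvTaskEnterCriticalSafeSMPOnly()/prvTaskExitCriticalSafeSMPOnly() helpers removed below. A call site pairs the enter/exit wrappers around kernel data structure accesses, e.g. prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); ... prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );, exactly as in the hunks that follow.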
@@ -65,74 +65,6 @@
     #include <stdio.h>
 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
 
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )             taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )              taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )         taskENTER_CRITICAL_ISR( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )          taskEXIT_CRITICAL_ISR( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )        prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )         prvTaskExitCriticalSafeSMPOnly( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-
-#else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
-
-#endif /* configNUMBER_OF_CORES > 1 */
-
 #if ( configUSE_PREEMPTION == 0 )
 
 /* If the cooperative scheduler is being used then a yield should not be
@@ -1470,7 +1402,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Force a reschedule if it is the currently running task that has just
              * been deleted. */
@@ -1488,7 +1420,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* INCLUDE_vTaskDelete */
@@ -2366,7 +2298,7 @@ void vTaskStartScheduler( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
             {
@@ -2381,7 +2313,7 @@ void vTaskStartScheduler( void )
             xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         /* If configGENERATE_RUN_TIME_STATS is defined then the following
          * macro must be defined to configure the timer/counter used to generate
@@ -2431,12 +2363,12 @@ void vTaskEndScheduler( void )
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         xSchedulerRunning = pdFALSE;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     vPortEndScheduler();
 }
 /*----------------------------------------------------------*/
@@ -2686,7 +2618,7 @@ TickType_t xTaskGetTickCountFromISR( void )
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
     {
         uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
         {
@@ -2695,7 +2627,7 @@ TickType_t xTaskGetTickCountFromISR( void )
         portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
 
     return xReturn;
 }
@@ -3094,7 +3026,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                  * the event list too.  Interrupts can touch the event list item,
                  * even though the scheduler is suspended, so a critical section
                  * is used. */
-                taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
+                prvENTER_CRITICAL_SC_ONLY( &xKernelLock );
                 {
                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                     {
@@ -3110,7 +3042,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                         mtCOVERAGE_TEST_MARKER();
                     }
                 }
-                taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );
+                prvEXIT_CRITICAL_SC_ONLY( &xKernelLock );
 
                 /* Place the unblocked task into the appropriate ready list. */
                 prvAddTaskToReadyList( pxTCB );
@@ -3173,7 +3105,7 @@ BaseType_t xTaskIncrementTick( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * interrupts disabled). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
         {
@@ -3343,7 +3275,7 @@ BaseType_t xTaskIncrementTick( void )
 
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 
     #if ( configUSE_TICK_HOOK == 1 )
     {
@@ -3566,7 +3498,7 @@ void vTaskSwitchContext( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures (unlike single core which calls this function with
    * either interrupts disabled or when the scheduler hasn't started yet). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -3651,7 +3583,7 @@ void vTaskSwitchContext( void )
 
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3666,7 +3598,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
          * This is placed in the list in priority order so the highest priority task
@@ -3684,7 +3616,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3712,7 +3644,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Store the item value in the event list item.  It is safe to access the
          * event list item here as interrupts won't access the event list item of a
@@ -3729,7 +3661,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3749,7 +3681,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
         /* For SMP, we need to take the kernel lock here as we are about to access
          * kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Place the event list item of the TCB in the appropriate event list.
              * In this case it is assume that this is the only task that is going to
@@ -4548,7 +4480,7 @@ static void prvCheckTasksWaitingTermination( void )
 
         /* A critical section is required for SMP in case another core modifies
          * the task simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
             pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
@@ -4650,7 +4582,7 @@ static void prvCheckTasksWaitingTermination( void )
             }
         }
         /* Exit the previously entered critical section. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* configUSE_TRACE_FACILITY */
@@ -4872,11 +4804,11 @@ static void prvResetNextTaskUnblockTime( void )
          * For single-core a critical section is not required as this is not
          * called from an interrupt and the current TCB will always be the same
          * for any individual execution thread. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -4900,7 +4832,7 @@ static void prvResetNextTaskUnblockTime( void )
          *
          * We use the ISR versions of interrupt macros as this function could be
          * called inside critical sections. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             if( xSchedulerRunning == pdFALSE )
             {
@@ -4918,7 +4850,7 @@ static void prvResetNextTaskUnblockTime( void )
                 }
             }
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -4935,7 +4867,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Get current core ID as we can no longer be preempted. */
             const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5018,7 +4950,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }
@@ -5035,7 +4967,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5105,7 +5037,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }
@@ -5124,7 +5056,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5220,7 +5152,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* configUSE_MUTEXES */
@@ -5555,7 +5487,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
     /* For SMP, we need to take the kernel lock here to ensure nothing else
      * modifies the task's event item value simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5566,7 +5498,7 @@ TickType_t uxTaskResetEventItemValue( void )
          * queues and semaphores. */
         listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
     }
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     /* Release the previously taken kernel lock. */
 
     return uxReturn;
@@ -5581,7 +5513,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Get current core ID as we can no longer be preempted. */
             const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5596,7 +5528,7 @@ TickType_t uxTaskResetEventItemValue( void )
             xReturn = pxCurrentTCBs[ xCurCoreID ];
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }