Most code written. Interrupts do not fire yet.

Jeroen Domburg
2016-10-26 21:09:55 +08:00
parent cbb26c9532
commit d3d9a8bc28
5 changed files with 233 additions and 45 deletions


@@ -0,0 +1,107 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <string.h>
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_intr.h"
#include "rom/ets_sys.h"
#include "rom/uart.h"
#include "soc/cpu.h"
#include "soc/dport_reg.h"
#include "soc/io_mux_reg.h"
#include "soc/rtc_cntl_reg.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/portmacro.h"
#define REASON_YIELD (1<<0)
static portMUX_TYPE reasonSpinlock = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t reason[ portNUM_PROCESSORS ];
/*
ToDo: There is a small chance the CPU already has yielded when this ISR is serviced. In that case, it's running the intended task but
the ISR will cause it to switch _away_ from it. portYIELD_FROM_ISR will probably just schedule the task again, but have to check that.
*/
static void esp_crosscore_isr(void *arg) {
volatile uint32_t myReasonVal;
#if 0
//A pointer to the correct reason array item is passed to this ISR.
volatile uint32_t *myReason=arg;
#else
//Does not work yet, the interrupt code needs work to understand two separate interrupt and argument
//tables...
volatile uint32_t *myReason=&reason[xPortGetCoreID()];
#endif
//Clear the interrupt for this core first.
if (xPortGetCoreID()==0) {
WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
} else {
WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
}
//Grab the reason and clear it.
portENTER_CRITICAL(&reasonSpinlock);
myReasonVal=*myReason;
*myReason=0;
portEXIT_CRITICAL(&reasonSpinlock);
//Check what we need to do.
if (myReasonVal&REASON_YIELD) {
portYIELD_FROM_ISR();
}
ets_printf("recv yield\n");
}
//Initialize the crosscore interrupt on this core. Call this once
//on each active core.
void esp_crosscore_int_init() {
portENTER_CRITICAL(&reasonSpinlock);
reason[xPortGetCoreID()]=0;
portEXIT_CRITICAL(&reasonSpinlock);
ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
if (xPortGetCoreID()==0) {
intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
} else {
intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
}
xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
}
void esp_crosscore_int_send_yield(int coreId) {
ets_printf("send yield\n");
assert(coreId<portNUM_PROCESSORS);
//Mark the reason we interrupt the other CPU
portENTER_CRITICAL(&reasonSpinlock);
reason[coreId]|=REASON_YIELD;
portEXIT_CRITICAL(&reasonSpinlock);
//Poke the other CPU.
if (coreId==0) {
WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
} else {
WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
}
}
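Note on the handshake above: the sending core sets a bit in the target core's reason word while holding reasonSpinlock and then writes that core's DPORT "interrupt from CPU" register; the receiving ISR clears the hardware interrupt, takes and clears its reason word under the same spinlock, and acts on whatever bits it finds (currently only REASON_YIELD). A minimal usage sketch, assuming both cores already ran esp_crosscore_int_init() during startup; the helper name here is invented for illustration and is not part of this commit:

#include "esp_crosscore_int.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical helper: ask the other core to run its scheduler. */
void example_force_other_core_yield(void)
{
    int other = !xPortGetCoreID();          /* the ESP32 has two cores: 0 and 1 */
    esp_crosscore_int_send_yield(other);    /* sets REASON_YIELD and pokes that core */
}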


@@ -0,0 +1,8 @@
#ifndef __ESP_CROSSCORE_INT_H
#define __ESP_CROSSCORE_INT_H
void esp_crosscore_int_init();
void esp_crosscore_int_send_yield(int coreId);
#endif


@@ -179,6 +179,14 @@ BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION;
  */
 void vPortEndScheduler( void ) PRIVILEGED_FUNCTION;
 
+/*
+ * Send an interrupt to another core in order to make the task running
+ * on it yield for a higher-priority task.
+ */
+void vPortYieldOtherCore( BaseType_t coreid ) PRIVILEGED_FUNCTION;
+
 /*
  * The structures and methods of manipulating the MPU are contained within the
  * port layer.


@@ -103,6 +103,8 @@
 #include "panic.h"
 
+#include "esp_crosscore_int.h"
+
 /* Defined in portasm.h */
 extern void _frxt_tick_timer_init(void);
 
@@ -228,6 +230,12 @@ BaseType_t xPortSysTickHandler( void )
     return ret;
 }
 
+void vPortYieldOtherCore( BaseType_t coreid ) {
+    esp_crosscore_int_send_yield( coreid );
+}
+
 /*-----------------------------------------------------------*/
 
 /*


@@ -263,7 +263,7 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
-PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
+PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
@@ -417,6 +417,9 @@ count overflows. */
     vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
 /*-----------------------------------------------------------*/
 
+#define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
+
 /*
  * Several functions take an TaskHandle_t parameter that can optionally be NULL,
  * where NULL is used to indicate that the handle of the currently executing
@@ -581,6 +584,35 @@ static void prvResetNextTaskUnblockTime( void );
 /*-----------------------------------------------------------*/
 
+/*
+ * This routine tries to send an interrupt to another core if needed to make it execute a task
+ * of higher priority. We first try to figure out whether that is needed by inspecting the pxTCB of
+ * the other CPU; specifically for Xtensa we can do this because pxTCB is an atomic pointer. It is
+ * possible that the check is inaccurate because the other CPU just did a task switch, but in that
+ * case at most a superfluous interrupt is generated.
+ */
+static void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
+{
+    BaseType_t i;
+
+    if (xCoreID != tskNO_AFFINITY) {
+        if ( pxCurrentTCB[ xCoreID ]->uxPriority < uxPriority ) {
+            vPortYieldOtherCore( xCoreID );
+        }
+    }
+    else
+    {
+        /* The task has no affinity. See if we can find a CPU to put it on. */
+        for (i=0; i<portNUM_PROCESSORS; i++) {
+            if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
+            {
+                vPortYieldOtherCore( i );
+                break;
+            }
+        }
+    }
+}
+
 BaseType_t xTaskGenericCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask, StackType_t * const puxStackBuffer, const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
 {
     BaseType_t xReturn;
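As an aside, the decision taskYIELD_OTHER_CORE() encodes can be illustrated outside the kernel. The sketch below is hypothetical host-side code (two cores assumed, all names invented here), not part of the patch: a pinned task only ever interrupts its own core, while an unpinned task interrupts the first other core that is currently running something of lower priority.

#include <stdio.h>

#define NO_AFFINITY (-1)
#define NUM_CORES   2

/* Stand-in for the kernel logic above: returns the core that should receive
   a cross-core yield interrupt, or -1 if no interrupt is needed. */
static int core_to_poke(int this_core, int task_core, unsigned task_prio,
                        const unsigned cur_prio[NUM_CORES])
{
    if (task_core != NO_AFFINITY) {
        /* Pinned task: only its own core is a candidate. */
        return (cur_prio[task_core] < task_prio) ? task_core : -1;
    }
    /* Unpinned task: pick the first other core running a lower-priority task. */
    for (int i = 0; i < NUM_CORES; i++) {
        if (i != this_core && cur_prio[i] < task_prio) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    unsigned cur[NUM_CORES] = { 5, 1 };                   /* current priority on core 0 and core 1 */
    printf("%d\n", core_to_poke(0, NO_AFFINITY, 3, cur)); /* 1: core 1 runs priority 1 < 3 */
    printf("%d\n", core_to_poke(0, 1, 1, cur));           /* -1: not higher than core 1's task */
    return 0;
}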
@@ -753,7 +785,10 @@ BaseType_t i;
            the other processor will keep running the task it's working on, and only switch to the newer
            task on a timer interrupt. */
         //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
-        if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
+        if( xCoreID != xPortGetCoreID() ) {
+            taskYIELD_OTHER_CORE(xCoreID, uxPriority);
+        }
+        else if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
         {
             taskYIELD_IF_USING_PREEMPTION();
         }
@@ -834,7 +869,7 @@ BaseType_t i;
             after which it is not possible to yield away from this task -
             hence xYieldPending is used to latch that a context switch is
             required. */
-            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
+            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
             portYIELD_WITHIN_API();
         }
         else
@@ -1188,10 +1223,14 @@ BaseType_t i;
             /* The priority of a task other than the currently
             running task is being raised. Is the priority being
             raised above that of the running task? */
-            if( uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 xYieldRequired = pdTRUE;
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -1470,13 +1509,17 @@ BaseType_t i;
             prvAddTaskToReadyList( pxTCB );
 
             /* We may have just resumed a higher priority task. */
-            if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* This yield may not cause the task just resumed to run,
                 but will leave the lists in the correct state for the
                 next yield. */
                 taskYIELD_IF_USING_PREEMPTION_MUX(&xTaskQueueMutex);
             }
+            else if( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -1521,7 +1564,14 @@ BaseType_t i;
             {
                 /* Ready lists can be accessed so move the task from the
                 suspended list to the ready list directly. */
-                if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
+                prvAddTaskToReadyList( pxTCB );
+
+                if ( pxTCB->xCoreID == xPortGetCoreID() )
+                {
+                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
+                }
+                else if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
                     xYieldRequired = pdTRUE;
                 }
@@ -1529,9 +1579,6 @@ BaseType_t i;
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
                 }
-
-                ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
-                prvAddTaskToReadyList( pxTCB );
             }
             else
             {
@@ -1732,11 +1779,16 @@ BaseType_t xAlreadyYielded = pdFALSE;
                 /* If the moved task has a priority higher than the current
                 task then a yield must be performed. */
-                if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
-                    xYieldPending = pdTRUE;
+                    /* We can schedule the awoken task on this CPU. */
+                    xYieldPending[xPortGetCoreID()] = pdTRUE;
                     break;
                 }
+                else if ( pxTCB->xCoreID != xPortGetCoreID() )
+                {
+                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+                }
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
@@ -1753,7 +1805,7 @@ BaseType_t xAlreadyYielded = pdFALSE;
         {
             if( xTaskIncrementTick() != pdFALSE )
             {
-                xYieldPending = pdTRUE;
+                xYieldPending[ xPortGetCoreID() ] = pdTRUE;
             }
             else
             {
@@ -1767,7 +1819,7 @@ BaseType_t xAlreadyYielded = pdFALSE;
             mtCOVERAGE_TEST_MARKER();
         }
 
-        if( xYieldPending == pdTRUE )
+        if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
         {
             #if( configUSE_PREEMPTION != 0 )
             {
@@ -2135,7 +2187,7 @@ BaseType_t xSwitchRequired = pdFALSE;
     #if ( configUSE_PREEMPTION == 1 )
     {
-        if( xYieldPending != pdFALSE )
+        if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
         {
             xSwitchRequired = pdTRUE;
         }
@@ -2251,11 +2303,11 @@ void vTaskSwitchContext( void )
     {
         /* The scheduler is currently suspended - do not allow a context
         switch. */
-        xYieldPending = pdTRUE;
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
     else
     {
-        xYieldPending = pdFALSE;
+        xYieldPending[ xPortGetCoreID() ] = pdFALSE;
         traceTASK_SWITCHED_OUT();
 
         #if ( configGENERATE_RUN_TIME_STATS == 1 )
@@ -2610,16 +2662,16 @@ BaseType_t xReturn;
         taskEXIT_CRITICAL(&xTaskQueueMutex);
     }
 
-    if( pxUnblockedTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
-        /* Return true if the task removed from the event list has a higher
-        priority than the calling task. This allows the calling task to know if
-        it should force a context switch now. */
+        /* We can schedule the awoken task on this CPU. */
+        xYieldPending[xPortGetCoreID()] = pdTRUE;
         xReturn = pdTRUE;
-
-        /* Mark that a yield is pending in case the user is not using the
-        "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
-        xYieldPending = pdTRUE;
+    }
+    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
+    {
+        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
+        xReturn = pdFALSE;
     }
     else
     {
@@ -2670,17 +2722,16 @@ BaseType_t xReturn;
     ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
     prvAddTaskToReadyList( pxUnblockedTCB );
 
-    if( pxUnblockedTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
-        /* Return true if the task removed from the event list has
-        a higher priority than the calling task. This allows
-        the calling task to know if it should force a context
-        switch now. */
+        /* We can schedule the awoken task on this CPU. */
+        xYieldPending[xPortGetCoreID()] = pdTRUE;
         xReturn = pdTRUE;
-
-        /* Mark that a yield is pending in case the user is not using the
-        "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
-        xYieldPending = pdTRUE;
+    }
+    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
+    {
+        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
+        xReturn = pdFALSE;
     }
     else
     {
@@ -2751,7 +2802,7 @@ BaseType_t xReturn;
 void vTaskMissedYield( void )
 {
-    xYieldPending = pdTRUE;
+    xYieldPending[ xPortGetCoreID() ] = pdTRUE;
 }
 /*-----------------------------------------------------------*/
@@ -2921,7 +2972,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
         /* A task was made ready while the scheduler was suspended. */
         eReturn = eAbortSleep;
     }
-    else if( xYieldPending != pdFALSE )
+    else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
     {
         /* A yield was pended while the scheduler was suspended. */
         eReturn = eAbortSleep;
@@ -3597,12 +3648,6 @@ TCB_t *pxTCB;
 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
 /*-----------------------------------------------------------*/
 
-/*
-ToDo: Mutexes haven't been tested or adapted to multicore at all.
-In fact, nothing below this line has/is.
-*/
-
 #if ( configUSE_MUTEXES == 1 )
 
     void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
@@ -4434,12 +4479,16 @@ TickType_t uxReturn;
             /* The task should not have been on an event list. */
             configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
 
-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
                 portYIELD_WITHIN_API();
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -4530,7 +4579,7 @@ TickType_t uxReturn;
                 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
             }
 
-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
@@ -4539,6 +4588,10 @@ TickType_t uxReturn;
                     *pxHigherPriorityTaskWoken = pdTRUE;
                 }
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -4593,8 +4646,8 @@ TickType_t uxReturn;
                 this task pending until the scheduler is resumed. */
                 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
             }
 
-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
@@ -4603,6 +4656,10 @@ TickType_t uxReturn;
                     *pxHigherPriorityTaskWoken = pdTRUE;
                 }
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
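Taken together, these tasks.c changes mean that readying a task bound to the other core now pokes that core immediately instead of waiting for its next tick interrupt. A rough end-to-end sketch of the intended behaviour, assuming the ESP-IDF xTaskCreatePinnedToCore() wrapper and a started scheduler (task name, stack size and priorities are illustrative only):

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"

static QueueHandle_t q;

static void worker(void *arg)                 /* pinned to core 1 below */
{
    uint32_t v;
    for (;;) {
        xQueueReceive(q, &v, portMAX_DELAY);  /* blocks on core 1 */
    }
}

void example_start(void)                      /* assumed to run on core 0 */
{
    q = xQueueCreate(4, sizeof(uint32_t));
    xTaskCreatePinnedToCore(worker, "worker", 2048, NULL, 10, NULL, 1);

    uint32_t v = 42;
    /* Unblocking 'worker' from core 0 goes through xTaskRemoveFromEventList(),
       which sees a different xCoreID and calls taskYIELD_OTHER_CORE(); that
       raises the cross-core interrupt so core 1 can switch right away. */
    xQueueSend(q, &v, 0);
}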