Merge branch 'feature/crosscore_int' into 'master'
Add cross-core int to accelerate tasks being awoken from another CPU.

This adds a per-CPU interrupt that can be used to poke the CPU to go do something. In this case all that is implemented is a request to yield the current task, used in case a CPU unblocks a task that runs on another CPU. This gets rid of the limitation that inter-CPU communication using queues, muxes etc. can take up to a FreeRTOS tick to happen.

Specs: sending an int in a queue of length 1 (essentially a semaphore) as quickly as possible (with just a small delay in the sender, to make sure the receiver task gets swapped out) for 10 seconds. The numbers indicate the amount of ints transferred.

Old code: CPU0->CPU0: 42986, CPU0->CPU1: 2999
New code: CPU0->CPU0: 42868, CPU0->CPU1: 62073

See merge request !155
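The benchmark above is described only in prose; a minimal, hypothetical sketch of how such a measurement could be reproduced on ESP-IDF is shown below. The length-1 queue and the 10-second window come from the description, but the task names, stack sizes, priorities and the busy-wait delay are assumptions, not taken from the original test.

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "esp_log.h"

//Hypothetical benchmark sketch (not part of this commit): bounce integers through a
//length-1 queue from a sender pinned to CPU0 to a receiver pinned to CPU1, then
//report how many items made it across in 10 seconds.
static QueueHandle_t bench_queue;
static volatile uint32_t bench_received;

static void bench_sender(void *arg)
{
    uint32_t n = 0;
    while (1) {
        xQueueSend(bench_queue, &n, portMAX_DELAY);
        n++;
        //Small busy-wait so the receiver task actually blocks and gets re-awoken each time.
        for (volatile int i = 0; i < 100; i++) { }
    }
}

static void bench_receiver(void *arg)
{
    uint32_t n;
    while (1) {
        if (xQueueReceive(bench_queue, &n, portMAX_DELAY) == pdTRUE) {
            bench_received++;
        }
    }
}

void app_main(void)
{
    bench_queue = xQueueCreate(1, sizeof(uint32_t));
    xTaskCreatePinnedToCore(bench_sender, "bench_send", 2048, NULL, 5, NULL, 0);   //PRO CPU
    xTaskCreatePinnedToCore(bench_receiver, "bench_recv", 2048, NULL, 5, NULL, 1); //APP CPU
    vTaskDelay(pdMS_TO_TICKS(10000));
    ESP_LOGI("bench", "ints transferred in 10 s: %u", (unsigned) bench_received);
}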
@@ -41,6 +41,7 @@
 #include "esp_event.h"
 #include "esp_spi_flash.h"
 #include "esp_ipc.h"
+#include "esp_crosscore_int.h"
 #include "esp_log.h"
 #include "esp_vfs_dev.h"
 #include "esp_newlib.h"
@@ -168,6 +169,9 @@ void start_cpu0_default(void)
     _GLOBAL_REENT->_stderr = fopen(default_uart_dev, "w");
     _GLOBAL_REENT->_stdin = fopen(default_uart_dev, "r");
     do_global_ctors();
+#if !CONFIG_FREERTOS_UNICORE
+    esp_crosscore_int_init();
+#endif
     esp_ipc_init();
     spi_flash_init();

@@ -188,6 +192,7 @@ void start_cpu1_default(void)
     while (port_xSchedulerRunning[0] == 0) {
        ;
     }
+    esp_crosscore_int_init();
     ESP_LOGI(TAG, "Starting scheduler on APP CPU.");
     xPortStartScheduler();
 }
components/esp32/crosscore_int.c (new file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <string.h>

#include "esp_attr.h"
#include "esp_err.h"
#include "esp_intr.h"

#include "rom/ets_sys.h"
#include "rom/uart.h"

#include "soc/cpu.h"
#include "soc/dport_reg.h"
#include "soc/io_mux_reg.h"
#include "soc/rtc_cntl_reg.h"

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/portmacro.h"


#define REASON_YIELD (1<<0)

static portMUX_TYPE reasonSpinlock = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t reason[ portNUM_PROCESSORS ];


/*
ToDo: There is a small chance the CPU already has yielded when this ISR is serviced. In that case, it's running the intended task but
the ISR will cause it to switch _away_ from it. portYIELD_FROM_ISR will probably just schedule the task again, but have to check that.
*/
static void esp_crosscore_isr(void *arg) {
    uint32_t myReasonVal;
#if 0
    //A pointer to the correct reason array item is passed to this ISR.
    volatile uint32_t *myReason=arg;
#else
    //The previous line does not work yet, the interrupt code needs work to understand two separate interrupt and argument
    //tables... this is a valid but slightly less optimal replacement.
    volatile uint32_t *myReason=&reason[xPortGetCoreID()];
#endif
    //Clear the interrupt first.
    if (xPortGetCoreID()==0) {
        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
    } else {
        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
    }
    //Grab the reason and clear it.
    portENTER_CRITICAL(&reasonSpinlock);
    myReasonVal=*myReason;
    *myReason=0;
    portEXIT_CRITICAL(&reasonSpinlock);

    //Check what we need to do.
    if (myReasonVal&REASON_YIELD) {
        portYIELD_FROM_ISR();
    }
}

//Initialize the crosscore interrupt on this core. Call this once
//on each active core.
void esp_crosscore_int_init() {
    portENTER_CRITICAL(&reasonSpinlock);
    reason[xPortGetCoreID()]=0;
    portEXIT_CRITICAL(&reasonSpinlock);
    ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
    if (xPortGetCoreID()==0) {
        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
    } else {
        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
    }
    xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
    ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
}

void esp_crosscore_int_send_yield(int coreId) {
    assert(coreId<portNUM_PROCESSORS);
    //Mark the reason we interrupt the other CPU
    portENTER_CRITICAL(&reasonSpinlock);
    reason[coreId]|=REASON_YIELD;
    portEXIT_CRITICAL(&reasonSpinlock);
    //Poke the other CPU.
    if (coreId==0) {
        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
    } else {
        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
    }
}
components/esp32/include/esp_crosscore_int.h (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __ESP_CROSSCORE_INT_H
#define __ESP_CROSSCORE_INT_H


/**
 * Initialize the crosscore interrupt system for this CPU.
 * This needs to be called once on every CPU that is used
 * by FreeRTOS.
 *
 * If multicore FreeRTOS support is enabled, this will be
 * called automatically by the startup code and should not
 * be called manually.
 */
void esp_crosscore_int_init();


/**
 * Send an interrupt to a CPU indicating it should yield its
 * currently running task in favour of a higher-priority task
 * that presumably just woke up.
 *
 * This is used internally by FreeRTOS in multicore mode
 * and should not be called by the user.
 *
 * @param coreID Core that should do the yielding
 */
void esp_crosscore_int_send_yield(int coreId);

#endif
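The header above only declares the two entry points; how they are wired up is visible in the rest of the diff (the startup code initializes the interrupt on each core, and the FreeRTOS port forwards yield requests to it). A condensed, hypothetical sketch of that flow follows; the example_-prefixed names are placeholders, not part of the commit.

#include "freertos/FreeRTOS.h"
#include "esp_crosscore_int.h"

//Condensed, hypothetical sketch of the wiring added elsewhere in this commit.
void example_start_cpu0(void)              //startup code running on the PRO CPU
{
#if !CONFIG_FREERTOS_UNICORE
    esp_crosscore_int_init();              //hook the FROM_CPU interrupt on this core
#endif
    //...continue with the rest of CPU0 startup...
}

void example_start_cpu1(void)              //startup code running on the APP CPU
{
    esp_crosscore_int_init();              //same init before this core starts its scheduler
    //...start the scheduler on CPU1...
}

//The FreeRTOS port layer forwards cross-core yield requests to the new driver:
void example_vPortYieldOtherCore(BaseType_t coreid)
{
    esp_crosscore_int_send_yield((int) coreid);
}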
@@ -133,6 +133,14 @@ menuconfig ENABLE_MEMORY_DEBUG
     help
         Enable this option to show malloc heap block and memory crash detect

+config FREERTOS_ISR_STACKSIZE
+    int "ISR stack size"
+    range 1536 32768
+    default 1536
+    help
+        The interrupt handlers have their own stack. The size of the stack can be defined here.
+        Each processor has its own stack, so the total size occupied will be twice this.
+
 menuconfig FREERTOS_DEBUG_INTERNALS
     bool "Debug FreeRTOS internals"
     default n
@@ -158,6 +166,8 @@ config FREERTOS_PORTMUX_DEBUG_RECURSIVE
         If enabled, additional debug information will be printed for recursive
         portMUX usage.


 endif # FREERTOS_DEBUG_INTERNALS

 endmenu
@@ -180,7 +180,7 @@
 /* The Xtensa port uses a separate interrupt stack. Adjust the stack size */
 /* to suit the needs of your specific application. */
 #ifndef configISR_STACK_SIZE
-#define configISR_STACK_SIZE 1024//2048
+#define configISR_STACK_SIZE CONFIG_FREERTOS_ISR_STACKSIZE
 #endif

 /* Minimal heap size to make sure examples can run on memory limited
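With the Kconfig option above in place, FreeRTOSConfig.h now consumes the generated CONFIG_FREERTOS_ISR_STACKSIZE symbol, so the per-CPU ISR stack can be tuned per project. A hypothetical sdkconfig fragment (the 2048 value is purely illustrative; the default stays 1536):

# Hypothetical sdkconfig fragment, not part of this commit.
CONFIG_FREERTOS_ISR_STACKSIZE=2048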
@@ -179,6 +179,14 @@ BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION;
  */
 void vPortEndScheduler( void ) PRIVILEGED_FUNCTION;


+/*
+ * Send an interrupt to another core in order to make the task running
+ * on it yield for a higher-priority task.
+ */
+
+void vPortYieldOtherCore( BaseType_t coreid) PRIVILEGED_FUNCTION;
+
 /*
  * The structures and methods of manipulating the MPU are contained within the
  * port layer.
@@ -103,6 +103,8 @@

 #include "esp_panic.h"

+#include "esp_crosscore_int.h"
+
 /* Defined in portasm.h */
 extern void _frxt_tick_timer_init(void);

@@ -228,6 +230,12 @@ BaseType_t xPortSysTickHandler( void )

     return ret;
 }

+
+void vPortYieldOtherCore( BaseType_t coreid ) {
+    esp_crosscore_int_send_yield( coreid );
+}
+
 /*-----------------------------------------------------------*/

 /*
@@ -261,7 +261,7 @@ PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
-PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
+PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */

 #if ( INCLUDE_vTaskDelete == 1 )

@@ -288,7 +288,7 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) 0U;
 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
 PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
-PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
+PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = portMAX_DELAY;
@@ -442,6 +442,9 @@ count overflows. */
     vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xGenericListItem ) )
 /*-----------------------------------------------------------*/

+
+#define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
+
 /*
  * Several functions take an TaskHandle_t parameter that can optionally be NULL,
  * where NULL is used to indicate that the handle of the currently executing
@@ -614,6 +617,34 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode

 /*-----------------------------------------------------------*/

+/*
+ * This routine tries to send an interrupt to another core if needed to make it execute a task
+ * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
+ * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
+ * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
+ * at most a superfluous interrupt is generated.
+ */
+void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
+{
+    BaseType_t i;
+    if (xCoreID != tskNO_AFFINITY) {
+        if ( pxCurrentTCB[ xCoreID ]->uxPriority < uxPriority ) {
+            vPortYieldOtherCore( xCoreID );
+        }
+    }
+    else
+    {
+        /* The task has no affinity. See if we can find a CPU to put it on.*/
+        for (i=0; i<portNUM_PROCESSORS; i++) {
+            if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
+            {
+                vPortYieldOtherCore( i );
+                break;
+            }
+        }
+    }
+}
+
 #if( configSUPPORT_STATIC_ALLOCATION == 1 )

     TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
@@ -1087,7 +1118,21 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
-            //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
-            if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < pxNewTCB->uxPriority )
-            {
-                taskYIELD_IF_USING_PREEMPTION();
+            /* Scheduler is running. If the created task is of a higher priority than an executing task
+               then it should run now.
+               No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
+            */
+            if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < pxNewTCB->uxPriority )
+            {
+                taskYIELD_IF_USING_PREEMPTION();
+            }
+            else if( xCoreID != xPortGetCoreID() ) {
+                taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
             }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
             }
         }
         else
         {
@@ -1163,7 +1208,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
             after which it is not possible to yield away from this task -
             hence xYieldPending is used to latch that a context switch is
             required. */
-            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
+            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[xPortGetCoreID()] );
             portYIELD_WITHIN_API();
         }
         else
@@ -1517,10 +1562,14 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
                 /* The priority of a task other than the currently
                 running task is being raised. Is the priority being
                 raised above that of the running task? */
-                if( uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
                     xYieldRequired = pdTRUE;
                 }
+                else if ( pxTCB->xCoreID != xPortGetCoreID() )
+                {
+                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
+                }
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
@@ -1742,7 +1791,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
         if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
         {
             /* Has the task already been resumed from within an ISR? */
-            if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+            if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
             {
                 /* Is it in the suspended list because it is in the Suspended
                 state, or because is is blocked with no timeout? */
@@ -1799,13 +1848,17 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
                 prvAddTaskToReadyList( pxTCB );

                 /* We may have just resumed a higher priority task. */
-                if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
                     /* This yield may not cause the task just resumed to run,
                     but will leave the lists in the correct state for the
                     next yield. */
                     taskYIELD_IF_USING_PREEMPTION_MUX(&xTaskQueueMutex);
                 }
+                else if( pxTCB->xCoreID != xPortGetCoreID() )
+                {
+                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+                }
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
|
||||
|
||||
#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
|
||||
|
||||
/* ToDo: Make this multicore-compatible. */
|
||||
BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
|
||||
{
|
||||
BaseType_t xYieldRequired = pdFALSE;
|
||||
@@ -1850,24 +1902,28 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
|
||||
{
|
||||
/* Ready lists can be accessed so move the task from the
|
||||
suspended list to the ready list directly. */
|
||||
if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
|
||||
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
|
||||
prvAddTaskToReadyList( pxTCB );
|
||||
|
||||
if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
|
||||
{
|
||||
xYieldRequired = pdTRUE;
|
||||
}
|
||||
else if ( pxTCB->xCoreID != xPortGetCoreID() )
|
||||
{
|
||||
taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
|
||||
}
|
||||
else
|
||||
{
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
|
||||
( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
|
||||
prvAddTaskToReadyList( pxTCB );
|
||||
}
|
||||
else
|
||||
{
|
||||
/* The delayed or ready lists cannot be accessed so the task
|
||||
is held in the pending ready list until the scheduler is
|
||||
unsuspended. */
|
||||
vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
|
||||
vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -2052,18 +2108,19 @@ BaseType_t xAlreadyYielded = pdFALSE;
         {
             /* Move any readied tasks from the pending list into the
             appropriate ready list. */
-            while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
+            while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
             {
-                pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
+                pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
                 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
                 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
                 prvAddTaskToReadyList( pxTCB );

                 /* If the moved task has a priority higher than the current
                 task then a yield must be performed. */
-                if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
-                    xYieldPending = pdTRUE;
+                    /* We can schedule the awoken task on this CPU. */
+                    xYieldPending[xPortGetCoreID()] = pdTRUE;
                     break;
                 }
                 else
@@ -2082,7 +2139,7 @@ BaseType_t xAlreadyYielded = pdFALSE;
             {
                 if( xTaskIncrementTick() != pdFALSE )
                 {
-                    xYieldPending = pdTRUE;
+                    xYieldPending[ xPortGetCoreID() ] = pdTRUE;
                 }
                 else
                 {
@@ -2096,7 +2153,7 @@ BaseType_t xAlreadyYielded = pdFALSE;
                 mtCOVERAGE_TEST_MARKER();
             }

-            if( xYieldPending == pdTRUE )
+            if( xYieldPending[ xPortGetCoreID() ] == pdTRUE )
             {
                 #if( configUSE_PREEMPTION != 0 )
                 {
@@ -2480,7 +2537,7 @@ BaseType_t xSwitchRequired = pdFALSE;

     #if ( configUSE_PREEMPTION == 1 )
     {
-        if( xYieldPending != pdFALSE )
+        if( xYieldPending [ xPortGetCoreID() ] != pdFALSE )
         {
             xSwitchRequired = pdTRUE;
         }
@@ -2596,11 +2653,11 @@ void vTaskSwitchContext( void )
     {
         /* The scheduler is currently suspended - do not allow a context
         switch. */
-        xYieldPending = pdTRUE;
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
     else
     {
-        xYieldPending = pdFALSE;
+        xYieldPending[ xPortGetCoreID() ] = pdFALSE;
         traceTASK_SWITCHED_OUT();

         #if ( configGENERATE_RUN_TIME_STATS == 1 )
@@ -2951,11 +3008,11 @@ BaseType_t xReturn;
         /* The delayed and ready lists cannot be accessed, so hold this task
         pending until the scheduler is resumed. */
         taskENTER_CRITICAL(&xTaskQueueMutex);
-        vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
+        vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxUnblockedTCB->xEventListItem ) );
         taskEXIT_CRITICAL(&xTaskQueueMutex);
     }

-    if( pxUnblockedTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
         /* Return true if the task removed from the event list has a higher
         priority than the calling task. This allows the calling task to know if
@@ -2964,7 +3021,12 @@ BaseType_t xReturn;

         /* Mark that a yield is pending in case the user is not using the
         "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
-        xYieldPending = pdTRUE;
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
+    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
+    {
+        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
+        xReturn = pdFALSE;
+    }
     else
     {
@@ -3015,7 +3077,7 @@ BaseType_t xReturn;
     ( void ) uxListRemove( &( pxUnblockedTCB->xGenericListItem ) );
     prvAddTaskToReadyList( pxUnblockedTCB );

-    if( pxUnblockedTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
         /* Return true if the task removed from the event list has
         a higher priority than the calling task. This allows
@@ -3025,7 +3087,12 @@ BaseType_t xReturn;

         /* Mark that a yield is pending in case the user is not using the
         "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
-        xYieldPending = pdTRUE;
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
+    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
+    {
+        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
+        xReturn = pdFALSE;
+    }
     else
     {
@@ -3096,7 +3163,7 @@ BaseType_t xReturn;

 void vTaskMissedYield( void )
 {
-    xYieldPending = pdTRUE;
+    xYieldPending[ xPortGetCoreID() ] = pdTRUE;
 }
 /*-----------------------------------------------------------*/

@@ -3261,12 +3328,12 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
     eSleepModeStatus eReturn = eStandardSleep;
     taskENTER_CRITICAL(&xTaskQueueMutex);

-    if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
+    if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
     {
         /* A task was made ready while the scheduler was suspended. */
         eReturn = eAbortSleep;
     }
-    else if( xYieldPending != pdFALSE )
+    else if( xYieldPending[ xPortGetCoreID() ] != pdFALSE )
     {
         /* A yield was pended while the scheduler was suspended. */
         eReturn = eAbortSleep;
@@ -3390,7 +3457,7 @@ UBaseType_t uxPriority;

     vListInitialise( &xDelayedTaskList1 );
     vListInitialise( &xDelayedTaskList2 );
-    vListInitialise( &xPendingReadyList );
+    vListInitialise( &xPendingReadyList[ xPortGetCoreID() ] );

     #if ( INCLUDE_vTaskDelete == 1 )
     {
@@ -3781,12 +3848,6 @@ TCB_t *pxTCB;
 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
 /*-----------------------------------------------------------*/

-/*
-ToDo: Mutexes haven't been tested or adapted to multicore at all.
-
-In fact, nothing below this line has/is.
-*/
-
 #if ( configUSE_MUTEXES == 1 )

     void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
@@ -4624,12 +4685,16 @@ TickType_t uxReturn;
             /* The task should not have been on an event list. */
             configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
                 portYIELD_WITHIN_API();
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -4717,10 +4782,10 @@ TickType_t uxReturn;
             {
                 /* The delayed and ready lists cannot be accessed, so hold
                 this task pending until the scheduler is resumed. */
-                vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+                vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
             }

-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
@@ -4729,6 +4794,10 @@ TickType_t uxReturn;
                     *pxHigherPriorityTaskWoken = pdTRUE;
                 }
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -4781,10 +4850,10 @@ TickType_t uxReturn;
             {
                 /* The delayed and ready lists cannot be accessed, so hold
                 this task pending until the scheduler is resumed. */
-                vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+                vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
             }

-            if( pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
             {
                 /* The notified task has a priority above the currently
                 executing task so a yield is required. */
@@ -4793,6 +4862,10 @@ TickType_t uxReturn;
                     *pxHigherPriorityTaskWoken = pdTRUE;
                 }
             }
+            else if ( pxTCB->xCoreID != xPortGetCoreID() )
+            {
+                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();