Add checks for tasks woken up on other CPUs where needed, make xYieldPending and xPendingReadyList per-processor, add a configurable ISR stack size to Kconfig, and in general fix the entire wake-up-task-on-other-CPU-by-interrupt implementation

Jeroen Domburg
2016-10-27 16:07:47 +08:00
parent c6477ff10d
commit 3371083c16
5 changed files with 117 additions and 81 deletions

components/esp32/crosscore_int.c

@@ -44,64 +44,60 @@ ToDo: There is a small chance the CPU already has yielded when this ISR is serviced,
 the ISR will cause it to switch _away_ from it. portYIELD_FROM_ISR will probably just schedule the task again, but have to check that.
 */
 static void esp_crosscore_isr(void *arg) {
     volatile uint32_t myReasonVal;
 #if 0
     //A pointer to the correct reason array item is passed to this ISR.
     volatile uint32_t *myReason=arg;
 #else
     //Does not work yet, the interrupt code needs work to understand two separate interrupt and argument
     //tables...
     volatile uint32_t *myReason=&reason[xPortGetCoreID()];
 #endif
     //Clear the interrupt first.
     if (xPortGetCoreID()==0) {
         WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
     } else {
         WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
     }
     //Grab the reason and clear it.
     portENTER_CRITICAL(&reasonSpinlock);
     myReasonVal=*myReason;
     *myReason=0;
     portEXIT_CRITICAL(&reasonSpinlock);
     //Check what we need to do.
     if (myReasonVal&REASON_YIELD) {
         portYIELD_FROM_ISR();
     }
-    ets_printf("recv yield\n");
 }
 
 //Initialize the crosscore interrupt on this core. Call this once
 //on each active core.
 void esp_crosscore_int_init() {
     portENTER_CRITICAL(&reasonSpinlock);
-    ets_printf("init cpu %d\n", xPortGetCoreID());
     reason[xPortGetCoreID()]=0;
     portEXIT_CRITICAL(&reasonSpinlock);
     ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
     if (xPortGetCoreID()==0) {
         intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
     } else {
         intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
     }
     xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
     ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
 }
 
 void esp_crosscore_int_send_yield(int coreId) {
-    ets_printf("send yield\n");
     assert(coreId<portNUM_PROCESSORS);
     //Mark the reason we interrupt the other CPU
     portENTER_CRITICAL(&reasonSpinlock);
     reason[coreId]|=REASON_YIELD;
     portEXIT_CRITICAL(&reasonSpinlock);
     //Poke the other CPU.
     if (coreId==0) {
         WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
     } else {
         WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
     }
 }
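
Taken together: esp_crosscore_int_send_yield() latches REASON_YIELD under the spinlock and writes the target CPU's DPORT interrupt register, while esp_crosscore_isr() on the target CPU clears that latch, consumes the reason word, and calls portYIELD_FROM_ISR(). A minimal sketch of the effect from application code, hypothetical and not part of this commit (user code never calls esp_crosscore_int_send_yield() directly; the scheduler does):

//Hypothetical demo, assuming ESP-IDF with both cores enabled. The queue send
//on core 0 readies a task pinned to core 1; inside the kernel this ends in
//esp_crosscore_int_send_yield(1), so core 1 reschedules right away instead
//of waiting for its next tick interrupt.
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"

static QueueHandle_t s_queue;

static void pinned_task(void *arg)          //runs on core 1 only
{
    uint32_t value;
    for (;;) {
        if (xQueueReceive(s_queue, &value, portMAX_DELAY) == pdTRUE) {
            //...handle value...
        }
    }
}

void app_main()                             //runs on core 0
{
    s_queue = xQueueCreate(8, sizeof(uint32_t));
    xTaskCreatePinnedToCore(pinned_task, "pinned", 2048, NULL, 10, NULL, 1);
    uint32_t value = 42;
    xQueueSend(s_queue, &value, 0);         //wakes pinned_task across cores
}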

components/esp32/include/esp_crosscore_int.h

@@ -1,8 +1,35 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __ESP_CROSSCORE_INT_H
#define __ESP_CROSSCORE_INT_H
/**
* Initialize the crosscore interrupt system for this CPU.
* This needs to be called once on every CPU that is used
* by FreeRTOS.
*/
void esp_crosscore_int_init();
/**
* Send an interrupt to a CPU indicating it should yield its
* currently running task in favour of a higher-priority task
* that presumably just woke up.
*
* @param coreID Core that should do the yielding
*/
void esp_crosscore_int_send_yield(int coreId);
#endif
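
Both declarations are consumed by the startup and scheduler code. A minimal sketch of the intended call pattern, under the assumption that init for the second core is run from a task pinned there (illustrative helper names, not esp-idf functions):

//Illustrative only: esp_crosscore_int_init() must run once on the core it
//initializes, so init for core 1 is done from a task pinned to core 1.
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_crosscore_int.h"

static void init_on_core1(void *arg)
{
    esp_crosscore_int_init();   //clears reason[1], routes and enables the int on core 1
    vTaskDelete(NULL);
}

void init_crosscore_ints()
{
    esp_crosscore_int_init();   //core 0: the calling core
    xTaskCreatePinnedToCore(init_on_core1, "xcinit", 2048, NULL,
                            configMAX_PRIORITIES - 1, NULL, 1);
}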

components/freertos/Kconfig

@@ -197,6 +197,15 @@ config FREERTOS_PORTMUX_DEBUG_RECURSIVE
         If enabled, additional debug information will be printed for recursive
         portMUX usage.
 
+config FREERTOS_ISR_STACKSIZE
+    int "ISR stack size"
+    range 1536 32768
+    default 1536
+    help
+        The interrupt handlers have their own stack. The size of the stack can be defined here.
+        Each processor has its own stack, so the total size occupied will be twice this.
+
 endif # FREERTOS_DEBUG_INTERNALS
 
 endmenu
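
Once selected through menuconfig, the option is emitted into sdkconfig as a plain integer, shown here with the default kept:

CONFIG_FREERTOS_ISR_STACKSIZE=1536

As the help text notes, each processor gets its own interrupt stack, so on a dual-core part this reserves twice the configured amount of static RAM.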

components/freertos/include/freertos/FreeRTOSConfig.h

@@ -180,7 +180,7 @@
 /* The Xtensa port uses a separate interrupt stack. Adjust the stack size */
 /* to suit the needs of your specific application. */
 #ifndef configISR_STACK_SIZE
-#define configISR_STACK_SIZE 1024//2048
+#define configISR_STACK_SIZE CONFIG_FREERTOS_ISR_STACKSIZE
 #endif
 
 /* Minimal heap size to make sure examples can run on memory limited
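
This ties configISR_STACK_SIZE to the new Kconfig option instead of the hard-coded 1024 bytes. A sketch of how a port might consume the value, with illustrative names (the actual Xtensa port reserves its interrupt stacks elsewhere):

//Illustrative only: one statically allocated interrupt stack per CPU,
//sized in bytes by configISR_STACK_SIZE from the Kconfig option.
#include "freertos/FreeRTOS.h"

static StackType_t s_isr_stack[ portNUM_PROCESSORS ]
                              [ configISR_STACK_SIZE / sizeof(StackType_t) ];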

components/freertos/tasks.c

@@ -236,7 +236,7 @@ PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
 PRIVILEGED_DATA static List_t xDelayedTaskList2;                    /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;         /*< Points to the delayed task list currently being used. */
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
-PRIVILEGED_DATA static List_t xPendingReadyList;                    /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
+PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
 
 #if ( INCLUDE_vTaskDelete == 1 )
@@ -591,7 +591,7 @@ static void prvResetNextTaskUnblockTime( void );
 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
 * at most a superfluous interrupt is generated.
 */
-static void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
+void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
 {
 BaseType_t i;
 
     if (xCoreID != tskNO_AFFINITY) {
@@ -781,17 +781,15 @@ BaseType_t i;
         {
             /* Scheduler is running. If the created task is of a higher priority than an executing task
             then it should run now.
-            ToDo: This only works for the current core. If a task is scheduled on an other processor,
-            the other processor will keep running the task it's working on, and only switch to the newer
-            task on a timer interrupt. */
-            //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
-            if( xCoreID != xPortGetCoreID() ) {
-                taskYIELD_OTHER_CORE(xCoreID, uxPriority);
-            }
-            else if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
+            No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
+            */
+            if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
             {
                 taskYIELD_IF_USING_PREEMPTION();
             }
+            else if( xCoreID != xPortGetCoreID() ) {
+                taskYIELD_OTHER_CORE(xCoreID, uxPriority);
+            }
             else
             {
                 mtCOVERAGE_TEST_MARKER();
@@ -1452,7 +1450,7 @@ BaseType_t i;
             if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
             {
                 /* Has the task already been resumed from within an ISR? */
-                if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+                if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
                 {
                     /* Is it in the suspended list because it is in the Suspended
                     state, or because it is blocked with no timeout? */
@@ -1544,7 +1542,6 @@ BaseType_t i;
 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
 
-    /* ToDo: Make this multicore-compatible. */
     BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
     {
     BaseType_t xYieldRequired = pdFALSE;
@@ -1567,14 +1564,14 @@ BaseType_t i;
                 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
                 prvAddTaskToReadyList( pxTCB );
 
-                if ( pxTCB->xCoreID == xPortGetCoreID() )
-                {
-                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
-                }
-                else if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+                if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                 {
                     xYieldRequired = pdTRUE;
                 }
+                else if ( pxTCB->xCoreID != xPortGetCoreID() )
+                {
+                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
+                }
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
@@ -1585,7 +1582,7 @@ BaseType_t i;
                 /* The delayed or ready lists cannot be accessed so the task
                 is held in the pending ready list until the scheduler is
                 unsuspended. */
-                vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+                vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
             }
         }
         else
@@ -1770,9 +1767,9 @@ BaseType_t xAlreadyYielded = pdFALSE;
         {
             /* Move any readied tasks from the pending list into the
             appropriate ready list. */
-            while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
+            while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
             {
-                pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
+                pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
                 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
                 ( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
                 prvAddTaskToReadyList( pxTCB );
@@ -1785,10 +1782,6 @@ BaseType_t xAlreadyYielded = pdFALSE;
                     xYieldPending[ xPortGetCoreID() ] = pdTRUE;
                     break;
                 }
-                else if ( pxTCB->xCoreID != xPortGetCoreID() )
-                {
-                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
-                }
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
@@ -2658,15 +2651,20 @@ BaseType_t xReturn;
         /* The delayed and ready lists cannot be accessed, so hold this task
         pending until the scheduler is resumed. */
         taskENTER_CRITICAL(&xTaskQueueMutex);
-        vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
+        vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxUnblockedTCB->xEventListItem ) );
         taskEXIT_CRITICAL(&xTaskQueueMutex);
     }
 
     if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
-        /* We can schedule the awoken task on this CPU. */
-        xYieldPending[xPortGetCoreID()] = pdTRUE;
+        /* Return true if the task removed from the event list has a higher
+        priority than the calling task. This allows the calling task to know if
+        it should force a context switch now. */
+        xReturn = pdTRUE;
+
+        /* Mark that a yield is pending in case the user is not using the
+        "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
     else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
     {
@@ -2724,9 +2722,15 @@ BaseType_t xReturn;
     if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
     {
-        /* We can schedule the awoken task on this CPU. */
-        xYieldPending[xPortGetCoreID()] = pdTRUE;
+        /* Return true if the task removed from the event list has
+        a higher priority than the calling task. This allows
+        the calling task to know if it should force a context
+        switch now. */
+        xReturn = pdTRUE;
+
+        /* Mark that a yield is pending in case the user is not using the
+        "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
     }
     else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
     {
@@ -2967,7 +2971,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 eSleepModeStatus eReturn = eStandardSleep;
 
         taskENTER_CRITICAL(&xTaskQueueMutex);
-        if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
+        if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
         {
             /* A task was made ready while the scheduler was suspended. */
             eReturn = eAbortSleep;
@@ -3210,7 +3214,7 @@ UBaseType_t uxPriority;
     vListInitialise( &xDelayedTaskList1 );
     vListInitialise( &xDelayedTaskList2 );
-    vListInitialise( &xPendingReadyList );
+    vListInitialise( &xPendingReadyList[ xPortGetCoreID() ] );
 
     #if ( INCLUDE_vTaskDelete == 1 )
     {
@@ -4576,7 +4580,7 @@ TickType_t uxReturn;
             {
                 /* The delayed and ready lists cannot be accessed, so hold
                 this task pending until the scheduler is resumed. */
-                vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+                vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
             }
 
             if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
@@ -4644,7 +4648,7 @@ TickType_t uxReturn;
             {
                 /* The delayed and ready lists cannot be accessed, so hold
                 this task pending until the scheduler is resumed. */
-                vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+                vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
             }
 
             if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
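
All of the wake-up paths above now follow one pattern: if the woken task may run on this core (tskCAN_RUN_HERE) and outranks the current task, request a local yield; if it is pinned to the other core, send the cross-core interrupt instead. A condensed sketch of that pattern as it appears inside tasks.c (the helper name is hypothetical; tskCAN_RUN_HERE, taskYIELD_OTHER_CORE, xYieldPending and pxCurrentTCB are the kernel's own symbols):

//Hypothetical condensation of the decision this commit applies at every
//wake-up site in tasks.c; not a function that exists in the source.
static void prvYieldForWokenTask( TCB_t *pxTCB )
{
    if( tskCAN_RUN_HERE( pxTCB->xCoreID ) &&
        pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
    {
        /* The woken task may run here and outranks the running task:
        ask for a local context switch. */
        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
    }
    else if( pxTCB->xCoreID != xPortGetCoreID() )
    {
        /* The task is pinned to the other core: poke that core with the
        cross-core interrupt so it reschedules immediately instead of
        waiting for its next tick. */
        taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
    }
}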