diff --git a/components/esp32/crosscore_int.c b/components/esp32/crosscore_int.c
index a98b13583c..56fe6fe9a7 100644
--- a/components/esp32/crosscore_int.c
+++ b/components/esp32/crosscore_int.c
@@ -44,64 +44,60 @@ ToDo: There is a small chance the CPU already has yielded when this ISR is servi
 the ISR will cause it to switch _away_ from it. portYIELD_FROM_ISR will probably just schedule the task
 again, but have to check that.
 */
 static void esp_crosscore_isr(void *arg) {
-    volatile uint32_t myReasonVal;
+    volatile uint32_t myReasonVal;
 #if 0
-    //A pointer to the correct reason array item is passed to this ISR.
-    volatile uint32_t *myReason=arg;
+    //A pointer to the correct reason array item is passed to this ISR.
+    volatile uint32_t *myReason=arg;
 #else
-    //Does not work yet, the interrupt code needs work to understand two separate interrupt and argument
-    //tables...
-    volatile uint32_t *myReason=&reason[xPortGetCoreID()];
+    //Does not work yet, the interrupt code needs work to understand two separate interrupt and argument
+    //tables...
+    volatile uint32_t *myReason=&reason[xPortGetCoreID()];
 #endif
-    //Clear the interrupt first.
-    if (xPortGetCoreID()==0) {
-        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
-    } else {
-        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
-    }
-    //Grab the reason and clear it.
-    portENTER_CRITICAL(&reasonSpinlock);
-    myReasonVal=*myReason;
-    *myReason=0;
-    portEXIT_CRITICAL(&reasonSpinlock);
+    //Clear the interrupt first.
+    if (xPortGetCoreID()==0) {
+        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
+    } else {
+        WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
+    }
+    //Grab the reason and clear it.
+    portENTER_CRITICAL(&reasonSpinlock);
+    myReasonVal=*myReason;
+    *myReason=0;
+    portEXIT_CRITICAL(&reasonSpinlock);
 
-    //Check what we need to do.
-    if (myReasonVal&REASON_YIELD) {
-        portYIELD_FROM_ISR();
-    }
-
-    ets_printf("recv yield\n");
+    //Check what we need to do.
+    if (myReasonVal&REASON_YIELD) {
+        portYIELD_FROM_ISR();
+    }
 }
 
 //Initialize the crosscore interrupt on this core. Call this once
 //on each active core.
 void esp_crosscore_int_init() {
-    portENTER_CRITICAL(&reasonSpinlock);
-    ets_printf("init cpu %d\n", xPortGetCoreID());
-    reason[xPortGetCoreID()]=0;
-    portEXIT_CRITICAL(&reasonSpinlock);
-    ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
-    if (xPortGetCoreID()==0) {
-        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
-    } else {
-        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
-    }
-    xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
-    ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
+    portENTER_CRITICAL(&reasonSpinlock);
+    reason[xPortGetCoreID()]=0;
+    portEXIT_CRITICAL(&reasonSpinlock);
+    ESP_INTR_DISABLE(ETS_FROM_CPU_INUM);
+    if (xPortGetCoreID()==0) {
+        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR0_SOURCE, ETS_FROM_CPU_INUM);
+    } else {
+        intr_matrix_set(xPortGetCoreID(), ETS_FROM_CPU_INTR1_SOURCE, ETS_FROM_CPU_INUM);
+    }
+    xt_set_interrupt_handler(ETS_FROM_CPU_INUM, esp_crosscore_isr, (void*)&reason[xPortGetCoreID()]);
+    ESP_INTR_ENABLE(ETS_FROM_CPU_INUM);
 }
 
 void esp_crosscore_int_send_yield(int coreId) {
-    ets_printf("send yield\n");
-    assert(coreId<portNUM_PROCESSORS);
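
For context, the mechanism this file implements is a small flag-plus-interrupt protocol: the sending core records REASON_YIELD in the target core's reason word under a spinlock and then triggers the target core's from-CPU interrupt; the target core's ISR acknowledges the interrupt, atomically reads and clears the reason word, and calls portYIELD_FROM_ISR() if a yield was requested. The sketch below is a minimal, hardware-free model of that flow, not the ESP-IDF code itself; spinlock_take()/spinlock_give(), trigger_ipc_interrupt(), clear_ipc_interrupt() and yield_from_isr() are hypothetical stand-ins for the real critical sections, DPORT register writes and port macro.

#include <stdint.h>

#define REASON_YIELD (1 << 0)
#define NUM_CORES    2

/* Hypothetical stand-ins for the real spinlock, DPORT register accesses and port macro. */
extern void spinlock_take(void);
extern void spinlock_give(void);
extern void trigger_ipc_interrupt(int core);   /* like WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_x_REG, ...) */
extern void clear_ipc_interrupt(int core);
extern void yield_from_isr(void);              /* like portYIELD_FROM_ISR() */

static volatile uint32_t reason[NUM_CORES];

/* Sender side: mark why we are interrupting the other core, then poke it. */
void send_yield(int target_core)
{
    spinlock_take();
    reason[target_core] |= REASON_YIELD;
    spinlock_give();
    trigger_ipc_interrupt(target_core);
}

/* Receiver side (runs in the target core's ISR): ack the interrupt,
   atomically consume the reason word, then act on it. */
void crosscore_isr(int my_core)
{
    clear_ipc_interrupt(my_core);

    spinlock_take();
    uint32_t my_reason = reason[my_core];
    reason[my_core] = 0;
    spinlock_give();

    if (my_reason & REASON_YIELD) {
        yield_from_isr();
    }
}

Because both sides take the same lock around the reason word, a request posted while the ISR is draining it is either observed in that pass or re-raised by a fresh interrupt, so it cannot be silently lost.
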
diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c
-			if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
+			/* No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
+			*/
+			if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority )
 			{
 				taskYIELD_IF_USING_PREEMPTION();
 			}
+			else if( xCoreID != xPortGetCoreID() ) {
+				taskYIELD_OTHER_CORE(xCoreID, uxPriority);
+			}
 			else
 			{
 				mtCOVERAGE_TEST_MARKER();
@@ -1452,7 +1450,7 @@ BaseType_t i;
 		if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xGenericListItem ) ) != pdFALSE )
 		{
 			/* Has the task already been resumed from within an ISR? */
-			if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+			if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ xPortGetCoreID() ], &( pxTCB->xEventListItem ) ) == pdFALSE )
 			{
 				/* Is it in the suspended list because it is in the Suspended state, or because is is blocked with no timeout? */
@@ -1544,7 +1542,6 @@ BaseType_t i;
 
 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
 
-/* ToDo: Make this multicore-compatible. */
 	BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
 	{
 	BaseType_t xYieldRequired = pdFALSE;
@@ -1567,14 +1564,14 @@ BaseType_t i;
 					( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
 					prvAddTaskToReadyList( pxTCB );
 
-					if ( pxTCB->xCoreID == xPortGetCoreID() )
-					{
-						taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
-					}
-					else if( pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
+					if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
 					{
 						xYieldRequired = pdTRUE;
 					}
+					else if ( pxTCB->xCoreID != xPortGetCoreID() )
+					{
+						taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
+					}
 					else
 					{
 						mtCOVERAGE_TEST_MARKER();
@@ -1585,7 +1582,7 @@ BaseType_t i;
 				/* The delayed or ready lists cannot be accessed so the task
 				is held in the pending ready list until the scheduler is
 				unsuspended. */
-				vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+				vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
 			}
 		}
 		else
@@ -1770,9 +1767,9 @@ BaseType_t xAlreadyYielded = pdFALSE;
 		{
 			/* Move any readied tasks from the pending list into the
			appropriate ready list. */
-			while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
+			while( listLIST_IS_EMPTY( &xPendingReadyList[ xPortGetCoreID() ] ) == pdFALSE )
 			{
-				pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
+				pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xPortGetCoreID() ] ) );
 				( void ) uxListRemove( &( pxTCB->xEventListItem ) );
 				( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
 				prvAddTaskToReadyList( pxTCB );
@@ -1785,10 +1782,6 @@ BaseType_t xAlreadyYielded = pdFALSE;
 					xYieldPending[xPortGetCoreID()] = pdTRUE;
 					break;
 				}
-				else if ( pxTCB->xCoreID != xPortGetCoreID() )
-				{
-					taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
-				}
 				else
 				{
 					mtCOVERAGE_TEST_MARKER();
@@ -2658,15 +2651,20 @@ BaseType_t xReturn;
 		/* The delayed and ready lists cannot be accessed, so hold this task
 		pending until the scheduler is resumed. */
 		taskENTER_CRITICAL(&xTaskQueueMutex);
-		vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
+		vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxUnblockedTCB->xEventListItem ) );
 		taskEXIT_CRITICAL(&xTaskQueueMutex);
 	}
 
 	if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
 	{
-		/* We can schedule the awoken task on this CPU. */
-		xYieldPending[xPortGetCoreID()] = pdTRUE;
+		/* Return true if the task removed from the event list has a higher
+		priority than the calling task. This allows the calling task to know if
+		it should force a context switch now. */
 		xReturn = pdTRUE;
+
+		/* Mark that a yield is pending in case the user is not using the
+		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+		xYieldPending[ xPortGetCoreID() ] = pdTRUE;
 	}
 	else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
 	{
@@ -2724,9 +2722,15 @@ BaseType_t xReturn;
 
 	if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
 	{
-		/* We can schedule the awoken task on this CPU. */
-		xYieldPending[xPortGetCoreID()] = pdTRUE;
+		/* Return true if the task removed from the event list has
+		a higher priority than the calling task. This allows
+		the calling task to know if it should force a context
+		switch now. */
 		xReturn = pdTRUE;
+
+		/* Mark that a yield is pending in case the user is not using the
+		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
+		xYieldPending[ xPortGetCoreID() ] = pdTRUE;
 	}
 	else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
 	{
@@ -2967,7 +2971,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 	eSleepModeStatus eReturn = eStandardSleep;
 
 		taskENTER_CRITICAL(&xTaskQueueMutex);
-		if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
+		if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xPortGetCoreID() ] ) != 0 )
 		{
 			/* A task was made ready while the scheduler was suspended. */
 			eReturn = eAbortSleep;
@@ -3210,7 +3214,7 @@ UBaseType_t uxPriority;
 	vListInitialise( &xDelayedTaskList1 );
 	vListInitialise( &xDelayedTaskList2 );
-	vListInitialise( &xPendingReadyList );
+	vListInitialise( &xPendingReadyList[ xPortGetCoreID() ] );
 
 	#if ( INCLUDE_vTaskDelete == 1 )
 	{
@@ -4576,7 +4580,7 @@ TickType_t uxReturn;
 			{
 				/* The delayed and ready lists cannot be accessed, so hold
 				this task pending until the scheduler is resumed. */
-				vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+				vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
 			}
 
 			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
@@ -4644,7 +4648,7 @@ TickType_t uxReturn;
 			{
 				/* The delayed and ready lists cannot be accessed, so hold
 				this task pending until the scheduler is resumed. */
-				vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
+				vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
 			}
 
 			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
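
The new comments about xYieldPending and the "xHigherPriorityTaskWoken" parameter refer to the usual ISR pattern: an ISR-safe API (which internally calls xTaskRemoveFromEventList and propagates its pdTRUE return) tells the ISR that it woke a higher-priority task, and the ISR then requests the context switch itself; the per-core xYieldPending[] flag is the fallback for callers that ignore that parameter. Below is a minimal sketch of that pattern, assuming xEventQueue is created elsewhere and using the argument-less portYIELD_FROM_ISR() form seen in the ESP32 port above (the exact macro signature varies by port).

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

extern QueueHandle_t xEventQueue;   /* assumed: created with xQueueCreate() elsewhere */

void example_isr(void *arg)
{
    uint32_t event = 1;
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* ISR-safe call; it sets xHigherPriorityTaskWoken to pdTRUE when the task
       it unblocked should preempt the task that was interrupted on this core. */
    xQueueSendFromISR(xEventQueue, &event, &xHigherPriorityTaskWoken);

    /* Request the switch immediately. If this check were skipped, the
       xYieldPending[] flag set inside xTaskRemoveFromEventList() would still
       force a switch at the next tick, just later. */
    if (xHigherPriorityTaskWoken == pdTRUE) {
        portYIELD_FROM_ISR();
    }
}
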