Merge branch 'feature/deprecate-old-cpu-api' into 'master'

HAL: Deprecate old CPU/SoC/Interrupt Controller HAL API

Closes IDF-4919 and IDF-5032

See merge request espressif/esp-idf!18987
Darian
2022-07-23 00:37:33 +08:00
134 changed files with 733 additions and 877 deletions
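For orientation, the change is largely a mechanical rename from the old HAL helpers to the new esp_cpu API. A minimal before/after sketch of the pattern that repeats throughout the hunks below (the wrapper function itself is illustrative; the esp_cpu calls are the ones introduced by this merge):

#include <stdint.h>
#include "esp_cpu.h"   // replaces hal/cpu_hal.h at most call sites

static void example_migration(void)
{
    // cpu_hal_get_core_id()        -> esp_cpu_get_core_id()
    int core_id = esp_cpu_get_core_id();

    // cpu_hal_get_cycle_count()    -> esp_cpu_get_cycle_count()
    uint32_t start = esp_cpu_get_cycle_count();

    // esp_cpu_in_ocd_debug_mode()  -> esp_cpu_dbgr_is_attached()
    // cpu_hal_break()              -> esp_cpu_dbgr_break()
    if (esp_cpu_dbgr_is_attached()) {
        esp_cpu_dbgr_break();
    }

    (void)core_id;
    (void)start;
}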


@@ -5,6 +5,7 @@
*/
#include <string.h>
#include "esp_cpu.h"
#include "esp_log.h"
#include "esp_app_trace.h"
#include "esp_app_trace_port.h"
@@ -45,7 +46,7 @@ esp_err_t esp_apptrace_init(void)
void *hw_data = NULL;
// 'esp_apptrace_init()' is called on every core, so ensure to do main initialization only once
if (cpu_hal_get_core_id() == 0) {
if (esp_cpu_get_core_id() == 0) {
memset(&s_trace_channels, 0, sizeof(s_trace_channels));
hw = esp_apptrace_jtag_hw_get(&hw_data);
ESP_APPTRACE_LOGD("HW interface %p", hw);


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#include <string.h>
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_app_trace_membufs_proto.h"
/** Trace data header. Every user data chunk is prepended with this header.
@@ -259,8 +260,8 @@ static inline uint8_t *esp_apptrace_membufs_wait4buf(esp_apptrace_membufs_proto_
static inline uint8_t *esp_apptrace_membufs_pkt_start(uint8_t *ptr, uint16_t size)
{
// it is safe to use cpu_hal_get_core_id() in macro call because arg is used only once inside it
((esp_tracedata_hdr_t *)ptr)->block_sz = ESP_APPTRACE_USR_BLOCK_CORE(cpu_hal_get_core_id()) | size;
// it is safe to use esp_cpu_get_core_id() in macro call because arg is used only once inside it
((esp_tracedata_hdr_t *)ptr)->block_sz = ESP_APPTRACE_USR_BLOCK_CORE(esp_cpu_get_core_id()) | size;
((esp_tracedata_hdr_t *)ptr)->wr_sz = 0;
return ptr + sizeof(esp_tracedata_hdr_t);
}


@@ -6,6 +6,7 @@
#include "soc/soc.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_app_trace_port.h"
#include "driver/uart.h"
@@ -105,7 +106,7 @@ static esp_err_t esp_apptrace_uart_unlock(esp_apptrace_uart_data_t *hw_data)
static inline void esp_apptrace_uart_hw_init(void)
{
ESP_APPTRACE_LOGI("Initialized UART on CPU%d", cpu_hal_get_core_id());
ESP_APPTRACE_LOGI("Initialized UART on CPU%d", esp_cpu_get_core_id());
}
@@ -199,7 +200,7 @@ static const int APP_TRACE_UART_RX_BUF_SIZE = 4024;
static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data)
{
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
if (core_id == 0) {
hw_data->tx_data_buff = (uint8_t *)heap_caps_malloc(APP_TRACE_MAX_TX_BUFF_UART, MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (hw_data->tx_data_buff == NULL){


@@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_cpu.h"
#include "esp_log.h"
#include "esp_app_trace_membufs_proto.h"
#include "esp_app_trace_port.h"
@@ -36,7 +37,7 @@ typedef struct {
#define ESP_APPTRACE_RISCV_HOST_DATA (1 << 22)
#define ESP_APPTRACE_RISCV_HOST_CONNECT (1 << 23)
#define ESP_APPTRACE_RISCV_INITED(_hw_) ((_hw_)->inited & (1 << 0/*cpu_hal_get_core_id()*/))
#define ESP_APPTRACE_RISCV_INITED(_hw_) ((_hw_)->inited & (1 << 0/*esp_cpu_get_core_id()*/))
static esp_err_t esp_apptrace_riscv_init(esp_apptrace_riscv_data_t *hw_data);
static esp_err_t esp_apptrace_riscv_flush(esp_apptrace_riscv_data_t *hw_data, esp_apptrace_tmo_t *tmo);
@@ -94,7 +95,7 @@ esp_apptrace_hw_t *esp_apptrace_jtag_hw_get(void **data)
e.g. OpenOCD flasher stub use own implementation of it. */
__attribute__((weak)) int esp_apptrace_advertise_ctrl_block(void *ctrl_block_addr)
{
if (!esp_cpu_in_ocd_debug_mode()) {
if (!esp_cpu_dbgr_is_attached()) {
return 0;
}
return (int) semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_APPTRACE_INIT, (long*)ctrl_block_addr);
@@ -139,7 +140,7 @@ static esp_err_t esp_apptrace_riscv_unlock(esp_apptrace_riscv_data_t *hw_data)
static esp_err_t esp_apptrace_riscv_init(esp_apptrace_riscv_data_t *hw_data)
{
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
if (hw_data->inited == 0) {
esp_apptrace_mem_block_t mem_blocks_cfg[2];
@@ -253,7 +254,7 @@ static bool esp_apptrace_riscv_host_is_connected(esp_apptrace_riscv_data_t *hw_d
if (!ESP_APPTRACE_RISCV_INITED(hw_data)) {
return false;
}
return s_tracing_ctrl[cpu_hal_get_core_id()].ctrl & ESP_APPTRACE_RISCV_HOST_CONNECT ? true : false;
return s_tracing_ctrl[esp_cpu_get_core_id()].ctrl & ESP_APPTRACE_RISCV_HOST_CONNECT ? true : false;
}
static esp_err_t esp_apptrace_riscv_flush_nolock(esp_apptrace_riscv_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
@@ -297,13 +298,13 @@ static inline void esp_apptrace_riscv_buffer_swap_lock(void)
// HACK: in this case host will set breakpoint just after ESP_APPTRACE_RISCV_CTRL_REG update,
// here we set address to set bp at
// enter ERI update critical section
s_tracing_ctrl[cpu_hal_get_core_id()].stat = (uint32_t)&__esp_apptrace_riscv_updated;
s_tracing_ctrl[esp_cpu_get_core_id()].stat = (uint32_t)&__esp_apptrace_riscv_updated;
}
static __attribute__((noinline)) void esp_apptrace_riscv_buffer_swap_unlock(void)
{
// exit ERI update critical section
s_tracing_ctrl[cpu_hal_get_core_id()].stat = 0;
s_tracing_ctrl[esp_cpu_get_core_id()].stat = 0;
// TODO: currently host sets breakpoint, use break instruction to stop;
// it will allow to use ESP_APPTRACE_RISCV_STAT_REG for other purposes
asm volatile (
@@ -317,13 +318,13 @@ static esp_err_t esp_apptrace_riscv_buffer_swap_start(uint32_t curr_block_id)
esp_apptrace_riscv_buffer_swap_lock();
uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
uint32_t ctrl_reg = s_tracing_ctrl[esp_cpu_get_core_id()].ctrl;
uint32_t host_connected = ESP_APPTRACE_RISCV_HOST_CONNECT & ctrl_reg;
if (host_connected) {
uint32_t acked_block = ESP_APPTRACE_RISCV_BLOCK_ID_GET(ctrl_reg);
uint32_t host_to_read = ESP_APPTRACE_RISCV_BLOCK_LEN_GET(ctrl_reg);
if (host_to_read != 0 || acked_block != (curr_block_id & ESP_APPTRACE_RISCV_BLOCK_ID_MSK)) {
ESP_APPTRACE_LOGD("[%d]: Can not switch %x %d %x %x/%lx", cpu_hal_get_core_id(), ctrl_reg, host_to_read, acked_block,
ESP_APPTRACE_LOGD("[%d]: Can not switch %x %d %x %x/%lx", esp_cpu_get_core_id(), ctrl_reg, host_to_read, acked_block,
curr_block_id & ESP_APPTRACE_RISCV_BLOCK_ID_MSK, curr_block_id);
res = ESP_ERR_NO_MEM;
goto _on_err;
@@ -337,9 +338,9 @@ _on_err:
static esp_err_t esp_apptrace_riscv_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len)
{
uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
uint32_t ctrl_reg = s_tracing_ctrl[esp_cpu_get_core_id()].ctrl;
uint32_t host_connected = ESP_APPTRACE_RISCV_HOST_CONNECT & ctrl_reg;
s_tracing_ctrl[cpu_hal_get_core_id()].ctrl = ESP_APPTRACE_RISCV_BLOCK_ID(new_block_id) |
s_tracing_ctrl[esp_cpu_get_core_id()].ctrl = ESP_APPTRACE_RISCV_BLOCK_ID(new_block_id) |
host_connected | ESP_APPTRACE_RISCV_BLOCK_LEN(prev_block_len);
esp_apptrace_riscv_buffer_swap_unlock();
return ESP_OK;
@@ -353,7 +354,7 @@ static esp_err_t esp_apptrace_riscv_buffer_swap(uint32_t new_block_id)
static bool esp_apptrace_riscv_host_data_pending(void)
{
uint32_t ctrl_reg = s_tracing_ctrl[cpu_hal_get_core_id()].ctrl;
uint32_t ctrl_reg = s_tracing_ctrl[esp_cpu_get_core_id()].ctrl;
// ESP_APPTRACE_LOGV("%s() 0x%x", __func__, ctrl_reg);
return (ctrl_reg & ESP_APPTRACE_RISCV_HOST_DATA) ? true : false;
}


@@ -155,6 +155,7 @@
#endif
#include "eri.h"
#include "esp_private/trax.h"
#include "esp_cpu.h"
#include "esp_log.h"
#include "esp_app_trace_membufs_proto.h"
#include "esp_app_trace_port.h"
@@ -173,7 +174,7 @@
#define ESP_APPTRACE_TRAX_HOST_DATA (1 << 22)
#define ESP_APPTRACE_TRAX_HOST_CONNECT (1 << 23)
#define ESP_APPTRACE_TRAX_INITED(_hw_) ((_hw_)->inited & (1 << cpu_hal_get_core_id()))
#define ESP_APPTRACE_TRAX_INITED(_hw_) ((_hw_)->inited & (1 << esp_cpu_get_core_id()))
#define ESP_APPTRACE_TRAX_BLOCK_SIZE (0x4000UL)
@@ -271,7 +272,7 @@ static inline void esp_apptrace_trax_hw_init(void)
// must be read by host before any transfer using TRAX
eri_write(ESP_APPTRACE_TRAX_STAT_REG, 0);
ESP_APPTRACE_LOGI("Initialized TRAX on CPU%d", cpu_hal_get_core_id());
ESP_APPTRACE_LOGI("Initialized TRAX on CPU%d", esp_cpu_get_core_id());
}
static inline void esp_apptrace_trax_select_memory_block(int block_num)
@@ -310,7 +311,7 @@ static inline void esp_apptrace_trax_memory_enable(void)
static esp_err_t esp_apptrace_trax_init(esp_apptrace_trax_data_t *hw_data)
{
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
// 'esp_apptrace_trax_init()' is called on every core, so ensure to do main initialization only once
if (core_id == 0) {
@@ -497,7 +498,7 @@ static esp_err_t esp_apptrace_trax_buffer_swap_start(uint32_t curr_block_id)
uint32_t acked_block = ESP_APPTRACE_TRAX_BLOCK_ID_GET(ctrl_reg);
uint32_t host_to_read = ESP_APPTRACE_TRAX_BLOCK_LEN_GET(ctrl_reg);
if (host_to_read != 0 || acked_block != (curr_block_id & ESP_APPTRACE_TRAX_BLOCK_ID_MSK)) {
ESP_APPTRACE_LOGD("HC[%d]: Can not switch %x %d %x %x/%lx", cpu_hal_get_core_id(), ctrl_reg, host_to_read, acked_block,
ESP_APPTRACE_LOGD("HC[%d]: Can not switch %x %d %x %x/%lx", esp_cpu_get_core_id(), ctrl_reg, host_to_read, acked_block,
curr_block_id & ESP_APPTRACE_TRAX_BLOCK_ID_MSK, curr_block_id);
res = ESP_ERR_NO_MEM;
goto _on_err;


@@ -342,12 +342,12 @@ define away all of the tracing macros.
#define traceTASK_SWITCHED_IN() if(prvGetTCBFromHandle(NULL) == xTaskGetIdleTaskHandle()) { \
SEGGER_SYSVIEW_OnIdle(); \
} else { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[cpu_hal_get_core_id()]); \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[esp_cpu_get_core_id()]); \
}
#else
#define traceTASK_SWITCHED_IN() { \
if (memcmp(pxCurrentTCB[cpu_hal_get_core_id()]->pcTaskName, "IDLE", 5) != 0) { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[cpu_hal_get_core_id()]); \
if (memcmp(pxCurrentTCB[esp_cpu_get_core_id()]->pcTaskName, "IDLE", 5) != 0) { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[esp_cpu_get_core_id()]); \
} else { \
SEGGER_SYSVIEW_OnIdle(); \
} \
@@ -357,8 +357,8 @@ define away all of the tracing macros.
#define traceMOVED_TASK_TO_READY_STATE(pxTCB) SEGGER_SYSVIEW_OnTaskStartReady((U32)pxTCB)
#define traceREADDED_TASK_TO_READY_STATE(pxTCB)
#define traceMOVED_TASK_TO_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[cpu_hal_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[cpu_hal_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[esp_cpu_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[esp_cpu_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB) SEGGER_SYSVIEW_OnTaskStopReady((U32)pxTCB, ((3u << 3) | 3))
#define traceISR_EXIT_TO_SCHEDULER() SEGGER_SYSVIEW_RecordExitISRToScheduler()


@@ -12,6 +12,7 @@
#include "esp_app_trace.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_private/startup_internal.h"
const static char *TAG = "segger_rtt";
@@ -158,7 +159,7 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
uint8_t event_id = *pbuf;
#if CONFIG_APPTRACE_SV_DEST_UART
if (
(APPTRACE_SV_DEST_CPU != cpu_hal_get_core_id()) &&
(APPTRACE_SV_DEST_CPU != esp_cpu_get_core_id()) &&
(
(event_id == SYSVIEW_EVTID_ISR_ENTER) ||
(event_id == SYSVIEW_EVTID_ISR_EXIT) ||
@@ -189,7 +190,7 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
return 0;
}
#if CONFIG_APPTRACE_SV_DEST_JTAG
if (cpu_hal_get_core_id()) { // dual core specific code
if (esp_cpu_get_core_id()) { // dual core specific code
// use the highest - 1 bit of event ID to indicate core ID
// the highest bit can not be used due to event ID encoding method
// this reduces supported ID range to [0..63] (for 1 byte IDs) plus [128..16383] (for 2 bytes IDs)


@@ -12,6 +12,7 @@
#include "driver/gptimer.h"
#include "esp_intr_alloc.h"
#include "esp_rom_sys.h"
#include "esp_cpu.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
@@ -179,7 +180,7 @@ static void esp_apptrace_dummy_task(void *p)
int i = 0;
while (!arg->stop) {
ESP_APPTRACE_TEST_LOGD("%x: dummy task work %d.%d", xTaskGetCurrentTaskHandle(), cpu_hal_get_core_id(), i++);
ESP_APPTRACE_TEST_LOGD("%x: dummy task work %d.%d", xTaskGetCurrentTaskHandle(), esp_cpu_get_core_id(), i++);
if (tmo_ticks) {
vTaskDelay(tmo_ticks);
}
@@ -210,7 +211,7 @@ static void esp_apptrace_test_task(void *p)
.resolution_hz = 1000000,
};
TEST_ESP_OK(gptimer_new_timer(&timer_config, &arg->timers[i].gptimer));
*(uint32_t *)arg->timers[i].data.buf = ((uint32_t)arg->timers[i].gptimer) | (1 << 31) | (cpu_hal_get_core_id() ? 0x1 : 0);
*(uint32_t *)arg->timers[i].data.buf = ((uint32_t)arg->timers[i].gptimer) | (1 << 31) | (esp_cpu_get_core_id() ? 0x1 : 0);
ESP_APPTRACE_TEST_LOGI("%x: start timer %x period %u us", xTaskGetCurrentTaskHandle(), arg->timers[i].gptimer, arg->timers[i].data.period);
gptimer_alarm_config_t alarm_config = {
.reload_count = 0,
@@ -226,7 +227,7 @@ static void esp_apptrace_test_task(void *p)
TEST_ESP_OK(gptimer_start(arg->timers[i].gptimer));
}
*(uint32_t *)arg->data.buf = (uint32_t)xTaskGetCurrentTaskHandle() | (cpu_hal_get_core_id() ? 0x1 : 0);
*(uint32_t *)arg->data.buf = (uint32_t)xTaskGetCurrentTaskHandle() | (esp_cpu_get_core_id() ? 0x1 : 0);
arg->data.wr_cnt = 0;
arg->data.wr_err = 0;
while (!arg->stop) {
@@ -652,7 +653,7 @@ static void esp_logtrace_task(void *p)
ESP_LOGI(TAG, "%p: sample print 4 %c", xTaskGetCurrentTaskHandle(), ((i & 0xFF) % 95) + 32);
ESP_LOGI(TAG, "%p: sample print 5 %f", xTaskGetCurrentTaskHandle(), 1.0);
ESP_LOGI(TAG, "%p: sample print 6 %f", xTaskGetCurrentTaskHandle(), 3.45);
ESP_LOGI(TAG, "%p: logtrace task work %d.%d", xTaskGetCurrentTaskHandle(), cpu_hal_get_core_id(), i);
ESP_LOGI(TAG, "%p: logtrace task work %d.%d", xTaskGetCurrentTaskHandle(), esp_cpu_get_core_id(), i);
if (++i == 10000) {
break;
}


@@ -6,7 +6,6 @@
#include <stdbool.h>
#include "hal/cpu_hal.h"
#include "hal/mpu_hal.h"
#include "hal/mpu_types.h"
#include "soc/soc_caps.h"


@@ -4,9 +4,9 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_cpu.h"
#include "esp_log.h"
#include "bootloader_common.h"
#include "hal/cpu_hal.h"
#include "esp_rom_sys.h"
@@ -22,8 +22,8 @@ void abort(void)
#if !CONFIG_ESP_SYSTEM_PANIC_SILENT_REBOOT
esp_rom_printf("abort() was called at PC 0x%08x\r\n", (intptr_t)__builtin_return_address(0) - 3);
#endif
if (cpu_hal_is_debugger_attached()) {
cpu_hal_break();
if (esp_cpu_dbgr_is_attached()) {
esp_cpu_dbgr_break();
}
while (1) {
}


@@ -5,7 +5,7 @@
*/
#include "sdkconfig.h"
#include "bootloader_random.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "soc/wdev_reg.h"
#ifndef BOOTLOADER_BUILD
@@ -43,10 +43,10 @@
values.
*/
random = REG_READ(WDEV_RND_REG);
start = cpu_hal_get_cycle_count();
start = esp_cpu_get_cycle_count();
do {
random ^= REG_READ(WDEV_RND_REG);
now = cpu_hal_get_cycle_count();
now = esp_cpu_get_cycle_count();
} while (now - start < RNG_CPU_WAIT_CYCLE_NUM);
}
buffer_bytes[i] = random >> ((i % 4) * 8);


@@ -157,7 +157,7 @@ static esp_err_t image_load(esp_image_load_mode_t mode, const esp_partition_pos_
bootloader_sha256_handle_t *p_sha_handle = &sha_handle;
CHECK_ERR(process_image_header(data, part->offset, (verify_sha) ? p_sha_handle : NULL, do_verify, silent));
CHECK_ERR(process_segments(data, silent, do_load, sha_handle, checksum));
bool skip_check_checksum = !do_verify || esp_cpu_in_ocd_debug_mode();
bool skip_check_checksum = !do_verify || esp_cpu_dbgr_is_attached();
CHECK_ERR(process_checksum(sha_handle, checksum_word, data, silent, skip_check_checksum));
CHECK_ERR(process_appended_hash(data, part->size, do_verify, silent));
if (verify_sha) {
@@ -167,7 +167,7 @@ static esp_err_t image_load(esp_image_load_mode_t mode, const esp_partition_pos_
// If secure boot is not enabled in hardware, then
// skip the signature check in bootloader when the debugger is attached.
// This is done to allow for breakpoints in Flash.
bool do_verify_sig = !esp_cpu_in_ocd_debug_mode();
bool do_verify_sig = !esp_cpu_dbgr_is_attached();
#else // CONFIG_SECURE_BOOT
bool do_verify_sig = true;
#endif // end checking for JTAG
@@ -177,7 +177,7 @@ static esp_err_t image_load(esp_image_load_mode_t mode, const esp_partition_pos_
}
#else // SECURE_BOOT_CHECK_SIGNATURE
// No secure boot, but SHA-256 can be appended for basic corruption detection
if (sha_handle != NULL && !esp_cpu_in_ocd_debug_mode()) {
if (sha_handle != NULL && !esp_cpu_dbgr_is_attached()) {
err = verify_simple_hash(sha_handle, data);
sha_handle = NULL; // calling verify_simple_hash finishes sha_handle
}


@@ -12,6 +12,7 @@
#include "esp_log.h"
#include "unity.h"
#include "unity_test_utils.h"
#include "soc/soc.h"
extern "C" void setUp()
{


@@ -15,10 +15,10 @@
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_cpu.h"
#include "soc/soc_caps.h"
#include "soc/gpio_periph.h"
#include "soc/io_mux_reg.h"
#include "hal/cpu_hal.h"
#include "hal/dedic_gpio_cpu_ll.h"
#include "hal/gpio_hal.h"
#include "esp_private/periph_ctrl.h"
@@ -196,7 +196,7 @@ esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_
dedic_gpio_bundle_t *bundle = NULL;
uint32_t out_mask = 0;
uint32_t in_mask = 0;
uint32_t core_id = cpu_hal_get_core_id(); // dedicated GPIO will be binded to the CPU who invokes this API
uint32_t core_id = esp_cpu_get_core_id(); // dedicated GPIO will be binded to the CPU who invokes this API
ESP_GOTO_ON_FALSE(config && ret_bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE(config->gpio_array && config->array_size > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO array or size");
@@ -306,7 +306,7 @@ esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
bool recycle_all = false;
ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
ESP_GOTO_ON_FALSE(core_id == bundle->core_id, ESP_FAIL, err, TAG, "del bundle on wrong CPU");
portENTER_CRITICAL(&s_platform[core_id]->spinlock);
@@ -377,7 +377,7 @@ esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
// lazy alloc interrupt
ESP_GOTO_ON_ERROR(dedic_gpio_install_interrupt(core_id), err, TAG, "allocate interrupt on core %d failed", core_id);


@@ -46,6 +46,7 @@
#include "esp_attr.h"
#include "esp_rom_gpio.h"
#include "esp_memory_utils.h"
/* The actual max size of DMA buffer is 4095
* Set 4092 here to align with 4-byte, so that the position of the slot data in the buffer will be relatively fixed */


@@ -30,6 +30,7 @@
#include "esp_private/periph_ctrl.h"
#include "driver/gpio.h"
#include "driver/pulse_cnt.h"
#include "esp_memory_utils.h"
// If ISR handler is allowed to run whilst cache is disabled,
// Make sure all the code and related variables used by the handler are in the SRAM


@@ -16,6 +16,7 @@
#endif
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "esp_rom_gpio.h"
#include "soc/rmt_periph.h"
#include "soc/rtc.h"


@@ -24,6 +24,7 @@
#include "driver/gpio.h"
#include "driver/rmt_tx.h"
#include "rmt_private.h"
#include "esp_memory_utils.h"
static const char *TAG = "rmt";


@@ -84,7 +84,7 @@ The driver of FIFOs works as below:
#include "freertos/FreeRTOS.h"
#include "soc/soc_memory_layout.h"
#include "soc/gpio_periph.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "freertos/semphr.h"
#include "esp_private/periph_ctrl.h"
#include "driver/gpio.h"
@@ -616,7 +616,7 @@ esp_err_t sdio_slave_send_get_finished(void **out_arg, TickType_t wait)
esp_err_t sdio_slave_transmit(uint8_t *addr, size_t len)
{
uint32_t timestamp = cpu_hal_get_cycle_count();
uint32_t timestamp = esp_cpu_get_cycle_count();
uint32_t ret_stamp;
esp_err_t err = sdio_slave_send_queue(addr, len, (void *)timestamp, portMAX_DELAY);


@@ -5,6 +5,7 @@
*/
#include "esp_log.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"


@@ -1316,8 +1316,8 @@ TEST_CASE_MULTIPLE_DEVICES("SPI Master: FD, DMA, Master Single Direction Test",
//IDF-5146
#define RECORD_TIME_PREPARE() uint32_t __t1, __t2
#define RECORD_TIME_START() do {__t1 = esp_cpu_get_ccount();}while(0)
#define RECORD_TIME_END(p_time) do{__t2 = esp_cpu_get_ccount(); *p_time = (__t2-__t1);}while(0)
#define RECORD_TIME_START() do {__t1 = esp_cpu_get_cycle_count();}while(0)
#define RECORD_TIME_END(p_time) do{__t2 = esp_cpu_get_cycle_count(); *p_time = (__t2-__t1);}while(0)
#define GET_US_BY_CCOUNT(t) ((double)t/CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ)
static void speed_setup(spi_device_handle_t *spi, bool use_dma)


@@ -16,6 +16,7 @@
#include <string.h>
#include "test_gpio.h"
#include "esp_system.h"
#include "esp_cpu.h"
#include "esp_sleep.h"
#include "unity.h"
#include "unity_test_utils.h"
@@ -104,7 +105,7 @@ TEST_CASE("GPIO_config_parameters_test", "[gpio]")
static void gpio_isr_edge_handler(void *arg)
{
uint32_t gpio_num = (uint32_t) arg;
esp_rom_printf("GPIO[%d] intr on core %d, val: %d\n", gpio_num, cpu_hal_get_core_id(), gpio_get_level(gpio_num));
esp_rom_printf("GPIO[%d] intr on core %d, val: %d\n", gpio_num, esp_cpu_get_core_id(), gpio_get_level(gpio_num));
edge_intr_times++;
}


@@ -7,11 +7,11 @@
#include <stdio.h>
#include <string.h>
#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "hal/gpio_hal.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "unity.h"
#include "esp_rom_gpio.h"
@@ -483,9 +483,9 @@ static uint32_t tx_end_time0, tx_end_time1;
static void rmt_tx_end_cb(rmt_channel_t channel, void *arg)
{
if (channel == 0) {
tx_end_time0 = cpu_hal_get_cycle_count();
tx_end_time0 = esp_cpu_get_cycle_count();
} else {
tx_end_time1 = cpu_hal_get_cycle_count();
tx_end_time1 = esp_cpu_get_cycle_count();
}
}
TEST_CASE("RMT TX simultaneously", "[rmt]")


@@ -13,6 +13,7 @@
#include "esp_private/esp_clk.h"
#include "soc/soc_caps.h"
#include "esp_rom_sys.h"
#include "soc/soc.h"
#define TEST_TIMER_RESOLUTION_HZ 1000000 // 1MHz resolution
#define TIMER_DELTA 0.001


@@ -20,6 +20,7 @@
#include "esp_private/periph_ctrl.h"
#include "driver/twai.h"
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "soc/twai_periph.h"
#include "soc/gpio_sig_map.h"
#include "hal/twai_hal.h"


@@ -26,6 +26,7 @@
#include "hal/adc_types.h"
#include "hal/adc_hal.h"
#include "hal/dma_types.h"
#include "esp_memory_utils.h"
//For DMA
#if SOC_GDMA_SUPPORTED
#include "esp_private/gdma.h"


@@ -10,6 +10,7 @@
#include "test_utils.h"
#include "esp_log.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "soc/adc_periph.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
@@ -202,8 +203,8 @@ TEST_CASE("ADC1 oneshot raw average / std_deviation", "[adc_oneshot][ignore][man
#endif
#define RECORD_TIME_PREPARE() uint32_t __t1, __t2
#define RECORD_TIME_START() do {__t1 = esp_cpu_get_ccount();}while(0)
#define RECORD_TIME_END(p_time) do{__t2 = esp_cpu_get_ccount(); *p_time = (__t2-__t1);}while(0)
#define RECORD_TIME_START() do {__t1 = esp_cpu_get_cycle_count();}while(0)
#define RECORD_TIME_END(p_time) do{__t2 = esp_cpu_get_cycle_count(); *p_time = (__t2-__t1);}while(0)
#define GET_US_BY_CCOUNT(t) ((double)t/CPU_FREQ_MHZ)


@@ -20,11 +20,11 @@
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "hal/cpu_hal.h"
#include "dm9051.h"
#include "sdkconfig.h"
#include "esp_rom_gpio.h"
#include "esp_rom_sys.h"
#include "esp_cpu.h"
static const char *TAG = "dm9051.mac";
@@ -795,7 +795,7 @@ esp_eth_mac_t *esp_eth_mac_new_dm9051(const eth_dm9051_config_t *dm9051_config,
/* create dm9051 task */
BaseType_t core_num = tskNO_AFFINITY;
if (mac_config->flags & ETH_MAC_FLAG_PIN_TO_CORE) {
core_num = cpu_hal_get_core_id();
core_num = esp_cpu_get_core_id();
}
BaseType_t xReturned = xTaskCreatePinnedToCore(emac_dm9051_task, "dm9051_tsk", mac_config->rx_task_stack_size, emac,
mac_config->rx_task_prio, &emac->rx_task_hdl, core_num);


@@ -15,13 +15,13 @@
#include "esp_eth_driver.h"
#include "esp_pm.h"
#include "esp_mac.h"
#include "esp_cpu.h"
#include "esp_heap_caps.h"
#include "esp_intr_alloc.h"
#include "esp_private/esp_clk.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "hal/cpu_hal.h"
#include "hal/emac_hal.h"
#include "hal/gpio_hal.h"
#include "soc/soc.h"
@@ -507,7 +507,7 @@ static esp_err_t esp_emac_alloc_driver_obj(const eth_mac_config_t *config, emac_
/* create rx task */
BaseType_t core_num = tskNO_AFFINITY;
if (config->flags & ETH_MAC_FLAG_PIN_TO_CORE) {
core_num = cpu_hal_get_core_id();
core_num = esp_cpu_get_core_id();
}
BaseType_t xReturned = xTaskCreatePinnedToCore(emac_esp32_rx_task, "emac_rx", config->rx_task_stack_size, emac,
config->rx_task_prio, &emac->rx_task_hdl, core_num);


@@ -9,6 +9,7 @@
#include <string.h>
#include "esp_log.h"
#include "esp_check.h"
#include "esp_cpu.h"
#include "driver/gpio.h"
#include "esp_rom_gpio.h"
#include "driver/spi_master.h"
@@ -702,7 +703,7 @@ esp_eth_mac_t *esp_eth_mac_new_ksz8851snl(const eth_ksz8851snl_config_t *ksz8851
BaseType_t core_num = tskNO_AFFINITY;
if (mac_config->flags & ETH_MAC_FLAG_PIN_TO_CORE) {
core_num = cpu_hal_get_core_id();
core_num = esp_cpu_get_core_id();
}
BaseType_t xReturned = xTaskCreatePinnedToCore(emac_ksz8851snl_task, "ksz8851snl_tsk", mac_config->rx_task_stack_size,
emac, mac_config->rx_task_prio, &emac->rx_task_hdl, core_num);


@@ -19,11 +19,11 @@
#include <sys/param.h>
#include "esp_log.h"
#include "esp_check.h"
#include "esp_cpu.h"
#include "esp_eth_driver.h"
#include "esp_intr_alloc.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "hal/cpu_hal.h"
#include "openeth.h"
#include "esp_mac.h"
@@ -395,7 +395,7 @@ esp_eth_mac_t *esp_eth_mac_new_openeth(const eth_mac_config_t *config)
// Create the RX task
BaseType_t core_num = tskNO_AFFINITY;
if (config->flags & ETH_MAC_FLAG_PIN_TO_CORE) {
core_num = cpu_hal_get_core_id();
core_num = esp_cpu_get_core_id();
}
BaseType_t xReturned = xTaskCreatePinnedToCore(emac_opencores_rx_task, "emac_rx", config->rx_task_stack_size, emac,
config->rx_task_prio, &emac->rx_task_hdl, core_num);


@@ -16,10 +16,10 @@
#include "esp_intr_alloc.h"
#include "esp_heap_caps.h"
#include "esp_rom_gpio.h"
#include "esp_cpu.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "hal/cpu_hal.h"
#include "w5500.h"
#include "sdkconfig.h"
@@ -674,7 +674,7 @@ esp_eth_mac_t *esp_eth_mac_new_w5500(const eth_w5500_config_t *w5500_config, con
/* create w5500 task */
BaseType_t core_num = tskNO_AFFINITY;
if (mac_config->flags & ETH_MAC_FLAG_PIN_TO_CORE) {
core_num = cpu_hal_get_core_id();
core_num = esp_cpu_get_core_id();
}
BaseType_t xReturned = xTaskCreatePinnedToCore(emac_w5500_task, "w5500_tsk", mac_config->rx_task_stack_size, emac,
mac_config->rx_task_prio, &emac->rx_task_hdl, core_num);


@@ -13,6 +13,7 @@
#include "soc/uart_reg.h"
#include "soc/periph_defs.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "hal/wdt_hal.h"
@@ -192,7 +193,7 @@ static int wp_count = 0;
static uint32_t bp_list[GDB_BP_SIZE] = {0};
static uint32_t wp_list[GDB_WP_SIZE] = {0};
static uint32_t wp_size[GDB_WP_SIZE] = {0};
static watchpoint_trigger_t wp_access[GDB_WP_SIZE] = {0};
static esp_cpu_watchpoint_trigger_t wp_access[GDB_WP_SIZE] = {0};
static volatile bool step_in_progress = false;
static bool not_send_reason = false;
@@ -438,16 +439,16 @@ void update_breakpoints(void)
{
for (size_t i = 0; i < GDB_BP_SIZE; i++) {
if (bp_list[i] != 0) {
cpu_ll_set_breakpoint(i, (uint32_t)bp_list[i]);
esp_cpu_set_breakpoint(i, (const void *)bp_list[i]);
} else {
cpu_hal_clear_breakpoint(i);
esp_cpu_clear_breakpoint(i);
}
}
for (size_t i = 0; i < GDB_WP_SIZE; i++) {
if (wp_list[i] != 0) {
cpu_hal_set_watchpoint(i, (void *)wp_list[i], wp_size[i], wp_access[i]);
esp_cpu_set_watchpoint(i, (void *)wp_list[i], wp_size[i], wp_access[i]);
} else {
cpu_hal_clear_watchpoint(i);
esp_cpu_clear_watchpoint(i);
}
}
}
@@ -514,7 +515,7 @@ static void handle_Z2_command(const unsigned char *cmd, int len)
esp_gdbstub_send_str_packet("E02");
return;
}
wp_access[wp_count] = WATCHPOINT_TRIGGER_ON_WO;
wp_access[wp_count] = ESP_CPU_WATCHPOINT_STORE;
wp_size[wp_count] = size;
wp_list[wp_count++] = (uint32_t)addr;
update_breakpoints();
@@ -533,7 +534,7 @@ static void handle_Z3_command(const unsigned char *cmd, int len)
esp_gdbstub_send_str_packet("E02");
return;
}
wp_access[wp_count] = WATCHPOINT_TRIGGER_ON_RO;
wp_access[wp_count] = ESP_CPU_WATCHPOINT_LOAD;
wp_size[wp_count] = size;
wp_list[wp_count++] = (uint32_t)addr;
update_breakpoints();
@@ -552,7 +553,7 @@ static void handle_Z4_command(const unsigned char *cmd, int len)
esp_gdbstub_send_str_packet("E02");
return;
}
wp_access[wp_count] = WATCHPOINT_TRIGGER_ON_RW;
wp_access[wp_count] = ESP_CPU_WATCHPOINT_ACCESS;
wp_size[wp_count] = size;
wp_list[wp_count++] = (uint32_t)addr;
update_breakpoints();
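In short, the gdbstub watchpoint triggers map one-to-one onto the new enum: WATCHPOINT_TRIGGER_ON_WO becomes ESP_CPU_WATCHPOINT_STORE, WATCHPOINT_TRIGGER_ON_RO becomes ESP_CPU_WATCHPOINT_LOAD, and WATCHPOINT_TRIGGER_ON_RW becomes ESP_CPU_WATCHPOINT_ACCESS. A minimal sketch of the new calls (slot 0 and the helper names are illustrative, not taken from the diff):

#include <stddef.h>
#include "esp_cpu.h"

// Hypothetical helpers around the esp_cpu watchpoint API used above
static void watch_writes(const void *addr, size_t size)
{
    // Trigger on stores to [addr, addr + size)
    esp_cpu_set_watchpoint(0, addr, size, ESP_CPU_WATCHPOINT_STORE);
}

static void unwatch(void)
{
    esp_cpu_clear_watchpoint(0);
}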


@@ -6,10 +6,10 @@
#include <string.h>
#include "esp_gdbstub_common.h"
#include "hal/cpu_hal.h"
#include "soc/soc_memory_layout.h"
#include "xtensa/config/specreg.h"
#include "sdkconfig.h"
#include "esp_cpu.h"
#include "esp_ipc_isr.h"
#include "esp_private/crosscore_int.h"
@@ -27,7 +27,7 @@ static void init_regfile(esp_gdbstub_gdb_regfile_t *dst)
static void update_regfile_common(esp_gdbstub_gdb_regfile_t *dst)
{
if (dst->a[0] & 0x8000000U) {
dst->a[0] = (uint32_t)cpu_ll_pc_to_ptr(dst->a[0]);
dst->a[0] = (uint32_t)esp_cpu_pc_to_addr(dst->a[0]);
}
if (!esp_stack_ptr_is_sane(dst->a[1])) {
dst->a[1] = 0xDEADBEEF;
@@ -42,14 +42,14 @@ void esp_gdbstub_frame_to_regfile(const esp_gdbstub_frame_t *frame, esp_gdbstub_
{
init_regfile(dst);
const uint32_t *a_regs = (const uint32_t *) &frame->a0;
if (!(esp_ptr_executable(cpu_ll_pc_to_ptr(frame->pc)) && (frame->pc & 0xC0000000U))) {
if (!(esp_ptr_executable(esp_cpu_pc_to_addr(frame->pc)) && (frame->pc & 0xC0000000U))) {
/* Xtensa ABI sets the 2 MSBs of the PC according to the windowed call size
* Incase the PC is invalid, GDB will fail to translate addresses to function names
* Hence replacing the PC to a placeholder address in case of invalid PC
*/
dst->pc = (uint32_t)&_invalid_pc_placeholder;
} else {
dst->pc = (uint32_t)cpu_ll_pc_to_ptr(frame->pc);
dst->pc = (uint32_t)esp_cpu_pc_to_addr(frame->pc);
}
for (int i = 0; i < 16; i++) {
@@ -76,10 +76,10 @@ static void solicited_frame_to_regfile(const XtSolFrame *frame, esp_gdbstub_gdb_
{
init_regfile(dst);
const uint32_t *a_regs = (const uint32_t *) &frame->a0;
if (!(esp_ptr_executable(cpu_ll_pc_to_ptr(frame->pc)) && (frame->pc & 0xC0000000U))) {
if (!(esp_ptr_executable(esp_cpu_pc_to_addr(frame->pc)) && (frame->pc & 0xC0000000U))) {
dst->pc = (uint32_t)&_invalid_pc_placeholder;
} else {
dst->pc = (uint32_t)cpu_ll_pc_to_ptr(frame->pc);
dst->pc = (uint32_t)esp_cpu_pc_to_addr(frame->pc);
}
/* only 4 registers saved in the solicited frame */
@@ -194,7 +194,7 @@ void esp_gdbstub_do_step(void)
void esp_gdbstub_trigger_cpu(void)
{
#if !CONFIG_FREERTOS_UNICORE
if (0 == cpu_hal_get_core_id()) {
if (0 == esp_cpu_get_core_id()) {
esp_crosscore_int_send_gdb_call(1);
} else {
esp_crosscore_int_send_gdb_call(0);


@@ -3,7 +3,7 @@ idf_build_get_property(target IDF_TARGET)
set(requires soc)
set(priv_requires efuse spi_flash bootloader_support)
set(srcs "compare_set.c" "cpu.c" "esp_memory_utils.c")
set(srcs "cpu.c" "esp_memory_utils.c")
if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "esp_clk.c"
"clk_ctrl_os.c"


@@ -1,41 +0,0 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "compare_set.h"
#include "spinlock.h"
#include "soc/soc_caps.h"
#if __XTENSA__ && SOC_SPIRAM_SUPPORTED
static spinlock_t global_extram_lock = SPINLOCK_INITIALIZER;
void compare_and_set_extram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
uint32_t intlevel, old_value;
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intlevel));
spinlock_acquire(&global_extram_lock, SPINLOCK_WAIT_FOREVER);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
spinlock_release(&global_extram_lock);
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intlevel));
*set = old_value;
}
#else // __XTENSA__ && SOC_SPIRAM_SUPPORTED
void compare_and_set_extram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
#endif // endif


@@ -452,9 +452,9 @@ void esp_cpu_configure_region_protection(void)
* are silently ignored by the CPU
*/
if (esp_cpu_in_ocd_debug_mode()) {
if (esp_cpu_dbgr_is_attached()) {
// Anti-FI check that cpu is really in ocd mode
ESP_FAULT_ASSERT(esp_cpu_in_ocd_debug_mode());
ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
// 1. IRAM
PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
@@ -620,7 +620,7 @@ esp_err_t esp_cpu_clear_watchpoint(int wp_num)
*
* ------------------------------------------------------------------------------------------------------------------ */
#if __XTENSA__ && XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif
@@ -628,35 +628,39 @@ bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, ui
{
#if __XTENSA__
bool ret;
#if XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
if (esp_ptr_external_ram((const void *)addr)) {
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
// Check if the target address is in external RAM
if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
/* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
uint32_t intr_level;
// Atomicity is achieved by disabling interrupts then acquiring a an external RAM CAS lock
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intr_level));
while (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
;
if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
// External RAM CAS lock already taken. Exit
ret = false;
goto exit;
}
// Now we compare and set the target address
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
ret = (*addr == compare_value);
if (ret) {
*addr = new_value;
}
// Release the external RAM CAS lock and reenable interrupts
// Release the external RAM CAS lock
external_ram_cas_lock = 0;
exit:
// Reenable interrupts
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intr_level));
ret = (old_value == compare_value);
} else
#endif //XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
{
// The target address is in internal RAM. Use the CPU's native CAS instruction
ret = xt_utils_compare_and_set(addr, compare_value, new_value);
}
return ret;
#else
#else // __XTENSA__
// Single core targets don't have atomic CAS instruction. So access method is the same for internal and external RAM
return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
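The declaration backing this implementation is bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value) (see the esp_cpu.h hunk further down). A minimal usage sketch, with a made-up lock word and values:

#include <stdbool.h>
#include <stdint.h>
#include "esp_cpu.h"

#define LOCK_FREE   0U   // illustrative values, not taken from this diff
#define LOCK_TAKEN  1U

static volatile uint32_t s_lock = LOCK_FREE;

static bool try_take_lock(void)
{
    // Returns true only if s_lock held LOCK_FREE and was atomically set to LOCK_TAKEN
    return esp_cpu_compare_and_set(&s_lock, LOCK_FREE, LOCK_TAKEN);
}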


@@ -10,7 +10,7 @@
#include <string.h>
#include <sys/param.h>
#include "esp_attr.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "soc/wdev_reg.h"
#include "esp_private/esp_clk.h"
@@ -47,7 +47,7 @@ uint32_t IRAM_ATTR esp_random(void)
uint32_t ccount;
uint32_t result = 0;
do {
ccount = cpu_hal_get_cycle_count();
ccount = esp_cpu_get_cycle_count();
result ^= REG_READ(WDEV_RND_REG);
} while (ccount - last_ccount < cpu_to_apb_freq_ratio * APB_CYCLE_WAIT_NUM);
last_ccount = ccount;


@@ -1,29 +0,0 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "hal/cpu_hal.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void __attribute__((always_inline)) compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
cpu_ll_compare_and_set_native(addr, compare, set);
}
void compare_and_set_extram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
#ifdef __cplusplus
}
#endif


@@ -17,6 +17,7 @@
#elif __riscv
#include "riscv/rv_utils.h"
#endif
#include "esp_intr_alloc.h"
#include "esp_err.h"
#ifdef __cplusplus
@@ -34,7 +35,7 @@ typedef uint32_t esp_cpu_cycle_count_t;
* @brief CPU interrupt type
*/
typedef enum {
ESP_CPU_INTR_TYPE_LEVEL,
ESP_CPU_INTR_TYPE_LEVEL = 0,
ESP_CPU_INTR_TYPE_EDGE,
ESP_CPU_INTR_TYPE_NA,
} esp_cpu_intr_type_t;
@@ -548,33 +549,6 @@ FORCE_INLINE_ATTR intptr_t esp_cpu_get_call_addr(intptr_t return_address)
*/
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value);
/* ---------------------------------------------------- Deprecate ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/*
[refactor-todo] Make these deprecated inline
*/
typedef esp_cpu_cycle_count_t esp_cpu_ccount_t;
#define esp_cpu_get_ccount() esp_cpu_get_cycle_count()
#define esp_cpu_set_ccount(ccount) esp_cpu_set_cycle_count(ccount)
/**
* @brief Returns true if a JTAG debugger is attached to CPU OCD (on chip debug) port.
*
* [refactor-todo] See if this can be replaced with esp_cpu_dbgr_is_attached directly
*
* @note Always returns false if CONFIG_ESP_DEBUG_OCDAWARE is not enabled
*/
FORCE_INLINE_ATTR bool esp_cpu_in_ocd_debug_mode(void)
{
#if CONFIG_ESP_DEBUG_OCDAWARE
return esp_cpu_dbgr_is_attached();
#else // CONFIG_ESP_DEBUG_OCDAWARE
return false; // Always return false if "OCD aware" is disabled
#endif // CONFIG_ESP_DEBUG_OCDAWARE
}
#ifdef __cplusplus
}
#endif


@@ -8,7 +8,6 @@
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
@@ -88,7 +87,7 @@ typedef enum {
* @param id breakpoint to set [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @param addr address to set a breakpoint on
*/
static inline void cpu_hal_set_breakpoint(int id, const void *addr)
static inline __attribute__((deprecated)) void cpu_hal_set_breakpoint(int id, const void *addr)
{
esp_cpu_set_breakpoint(id, addr);
}
@@ -97,7 +96,7 @@ static inline void cpu_hal_set_breakpoint(int id, const void *addr)
*
* @param id breakpoint to clear [0..SOC_CPU_BREAKPOINTS_NUM - 1]
*/
static inline void cpu_hal_clear_breakpoint(int id)
static inline __attribute__((deprecated)) void cpu_hal_clear_breakpoint(int id)
{
esp_cpu_clear_breakpoint(id);
}
@@ -114,7 +113,8 @@ static inline void cpu_hal_clear_breakpoint(int id)
* @param size number of bytes from starting address to watch
* @param trigger operation on specified memory range that triggers the watchpoint (read, write, read/write)
*/
static inline void cpu_hal_set_watchpoint(int id, const void *addr, size_t size, watchpoint_trigger_t trigger)
static inline __attribute__((deprecated))
void cpu_hal_set_watchpoint(int id, const void *addr, size_t size, watchpoint_trigger_t trigger)
{
esp_cpu_set_watchpoint(id, addr, size, (esp_cpu_watchpoint_trigger_t)trigger);
}
@@ -124,7 +124,7 @@ static inline void cpu_hal_set_watchpoint(int id, const void *addr, size_t size,
*
* @param id watchpoint to clear [0..SOC_CPU_WATCHPOINTS_NUM - 1]
*/
static inline void cpu_hal_clear_watchpoint(int id)
static inline __attribute__((deprecated)) void cpu_hal_clear_watchpoint(int id)
{
esp_cpu_clear_watchpoint(id);
}
@@ -136,7 +136,8 @@ static inline void cpu_hal_clear_watchpoint(int id)
*
* @param base address to move the exception vector table to
*/
static inline __attribute__((always_inline)) void cpu_hal_set_vecbase(const void *base)
static inline __attribute__((deprecated)) __attribute__((always_inline))
void cpu_hal_set_vecbase(const void *base)
{
esp_cpu_intr_set_ivt_addr(base);
}
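Calls that still go through these compatibility wrappers keep compiling but now produce a deprecation warning; a hypothetical example:

#include "hal/cpu_hal.h"

void legacy_caller(void)
{
    // Warns (e.g. -Wdeprecated-declarations with GCC/Clang) because the wrapper
    // now carries __attribute__((deprecated)); new code should call
    // esp_cpu_clear_breakpoint() directly.
    cpu_hal_clear_breakpoint(0);
}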


@@ -8,7 +8,6 @@
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
@@ -21,59 +20,56 @@ Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
extern "C" {
#endif
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t cpu_ll_get_core_id(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) __attribute__((pure)) uint32_t cpu_ll_get_core_id(void)
{
return esp_cpu_get_core_id();
}
FORCE_INLINE_ATTR uint32_t cpu_ll_get_cycle_count(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) uint32_t cpu_ll_get_cycle_count(void)
{
return (uint32_t)esp_cpu_get_ccount();
return (uint32_t)esp_cpu_get_cycle_count();
}
FORCE_INLINE_ATTR void cpu_ll_set_cycle_count(uint32_t val)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_set_cycle_count(uint32_t val)
{
esp_cpu_set_cycle_count((esp_cpu_ccount_t)val);
esp_cpu_set_cycle_count((esp_cpu_cycle_count_t)val);
}
FORCE_INLINE_ATTR void *cpu_ll_get_sp(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) void *cpu_ll_get_sp(void)
{
return esp_cpu_get_sp();
}
FORCE_INLINE_ATTR void cpu_ll_init_hwloop(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_init_hwloop(void)
{
; // Nothing to do. Contents moved to bootloader directly
}
#if SOC_CPU_BREAKPOINTS_NUM > 0
FORCE_INLINE_ATTR void cpu_ll_set_breakpoint(int id, uint32_t pc)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
esp_cpu_set_breakpoint(id, (const void *)pc);
}
FORCE_INLINE_ATTR void cpu_ll_clear_breakpoint(int id)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_clear_breakpoint(int id)
{
esp_cpu_clear_breakpoint(id);
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t cpu_ll_ptr_to_pc(const void *addr)
FORCE_INLINE_ATTR __attribute__((deprecated)) __attribute__((pure)) uint32_t cpu_ll_ptr_to_pc(const void *addr)
{
return ((uint32_t) addr);
}
FORCE_INLINE_ATTR __attribute__((pure)) void *cpu_ll_pc_to_ptr(uint32_t pc)
FORCE_INLINE_ATTR __attribute__((deprecated)) __attribute__((pure)) void *cpu_ll_pc_to_ptr(uint32_t pc)
{
return esp_cpu_pc_to_addr(pc);
}
FORCE_INLINE_ATTR void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
FORCE_INLINE_ATTR __attribute__((deprecated))
void cpu_ll_set_watchpoint(int id, const void *addr, size_t size, bool on_read, bool on_write)
{
esp_cpu_watchpoint_trigger_t trigger;
if (on_read && on_write) {
@@ -86,32 +82,33 @@ FORCE_INLINE_ATTR void cpu_ll_set_watchpoint(int id,
esp_cpu_set_watchpoint(id, addr, size, trigger);
}
FORCE_INLINE_ATTR void cpu_ll_clear_watchpoint(int id)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_clear_watchpoint(int id)
{
esp_cpu_clear_watchpoint(id);
}
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) bool cpu_ll_is_debugger_attached(void)
{
return esp_cpu_dbgr_is_attached();
}
FORCE_INLINE_ATTR void cpu_ll_break(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_break(void)
{
esp_cpu_dbgr_break();
}
FORCE_INLINE_ATTR void cpu_ll_set_vecbase(const void *base)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_set_vecbase(const void *base)
{
esp_cpu_intr_set_ivt_addr(base);
}
FORCE_INLINE_ATTR void cpu_ll_waiti(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) void cpu_ll_waiti(void)
{
esp_cpu_wait_for_intr();
}
FORCE_INLINE_ATTR void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
FORCE_INLINE_ATTR __attribute__((deprecated))
void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#ifdef __clang_analyzer__
//Teach clang-tidy that "addr" and "set" cannot be const as they can both be updated by S32C1I instruction


@@ -8,7 +8,6 @@
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
@@ -49,7 +48,7 @@ typedef void (*interrupt_handler_t)(void *arg);
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_desc_type(int interrupt_number)
FORCE_INLINE_ATTR __attribute__((deprecated)) int_type_t interrupt_controller_hal_desc_type(int interrupt_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(esp_cpu_get_core_id(), interrupt_number, &intr_desc);
@@ -62,7 +61,7 @@ FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_desc_type(int interrupt_nu
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
FORCE_INLINE_ATTR int interrupt_controller_hal_desc_level(int interrupt_number)
FORCE_INLINE_ATTR __attribute__((deprecated)) int interrupt_controller_hal_desc_level(int interrupt_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(esp_cpu_get_core_id(), interrupt_number, &intr_desc);
@@ -76,7 +75,8 @@ FORCE_INLINE_ATTR int interrupt_controller_hal_desc_level(int interrupt_number)
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
FORCE_INLINE_ATTR int_desc_flag_t interrupt_controller_hal_desc_flags(int interrupt_number, int cpu_number)
FORCE_INLINE_ATTR __attribute__((deprecated))
int_desc_flag_t interrupt_controller_hal_desc_flags(int interrupt_number, int cpu_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(cpu_number, interrupt_number, &intr_desc);
@@ -97,7 +97,7 @@ FORCE_INLINE_ATTR int_desc_flag_t interrupt_controller_hal_desc_flags(int interr
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_get_type(int interrupt_number)
FORCE_INLINE_ATTR __attribute__((deprecated)) int_type_t interrupt_controller_hal_get_type(int interrupt_number)
{
return interrupt_controller_hal_desc_type(interrupt_number);
}
@@ -108,7 +108,7 @@ FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_get_type(int interrupt_num
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
FORCE_INLINE_ATTR int interrupt_controller_hal_get_level(int interrupt_number)
FORCE_INLINE_ATTR __attribute__((deprecated)) int interrupt_controller_hal_get_level(int interrupt_number)
{
return interrupt_controller_hal_desc_level(interrupt_number);
}
@@ -120,7 +120,8 @@ FORCE_INLINE_ATTR int interrupt_controller_hal_get_level(int interrupt_number)
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_get_cpu_desc_flags(int interrupt_number, int cpu_number)
FORCE_INLINE_ATTR __attribute__((deprecated))
uint32_t interrupt_controller_hal_get_cpu_desc_flags(int interrupt_number, int cpu_number)
{
return (uint32_t)interrupt_controller_hal_desc_flags(interrupt_number, cpu_number);
}
@@ -134,7 +135,7 @@ FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_get_cpu_desc_flags(int inter
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_type(int intr, int_type_t type)
FORCE_INLINE_ATTR __attribute__((deprecated)) void interrupt_controller_hal_set_int_type(int intr, int_type_t type)
{
esp_cpu_intr_set_type(intr, (esp_cpu_intr_type_t)type);
}
@@ -145,7 +146,7 @@ FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_type(int intr, int_type_
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_level(int intr, int level)
FORCE_INLINE_ATTR __attribute__((deprecated)) void interrupt_controller_hal_set_int_level(int intr, int level)
{
esp_cpu_intr_set_priority(intr, level);
}
@@ -158,7 +159,7 @@ FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_level(int intr, int leve
* @param cpu this argument is ignored
* @return true for valid handler, false otherwise
*/
FORCE_INLINE_ATTR bool interrupt_controller_hal_has_handler(int intr, int cpu)
FORCE_INLINE_ATTR __attribute__((deprecated)) bool interrupt_controller_hal_has_handler(int intr, int cpu)
{
(void) cpu;
return esp_cpu_intr_has_handler(intr);
@@ -171,7 +172,8 @@ FORCE_INLINE_ATTR bool interrupt_controller_hal_has_handler(int intr, int cpu)
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
FORCE_INLINE_ATTR __attribute__((deprecated))
void interrupt_controller_hal_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
}
@@ -183,7 +185,7 @@ FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_handler(uint8_t intr, in
*
* @return argument used by handler of passed interrupt number
*/
FORCE_INLINE_ATTR void *interrupt_controller_hal_get_int_handler_arg(uint8_t intr)
FORCE_INLINE_ATTR __attribute__((deprecated)) void *interrupt_controller_hal_get_int_handler_arg(uint8_t intr)
{
return esp_cpu_intr_get_handler_arg(intr);
}
@@ -195,7 +197,7 @@ FORCE_INLINE_ATTR void *interrupt_controller_hal_get_int_handler_arg(uint8_t int
*
* @param mask bitmask of interrupts that needs to be enabled
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_enable_interrupts(uint32_t mask)
FORCE_INLINE_ATTR __attribute__((deprecated)) void interrupt_controller_hal_enable_interrupts(uint32_t mask)
{
esp_cpu_intr_enable(mask);
}
@@ -205,7 +207,7 @@ FORCE_INLINE_ATTR void interrupt_controller_hal_enable_interrupts(uint32_t mask)
*
* @param mask bitmask of interrupts that needs to be disabled
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_disable_interrupts(uint32_t mask)
FORCE_INLINE_ATTR __attribute__((deprecated)) void interrupt_controller_hal_disable_interrupts(uint32_t mask)
{
esp_cpu_intr_disable(mask);
}
@@ -215,7 +217,7 @@ FORCE_INLINE_ATTR void interrupt_controller_hal_disable_interrupts(uint32_t mask
*
* @return The bitmask of current interrupts
*/
FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_read_interrupt_mask(void)
FORCE_INLINE_ATTR __attribute__((deprecated)) uint32_t interrupt_controller_hal_read_interrupt_mask(void)
{
return esp_cpu_intr_get_enabled_mask();
}
@@ -225,7 +227,7 @@ FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_read_interrupt_mask(void)
*
* @param intr interrupt number ranged from 0 to 31
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_edge_int_acknowledge(int intr)
FORCE_INLINE_ATTR __attribute__((deprecated)) void interrupt_controller_hal_edge_int_acknowledge(int intr)
{
esp_cpu_intr_edge_ack(intr);
}


@@ -8,7 +8,6 @@
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include "soc/soc_caps.h"


@@ -8,7 +8,6 @@
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include "esp_attr.h"
@@ -18,17 +17,17 @@ Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
extern "C" {
#endif
FORCE_INLINE_ATTR void soc_ll_stall_core(int core)
FORCE_INLINE_ATTR __attribute__((deprecated)) void soc_ll_stall_core(int core)
{
esp_cpu_stall(core);
}
FORCE_INLINE_ATTR void soc_ll_unstall_core(int core)
FORCE_INLINE_ATTR __attribute__((deprecated)) void soc_ll_unstall_core(int core)
{
esp_cpu_unstall(core);
}
FORCE_INLINE_ATTR void soc_ll_reset_core(int core)
FORCE_INLINE_ATTR __attribute__((deprecated)) void soc_ll_reset_core(int core)
{
esp_cpu_reset(core);
}


@@ -5,15 +5,14 @@
*/
#pragma once
#include "sdkconfig.h"
#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "compare_set.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#endif
#ifdef __cplusplus
@@ -35,20 +34,7 @@ extern "C" {
typedef struct {
NEED_VOLATILE_MUX uint32_t owner;
NEED_VOLATILE_MUX uint32_t count;
}spinlock_t;
#if (CONFIG_SPIRAM)
/**
* @brief Check if the pointer is on external ram
* @param p pointer
* @return true: on external ram; false: not on external ram
*/
static inline bool __attribute__((always_inline)) spinlock_ptr_external_ram(const void *p)
{
//On esp32, this external virtual address rergion is for psram
return ((intptr_t)p >= SOC_EXTRAM_DATA_LOW && (intptr_t)p < SOC_EXTRAM_DATA_HIGH);
}
#endif
} spinlock_t;
/**
* @brief Initialize a lock to its default state - unlocked
@@ -80,25 +66,17 @@ static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
uint32_t result;
uint32_t irq_status;
uint32_t ccount_start;
uint32_t core_id, other_core_id;
bool lock_set;
esp_cpu_cycle_count_t start_count;
assert(lock);
irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
if(timeout != SPINLOCK_WAIT_FOREVER){
RSR(CCOUNT, ccount_start);
}
/*spin until we own a core */
RSR(PRID, core_id);
/* Note: coreID is the full 32 bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */
// Note: The core IDs are the full 32 bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
core_id = xt_utils_get_raw_core_id();
other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
do {
/* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
* CORE_ID_REGVAL_APP:
@@ -106,39 +84,50 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
* - If "our" core_id, we can drop through immediately.
* - If "other_core_id", we spin here.
*/
result = core_id;
#if (CONFIG_SPIRAM)
if (spinlock_ptr_external_ram(lock)) {
compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
} else {
#endif
compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
#if (CONFIG_SPIRAM)
}
#endif
if(result != other_core_id) {
break;
}
if (timeout != SPINLOCK_WAIT_FOREVER) {
uint32_t ccount_now;
ccount_now = cpu_hal_get_cycle_count();
if (ccount_now - ccount_start > (unsigned)timeout) {
XTOS_RESTORE_INTLEVEL(irq_status);
return false;
}
}
}while(1);
/* any other value implies memory corruption or uninitialized mux */
assert(result == core_id || result == SPINLOCK_FREE);
assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
assert(lock->count < 0xFF); /* Bad count value implies memory corruption */
// The caller is already the owner of the lock. Simply increment the nesting count
if (lock->owner == core_id) {
assert(lock->count > 0 && lock->count < 0xFF); // Bad count value implies memory corruption
lock->count++;
XTOS_RESTORE_INTLEVEL(irq_status);
return true;
}
/* First attempt to take the lock.
*
* Note: We do a first attempt separately (instead of putting this into a loop) in order to avoid call to
* esp_cpu_get_cycle_count(). This doing a first attempt separately makes acquiring a free lock quicker, which
* is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time since they
* aren't meant to be held for long).
*/
lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
if (lock_set || timeout == SPINLOCK_NO_WAIT) {
// We've successfully taken the lock, or we are not retrying
goto exit;
}
// First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
start_count = esp_cpu_get_cycle_count();
do {
lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
if (lock_set) {
break;
}
// Keep looping if we are waiting forever, or check if we have timed out
} while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= timeout);
exit:
if (lock_set) {
assert(lock->owner == core_id);
assert(lock->count == 0); // This is the first time the lock is set, so count should still be 0
lock->count++; // Finally, we increment the lock count
} else { // We timed out waiting for lock
assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
assert(lock->count < 0xFF); // Bad count value implies memory corruption
}
XTOS_RESTORE_INTLEVEL(irq_status);
return lock_set;
#else // !CONFIG_FREERTOS_UNICORE
return true;
@@ -167,11 +156,11 @@ static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *l
assert(lock);
irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
RSR(PRID, core_id);
assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
core_id = xt_utils_get_raw_core_id();
assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
lock->count--;
if(!lock->count) {
if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
lock->owner = SPINLOCK_FREE;
} else {
assert(lock->count < 0x100); // Indicates memory corruption
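For reference, a minimal usage sketch of the reworked spinlock API above (illustrative only, not part of the diff; it assumes the spinlock.h header that declares these functions, and the timeout argument is in CPU cycles, as implied by the esp_cpu_get_cycle_count() comparison in spinlock_acquire):

    #include <stdbool.h>
    #include "spinlock.h"   // assumed header for the spinlock_* API shown above

    static spinlock_t s_lock;

    void example_init(void)
    {
        spinlock_initialize(&s_lock);   // do this once, before first use
    }

    void example_critical_section(void)
    {
        // Blocks (with interrupts masked at EXCM level) until this core owns the
        // lock; a nested acquire on the same core only increments lock->count.
        if (spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER)) {
            // ... critical section ...
            spinlock_release(&s_lock);
        }
    }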

View File

@@ -17,11 +17,11 @@
#include "freertos/task.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_memory_utils.h"
#include "esp_intr_alloc.h"
#include "esp_attr.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "esp_private/rtc_ctrl.h"
#include "hal/interrupt_controller_hal.h"
#if !CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
@@ -106,31 +106,33 @@ static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
//with an incrementing cpu.intno value.
static void insert_vector_desc(vector_desc_t *to_insert)
{
vector_desc_t *vd=vector_desc_head;
vector_desc_t *prev=NULL;
while(vd!=NULL) {
vector_desc_t *vd = vector_desc_head;
vector_desc_t *prev = NULL;
while(vd != NULL) {
if (vd->cpu > to_insert->cpu) break;
if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
prev=vd;
vd=vd->next;
prev = vd;
vd = vd->next;
}
if ((vector_desc_head==NULL) || (prev==NULL)) {
if ((vector_desc_head == NULL) || (prev == NULL)) {
//First item
to_insert->next = vd;
vector_desc_head=to_insert;
vector_desc_head = to_insert;
} else {
prev->next=to_insert;
to_insert->next=vd;
prev->next = to_insert;
to_insert->next = vd;
}
}
//Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
static vector_desc_t *find_desc_for_int(int intno, int cpu)
{
vector_desc_t *vd=vector_desc_head;
while(vd!=NULL) {
if (vd->cpu==cpu && vd->intno==intno) break;
vd=vd->next;
vector_desc_t *vd = vector_desc_head;
while(vd != NULL) {
if (vd->cpu == cpu && vd->intno == intno) {
break;
}
vd = vd->next;
}
return vd;
}
@@ -140,13 +142,15 @@ static vector_desc_t *find_desc_for_int(int intno, int cpu)
//it into the list. Returns NULL on malloc fail.
static vector_desc_t *get_desc_for_int(int intno, int cpu)
{
vector_desc_t *vd=find_desc_for_int(intno, cpu);
if (vd==NULL) {
vector_desc_t *newvd=heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (newvd==NULL) return NULL;
vector_desc_t *vd = find_desc_for_int(intno, cpu);
if (vd == NULL) {
vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
if (newvd == NULL) {
return NULL;
}
memset(newvd, 0, sizeof(vector_desc_t));
newvd->intno=intno;
newvd->cpu=cpu;
newvd->intno = intno;
newvd->cpu = cpu;
insert_vector_desc(newvd);
return newvd;
} else {
@@ -157,42 +161,52 @@ static vector_desc_t *get_desc_for_int(int intno, int cpu)
//Returns a vector_desc entry for a source; the cpu parameter is used to tell apart GPIO_INT and GPIO_NMI from different CPUs
static vector_desc_t * find_desc_for_source(int source, int cpu)
{
vector_desc_t *vd=vector_desc_head;
while(vd!=NULL) {
if ( !(vd->flags & VECDESC_FL_SHARED) ) {
if ( vd->source == source && cpu == vd->cpu ) break;
} else if ( vd->cpu == cpu ) {
vector_desc_t *vd = vector_desc_head;
while(vd != NULL) {
if (!(vd->flags & VECDESC_FL_SHARED)) {
if (vd->source == source && cpu == vd->cpu) {
break;
}
} else if (vd->cpu == cpu) {
// check only shared vds for the correct cpu, otherwise skip
bool found = false;
shared_vector_desc_t *svd = vd->shared_vec_info;
assert(svd != NULL );
assert(svd != NULL);
while(svd) {
if ( svd->source == source ) {
if (svd->source == source) {
found = true;
break;
}
svd = svd->next;
}
if ( found ) break;
if (found) {
break;
}
vd=vd->next;
}
vd = vd->next;
}
return vd;
}
esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
{
if (intno>31) return ESP_ERR_INVALID_ARG;
if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;
if (intno>31) {
return ESP_ERR_INVALID_ARG;
}
if (cpu >= SOC_CPU_CORES_NUM) {
return ESP_ERR_INVALID_ARG;
}
portENTER_CRITICAL(&spinlock);
vector_desc_t *vd=get_desc_for_int(intno, cpu);
if (vd==NULL) {
vector_desc_t *vd = get_desc_for_int(intno, cpu);
if (vd == NULL) {
portEXIT_CRITICAL(&spinlock);
return ESP_ERR_NO_MEM;
}
vd->flags=VECDESC_FL_SHARED;
if (is_int_ram) vd->flags|=VECDESC_FL_INIRAM;
vd->flags = VECDESC_FL_SHARED;
if (is_int_ram) {
vd->flags |= VECDESC_FL_INIRAM;
}
portEXIT_CRITICAL(&spinlock);
return ESP_OK;
@@ -200,16 +214,20 @@ esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
esp_err_t esp_intr_reserve(int intno, int cpu)
{
if (intno>31) return ESP_ERR_INVALID_ARG;
if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;
if (intno > 31) {
return ESP_ERR_INVALID_ARG;
}
if (cpu >= SOC_CPU_CORES_NUM) {
return ESP_ERR_INVALID_ARG;
}
portENTER_CRITICAL(&spinlock);
vector_desc_t *vd=get_desc_for_int(intno, cpu);
if (vd==NULL) {
vector_desc_t *vd = get_desc_for_int(intno, cpu);
if (vd == NULL) {
portEXIT_CRITICAL(&spinlock);
return ESP_ERR_NO_MEM;
}
vd->flags=VECDESC_FL_RESERVED;
vd->flags = VECDESC_FL_RESERVED;
portEXIT_CRITICAL(&spinlock);
return ESP_OK;
@@ -219,49 +237,52 @@ static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force
{
//Check if interrupt is not reserved by design
int x = vd->intno;
if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_RESVD) {
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(cpu, x, &intr_desc);
if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
ALCHLOG("....Unusable: reserved");
return false;
}
if (interrupt_controller_hal_get_cpu_desc_flags(x, cpu)==INTDESC_SPECIAL && force==-1) {
if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
ALCHLOG("....Unusable: special-purpose int");
return false;
}
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
//Check if the interrupt level is acceptable
if (!(flags&(1<<interrupt_controller_hal_get_level(x)))) {
ALCHLOG("....Unusable: incompatible level");
//Check if the interrupt priority is acceptable
if (!(flags & (1 << intr_desc.priority))) {
ALCHLOG("....Unusable: incompatible priority");
return false;
}
//check if edge/level type matches what we want
if (((flags&ESP_INTR_FLAG_EDGE) && (interrupt_controller_hal_get_type(x)==INTTP_LEVEL)) ||
(((!(flags&ESP_INTR_FLAG_EDGE)) && (interrupt_controller_hal_get_type(x)==INTTP_EDGE)))) {
if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
(((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
ALCHLOG("....Unusable: incompatible trigger type");
return false;
}
#endif
//check if interrupt is reserved at runtime
if (vd->flags&VECDESC_FL_RESERVED) {
if (vd->flags & VECDESC_FL_RESERVED) {
ALCHLOG("....Unusable: reserved at runtime.");
return false;
}
//Ints can't be both shared and non-shared.
assert(!((vd->flags&VECDESC_FL_SHARED)&&(vd->flags&VECDESC_FL_NONSHARED)));
assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
//check if interrupt already is in use by a non-shared interrupt
if (vd->flags&VECDESC_FL_NONSHARED) {
if (vd->flags & VECDESC_FL_NONSHARED) {
ALCHLOG("....Unusable: already in (non-shared) use.");
return false;
}
// check shared interrupt flags
if (vd->flags&VECDESC_FL_SHARED ) {
if (flags&ESP_INTR_FLAG_SHARED) {
bool in_iram_flag=((flags&ESP_INTR_FLAG_IRAM)!=0);
bool desc_in_iram_flag=((vd->flags&VECDESC_FL_INIRAM)!=0);
if (vd->flags & VECDESC_FL_SHARED) {
if (flags & ESP_INTR_FLAG_SHARED) {
bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
//Bail out if int is shared, but iram property doesn't match what we want.
if ((vd->flags&VECDESC_FL_SHARED) && (desc_in_iram_flag!=in_iram_flag)) {
if ((vd->flags & VECDESC_FL_SHARED) && (desc_in_iram_flag != in_iram_flag)) {
ALCHLOG("....Unusable: shared but iram prop doesn't match");
return false;
}
@@ -270,8 +291,8 @@ static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force
ALCHLOG("...Unusable: int is shared, we need non-shared.");
return false;
}
} else if (interrupt_controller_hal_has_handler(x, cpu)) {
//Check if interrupt already is allocated by interrupt_controller_hal_set_int_handler
} else if (esp_cpu_intr_has_handler(x)) {
//Check if interrupt already is allocated by esp_cpu_intr_set_handler
ALCHLOG("....Unusable: already allocated");
return false;
}
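The block above collapses three separate interrupt_controller_hal_* queries into a single descriptor lookup. A hedged sketch of that pattern in isolation, using only names visible in this hunk (the helper itself is illustrative):

    #include <stdbool.h>
    #include "esp_cpu.h"

    // Returns true if interrupt line 'intno' on core 'core_id' is neither
    // reserved nor special-purpose and is level-triggered (a simplified subset
    // of the checks done in is_vect_desc_usable() above).
    static bool intr_line_is_plain_level(int core_id, int intno)
    {
        esp_cpu_intr_desc_t intr_desc;
        esp_cpu_intr_get_desc(core_id, intno, &intr_desc);

        if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
            return false;   // reserved by design
        }
        if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL) {
            return false;   // tied to a special-purpose source
        }
        return intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL;
    }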
@@ -281,12 +302,12 @@ static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force
//Locate a free interrupt compatible with the flags given.
//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt.
//When a CPU is forced, the INTDESC_SPECIAL marked interrupts are also accepted.
//When a CPU is forced, the ESP_CPU_INTR_DESC_FLAG_SPECIAL marked interrupts are also accepted.
static int get_available_int(int flags, int cpu, int force, int source)
{
int x;
int best=-1;
int bestLevel=9;
int bestPriority=9;
int bestSharedCt=INT_MAX;
//Default vector desc, for vectors not in the linked list
@@ -294,32 +315,34 @@ static int get_available_int(int flags, int cpu, int force, int source)
memset(&empty_vect_desc, 0, sizeof(vector_desc_t));
//Level defaults to any low/med interrupt
if (!(flags&ESP_INTR_FLAG_LEVELMASK)) flags|=ESP_INTR_FLAG_LOWMED;
if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
flags |= ESP_INTR_FLAG_LOWMED;
}
ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
vector_desc_t *vd = find_desc_for_source(source, cpu);
if ( vd ) {
if (vd) {
// if existing vd found, don't need to search any more.
ALCHLOG("get_avalible_int: existing vd found. intno: %d", vd->intno);
if ( force != -1 && force != vd->intno ) {
ALCHLOG("get_avalible_int: intr forced but not matach existing. existing intno: %d, force: %d", vd->intno, force);
} else if ( !is_vect_desc_usable(vd, flags, cpu, force) ) {
} else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
ALCHLOG("get_avalible_int: existing vd invalid.");
} else {
best = vd->intno;
}
return best;
}
if (force!=-1) {
if (force != -1) {
ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
//if force assigned, don't need to search any more.
vd = find_desc_for_int(force, cpu);
if (vd == NULL ) {
if (vd == NULL) {
//if existing vd not found, just check the default state for the intr.
empty_vect_desc.intno = force;
vd = &empty_vect_desc;
}
if ( is_vect_desc_usable(vd, flags, cpu, force) ) {
if (is_vect_desc_usable(vd, flags, cpu, force)) {
best = vd->intno;
} else {
ALCHLOG("get_avalible_int: forced vd invalid.");
@@ -329,50 +352,55 @@ static int get_available_int(int flags, int cpu, int force, int source)
ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
//No allocated handlers as well as forced intr, iterate over the 32 possible interrupts
for (x=0; x<32; x++) {
for (x = 0; x < 32; x++) {
//Grab the vector_desc for this vector.
vd=find_desc_for_int(x, cpu);
if (vd==NULL) {
vd = find_desc_for_int(x, cpu);
if (vd == NULL) {
empty_vect_desc.intno = x;
vd=&empty_vect_desc;
vd = &empty_vect_desc;
}
ALCHLOG("Int %d reserved %d level %d %s hasIsr %d",
x, interrupt_controller_hal_get_cpu_desc_flags(x,cpu)==INTDESC_RESVD, interrupt_controller_hal_get_level(x),
interrupt_controller_hal_get_type(x)==INTTP_LEVEL?"LEVEL":"EDGE", interrupt_controller_hal_has_handler(x, cpu));
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(cpu, x, &intr_desc);
if ( !is_vect_desc_usable(vd, flags, cpu, force) ) continue;
ALCHLOG("Int %d reserved %d priority %d %s hasIsr %d",
x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL? "LEVEL" : "EDGE", esp_cpu_intr_has_handler(x));
if (flags&ESP_INTR_FLAG_SHARED) {
if (!is_vect_desc_usable(vd, flags, cpu, force)) {
continue;
}
if (flags & ESP_INTR_FLAG_SHARED) {
//We're allocating a shared int.
//See if int already is used as a shared interrupt.
if (vd->flags&VECDESC_FL_SHARED) {
if (vd->flags & VECDESC_FL_SHARED) {
//We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
//how useful it is.
int no=0;
shared_vector_desc_t *svdesc=vd->shared_vec_info;
while (svdesc!=NULL) {
int no = 0;
shared_vector_desc_t *svdesc = vd->shared_vec_info;
while (svdesc != NULL) {
no++;
svdesc=svdesc->next;
svdesc = svdesc->next;
}
if (no<bestSharedCt || bestLevel>interrupt_controller_hal_get_level(x)) {
if (no<bestSharedCt || bestPriority > intr_desc.priority) {
//Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
best=x;
bestSharedCt=no;
bestLevel=interrupt_controller_hal_get_level(x);
best = x;
bestSharedCt = no;
bestPriority = intr_desc.priority;
ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
} else {
ALCHLOG("...worse than int %d", best);
}
} else {
if (best==-1) {
if (best == -1) {
//We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
//not marked as shared.
//Remember it in case we don't find any other shared interrupt that qualifies.
if (bestLevel>interrupt_controller_hal_get_level(x)) {
best=x;
bestLevel=interrupt_controller_hal_get_level(x);
if (bestPriority > intr_desc.priority) {
best = x;
bestPriority = intr_desc.priority;
ALCHLOG("...int %d usable as a new shared int", x);
}
} else {
@@ -381,9 +409,9 @@ static int get_available_int(int flags, int cpu, int force, int source)
}
} else {
//Seems this interrupt is feasible. Select it and break out of the loop; no need to search further.
if (bestLevel>interrupt_controller_hal_get_level(x)) {
best=x;
bestLevel=interrupt_controller_hal_get_level(x);
if (bestPriority > intr_desc.priority) {
best = x;
bestPriority = intr_desc.priority;
} else {
ALCHLOG("...worse than int %d", best);
}
@@ -398,21 +426,21 @@ static int get_available_int(int flags, int cpu, int force, int source)
//Common shared isr handler. Chain-call all ISRs.
static void IRAM_ATTR shared_intr_isr(void *arg)
{
vector_desc_t *vd=(vector_desc_t*)arg;
shared_vector_desc_t *sh_vec=vd->shared_vec_info;
vector_desc_t *vd = (vector_desc_t*)arg;
shared_vector_desc_t *sh_vec = vd->shared_vec_info;
portENTER_CRITICAL_ISR(&spinlock);
while(sh_vec) {
if (!sh_vec->disabled) {
if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
traceISR_ENTER(sh_vec->source+ETS_INTERNAL_INTR_SOURCE_OFF);
traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF);
sh_vec->isr(sh_vec->arg);
// check if we will return to scheduler or to interrupted task after ISR
if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
traceISR_EXIT();
}
}
}
sh_vec=sh_vec->next;
sh_vec = sh_vec->next;
}
portEXIT_CRITICAL_ISR(&spinlock);
}
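shared_intr_isr() above only dispatches to handlers whose status-register bit is set, which is what lets several drivers share one interrupt line. A hedged usage sketch; the peripheral register address, mask and names are placeholders, not values from this diff:

    #include "esp_attr.h"
    #include "esp_intr_alloc.h"

    // Hypothetical peripheral interrupt-status register and bit; replace with
    // the real peripheral's values.
    #define MY_PERIPH_INT_ST_REG   0x3FF00000
    #define MY_PERIPH_INT_ST_MASK  (1 << 0)

    static void IRAM_ATTR my_periph_isr(void *arg)
    {
        // Only invoked when (*status_reg & status_mask) != 0, as checked in
        // shared_intr_isr() above.
    }

    esp_err_t my_periph_install_isr(int source, intr_handle_t *out_handle)
    {
        // Shared interrupts must be level-triggered and must provide a handler.
        return esp_intr_alloc_intrstatus(source,
                                         ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LEVEL1,
                                         MY_PERIPH_INT_ST_REG, MY_PERIPH_INT_ST_MASK,
                                         my_periph_isr, NULL, out_handle);
    }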
@@ -421,14 +449,14 @@ static void IRAM_ATTR shared_intr_isr(void *arg)
//Common non-shared isr handler wrapper.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
non_shared_isr_arg_t *ns_isr_arg=(non_shared_isr_arg_t*)arg;
non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t*)arg;
portENTER_CRITICAL_ISR(&spinlock);
traceISR_ENTER(ns_isr_arg->source+ETS_INTERNAL_INTR_SOURCE_OFF);
traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF);
// FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
// when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
ns_isr_arg->isr(ns_isr_arg->isr_arg);
// check if we will return to scheduler or to interrupted task after ISR
if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
traceISR_EXIT();
}
portEXIT_CRITICAL_ISR(&spinlock);
@@ -440,16 +468,24 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
void *arg, intr_handle_t *ret_handle)
{
intr_handle_data_t *ret=NULL;
int force=-1;
ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", cpu_hal_get_core_id());
int force = -1;
ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", esp_cpu_get_core_id());
//Shared interrupts should be level-triggered.
if ((flags&ESP_INTR_FLAG_SHARED) && (flags&ESP_INTR_FLAG_EDGE)) return ESP_ERR_INVALID_ARG;
if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
return ESP_ERR_INVALID_ARG;
}
//You can't set a handler / arg for a non-C-callable interrupt.
if ((flags&ESP_INTR_FLAG_HIGH) && (handler)) return ESP_ERR_INVALID_ARG;
if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
return ESP_ERR_INVALID_ARG;
}
//Shared ints should have handler and non-processor-local source
if ((flags&ESP_INTR_FLAG_SHARED) && (!handler || source<0)) return ESP_ERR_INVALID_ARG;
if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source<0)) {
return ESP_ERR_INVALID_ARG;
}
//Statusreg should have a mask
if (intrstatusreg && !intrstatusmask) return ESP_ERR_INVALID_ARG;
if (intrstatusreg && !intrstatusmask) {
return ESP_ERR_INVALID_ARG;
}
//If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
//ToDo: if we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
//we need to make sure the interrupt is connected to the CPU0.
@@ -466,70 +502,84 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
}
//Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
if ((flags&ESP_INTR_FLAG_LEVELMASK)==0) {
if (flags&ESP_INTR_FLAG_SHARED) {
flags|=ESP_INTR_FLAG_LEVEL1;
if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
if (flags & ESP_INTR_FLAG_SHARED) {
flags |= ESP_INTR_FLAG_LEVEL1;
} else {
flags|=ESP_INTR_FLAG_LOWMED;
flags |= ESP_INTR_FLAG_LOWMED;
}
}
ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", cpu_hal_get_core_id(), flags);
ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", esp_cpu_get_core_id(), flags);
//Check 'special' interrupt sources. These are tied to one specific interrupt, so we
//have to force get_free_int to only look at that.
if (source==ETS_INTERNAL_TIMER0_INTR_SOURCE) force=ETS_INTERNAL_TIMER0_INTR_NO;
if (source==ETS_INTERNAL_TIMER1_INTR_SOURCE) force=ETS_INTERNAL_TIMER1_INTR_NO;
if (source==ETS_INTERNAL_TIMER2_INTR_SOURCE) force=ETS_INTERNAL_TIMER2_INTR_NO;
if (source==ETS_INTERNAL_SW0_INTR_SOURCE) force=ETS_INTERNAL_SW0_INTR_NO;
if (source==ETS_INTERNAL_SW1_INTR_SOURCE) force=ETS_INTERNAL_SW1_INTR_NO;
if (source==ETS_INTERNAL_PROFILING_INTR_SOURCE) force=ETS_INTERNAL_PROFILING_INTR_NO;
if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) {
force = ETS_INTERNAL_TIMER0_INTR_NO;
}
if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) {
force = ETS_INTERNAL_TIMER1_INTR_NO;
}
if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) {
force = ETS_INTERNAL_TIMER2_INTR_NO;
}
if (source == ETS_INTERNAL_SW0_INTR_SOURCE) {
force = ETS_INTERNAL_SW0_INTR_NO;
}
if (source == ETS_INTERNAL_SW1_INTR_SOURCE) {
force = ETS_INTERNAL_SW1_INTR_NO;
}
if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) {
force = ETS_INTERNAL_PROFILING_INTR_NO;
}
//Allocate a return handle. If we end up not needing it, we'll free it later on.
ret=heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (ret==NULL) return ESP_ERR_NO_MEM;
ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
if (ret == NULL) {
return ESP_ERR_NO_MEM;
}
portENTER_CRITICAL(&spinlock);
uint32_t cpu = cpu_hal_get_core_id();
uint32_t cpu = esp_cpu_get_core_id();
//See if we can find an interrupt that matches the flags.
int intr=get_available_int(flags, cpu, force, source);
if (intr==-1) {
int intr = get_available_int(flags, cpu, force, source);
if (intr == -1) {
//None found. Bail out.
portEXIT_CRITICAL(&spinlock);
free(ret);
return ESP_ERR_NOT_FOUND;
}
//Get an int vector desc for int.
vector_desc_t *vd=get_desc_for_int(intr, cpu);
if (vd==NULL) {
vector_desc_t *vd = get_desc_for_int(intr, cpu);
if (vd == NULL) {
portEXIT_CRITICAL(&spinlock);
free(ret);
return ESP_ERR_NO_MEM;
}
//Allocate that int!
if (flags&ESP_INTR_FLAG_SHARED) {
if (flags & ESP_INTR_FLAG_SHARED) {
//Populate vector entry and add to linked list.
shared_vector_desc_t *sh_vec=malloc(sizeof(shared_vector_desc_t));
if (sh_vec==NULL) {
if (sh_vec == NULL) {
portEXIT_CRITICAL(&spinlock);
free(ret);
return ESP_ERR_NO_MEM;
}
memset(sh_vec, 0, sizeof(shared_vector_desc_t));
sh_vec->statusreg=(uint32_t*)intrstatusreg;
sh_vec->statusmask=intrstatusmask;
sh_vec->isr=handler;
sh_vec->arg=arg;
sh_vec->next=vd->shared_vec_info;
sh_vec->source=source;
sh_vec->disabled=0;
vd->shared_vec_info=sh_vec;
vd->flags|=VECDESC_FL_SHARED;
sh_vec->statusreg = (uint32_t*)intrstatusreg;
sh_vec->statusmask = intrstatusmask;
sh_vec->isr = handler;
sh_vec->arg = arg;
sh_vec->next = vd->shared_vec_info;
sh_vec->source = source;
sh_vec->disabled = 0;
vd->shared_vec_info = sh_vec;
vd->flags |= VECDESC_FL_SHARED;
//(Re-)set shared isr handler to new value.
interrupt_controller_hal_set_int_handler(intr, shared_intr_isr, vd);
esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)shared_intr_isr, vd);
} else {
//Mark as unusable for other interrupt sources. This is ours now!
vd->flags=VECDESC_FL_NONSHARED;
vd->flags = VECDESC_FL_NONSHARED;
if (handler) {
#if CONFIG_APPTRACE_SV_ENABLE
non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
@@ -538,62 +588,62 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
free(ret);
return ESP_ERR_NO_MEM;
}
ns_isr_arg->isr=handler;
ns_isr_arg->isr_arg=arg;
ns_isr_arg->source=source;
interrupt_controller_hal_set_int_handler(intr, non_shared_intr_isr, ns_isr_arg);
ns_isr_arg->isr = handler;
ns_isr_arg->isr_arg = arg;
ns_isr_arg->source = source;
esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)non_shared_intr_isr, ns_isr_arg);
#else
interrupt_controller_hal_set_int_handler(intr, handler, arg);
esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
#endif
}
if (flags & ESP_INTR_FLAG_EDGE) {
interrupt_controller_hal_edge_int_acknowledge(intr);
esp_cpu_intr_edge_ack(intr);
}
vd->source=source;
vd->source = source;
}
if (flags&ESP_INTR_FLAG_IRAM) {
vd->flags|=VECDESC_FL_INIRAM;
non_iram_int_mask[cpu]&=~(1<<intr);
if (flags & ESP_INTR_FLAG_IRAM) {
vd->flags |= VECDESC_FL_INIRAM;
non_iram_int_mask[cpu] &= ~(1<<intr);
} else {
vd->flags&=~VECDESC_FL_INIRAM;
non_iram_int_mask[cpu]|=(1<<intr);
vd->flags &= ~VECDESC_FL_INIRAM;
non_iram_int_mask[cpu] |= (1<<intr);
}
if (source>=0) {
esp_rom_route_intr_matrix(cpu, source, intr);
}
//Fill return handle data.
ret->vector_desc=vd;
ret->shared_vector_desc=vd->shared_vec_info;
ret->vector_desc = vd;
ret->shared_vector_desc = vd->shared_vec_info;
//Enable int at CPU-level;
ESP_INTR_ENABLE(intr);
//If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
//of the critical section.
if (flags&ESP_INTR_FLAG_INTRDISABLED) {
if (flags & ESP_INTR_FLAG_INTRDISABLED) {
esp_intr_disable(ret);
}
#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
//Extract the level from the interrupt passed flags
int level = esp_intr_flags_to_level(flags);
interrupt_controller_hal_set_int_level(intr, level);
esp_cpu_intr_set_priority(intr, level);
if (flags & ESP_INTR_FLAG_EDGE) {
interrupt_controller_hal_set_int_type(intr, INTTP_EDGE);
esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_EDGE);
} else {
interrupt_controller_hal_set_int_type(intr, INTTP_LEVEL);
esp_cpu_intr_set_type(intr, ESP_CPU_INTR_TYPE_LEVEL);
}
#endif
portEXIT_CRITICAL(&spinlock);
//Fill return handle if needed, otherwise free handle.
if (ret_handle!=NULL) {
*ret_handle=ret;
if (ret_handle != NULL) {
*ret_handle = ret;
} else {
free(ret);
}
@@ -614,7 +664,9 @@ esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *ar
esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
{
if (!handle) return ESP_ERR_INVALID_ARG;
if (!handle) {
return ESP_ERR_INVALID_ARG;
}
vector_desc_t *vd = handle->vector_desc;
if (vd->flags & VECDESC_FL_SHARED) {
return ESP_ERR_INVALID_ARG;
@@ -642,11 +694,13 @@ static void esp_intr_free_cb(void *arg)
esp_err_t esp_intr_free(intr_handle_t handle)
{
bool free_shared_vector=false;
if (!handle) return ESP_ERR_INVALID_ARG;
if (!handle) {
return ESP_ERR_INVALID_ARG;
}
#if !CONFIG_FREERTOS_UNICORE
//Assign this routine to the core where this interrupt is allocated on.
if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
return ret == ESP_OK ? ESP_OK : ESP_FAIL;
}
@@ -654,48 +708,53 @@ esp_err_t esp_intr_free(intr_handle_t handle)
portENTER_CRITICAL(&spinlock);
esp_intr_disable(handle);
if (handle->vector_desc->flags&VECDESC_FL_SHARED) {
if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
//Find and kill the shared int
shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
shared_vector_desc_t *prevsvd=NULL;
shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
shared_vector_desc_t *prevsvd = NULL;
assert(svd); //should be something in there for a shared int
while (svd!=NULL) {
if (svd==handle->shared_vector_desc) {
while (svd != NULL) {
if (svd == handle->shared_vector_desc) {
//Found it. Now kill it.
if (prevsvd) {
prevsvd->next=svd->next;
prevsvd->next = svd->next;
} else {
handle->vector_desc->shared_vec_info=svd->next;
handle->vector_desc->shared_vec_info = svd->next;
}
free(svd);
break;
}
prevsvd=svd;
svd=svd->next;
prevsvd = svd;
svd = svd->next;
}
//If nothing left, disable interrupt.
if (handle->vector_desc->shared_vec_info==NULL) free_shared_vector=true;
ESP_EARLY_LOGV(TAG, "esp_intr_free: Deleting shared int: %s. Shared int is %s", svd?"not found or last one":"deleted", free_shared_vector?"empty now.":"still in use");
if (handle->vector_desc->shared_vec_info == NULL) {
free_shared_vector = true;
}
ESP_EARLY_LOGV(TAG,
"esp_intr_free: Deleting shared int: %s. Shared int is %s",
svd ? "not found or last one" : "deleted",
free_shared_vector ? "empty now." : "still in use");
}
if ((handle->vector_desc->flags&VECDESC_FL_NONSHARED) || free_shared_vector) {
if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_APPTRACE_SV_ENABLE
if (!free_shared_vector) {
void *isr_arg = interrupt_controller_hal_get_int_handler_arg(handle->vector_desc->intno);
void *isr_arg = esp_cpu_intr_get_handler_arg(handle->vector_desc->intno);
if (isr_arg) {
free(isr_arg);
}
}
#endif
//Reset to normal handler:
interrupt_controller_hal_set_int_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
esp_cpu_intr_set_handler(handle->vector_desc->intno, NULL, (void*)((int)handle->vector_desc->intno));
//Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
//we save. (We can also not use the same exit path for empty shared ints anymore if we delete
//the desc.) For now, just mark it as free.
handle->vector_desc->flags&=~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
//Also kill non_iram mask bit.
non_iram_int_mask[handle->vector_desc->cpu]&=~(1<<(handle->vector_desc->intno));
non_iram_int_mask[handle->vector_desc->cpu] &= ~(1<<(handle->vector_desc->intno));
}
portEXIT_CRITICAL(&spinlock);
free(handle);
@@ -725,11 +784,13 @@ int esp_intr_get_cpu(intr_handle_t handle)
esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
{
if (!handle) return ESP_ERR_INVALID_ARG;
if (!handle) {
return ESP_ERR_INVALID_ARG;
}
portENTER_CRITICAL_SAFE(&spinlock);
int source;
if (handle->shared_vector_desc) {
handle->shared_vector_desc->disabled=0;
handle->shared_vector_desc->disabled = 0;
source=handle->shared_vector_desc->source;
} else {
source=handle->vector_desc->source;
@@ -739,7 +800,9 @@ esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, handle->vector_desc->intno);
} else {
//Re-enable using cpu int ena reg
if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
}
ESP_INTR_ENABLE(handle->vector_desc->intno);
}
portEXIT_CRITICAL_SAFE(&spinlock);
@@ -748,18 +811,20 @@ esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{
if (!handle) return ESP_ERR_INVALID_ARG;
if (!handle) {
return ESP_ERR_INVALID_ARG;
}
portENTER_CRITICAL_SAFE(&spinlock);
int source;
bool disabled = 1;
if (handle->shared_vector_desc) {
handle->shared_vector_desc->disabled=1;
handle->shared_vector_desc->disabled = 1;
source=handle->shared_vector_desc->source;
shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
assert( svd != NULL );
while( svd ) {
if ( svd->source == source && svd->disabled == 0 ) {
shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
assert(svd != NULL);
while(svd) {
if (svd->source == source && svd->disabled == 0) {
disabled = 0;
break;
}
@@ -770,13 +835,13 @@ esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
}
if (source >= 0) {
if ( disabled ) {
if (disabled) {
//Disable using int matrix
esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
}
} else {
//Disable using per-cpu regs
if (handle->vector_desc->cpu!=cpu_hal_get_core_id()) {
if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
portEXIT_CRITICAL_SAFE(&spinlock);
return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu
}
@@ -790,14 +855,14 @@ void IRAM_ATTR esp_intr_noniram_disable(void)
{
portENTER_CRITICAL_SAFE(&spinlock);
uint32_t oldint;
uint32_t cpu = cpu_hal_get_core_id();
uint32_t cpu = esp_cpu_get_core_id();
uint32_t non_iram_ints = non_iram_int_mask[cpu];
if (non_iram_int_disabled_flag[cpu]) {
abort();
}
non_iram_int_disabled_flag[cpu] = true;
oldint = interrupt_controller_hal_read_interrupt_mask();
interrupt_controller_hal_disable_interrupts(non_iram_ints);
oldint = esp_cpu_intr_get_enabled_mask();
esp_cpu_intr_disable(non_iram_ints);
// Disable the RTC bit which don't want to be put in IRAM.
rtc_isr_noniram_disable(cpu);
// Save disabled ints
@@ -808,13 +873,13 @@ void IRAM_ATTR esp_intr_noniram_disable(void)
void IRAM_ATTR esp_intr_noniram_enable(void)
{
portENTER_CRITICAL_SAFE(&spinlock);
uint32_t cpu = cpu_hal_get_core_id();
uint32_t cpu = esp_cpu_get_core_id();
int non_iram_ints = non_iram_int_disabled[cpu];
if (!non_iram_int_disabled_flag[cpu]) {
abort();
}
non_iram_int_disabled_flag[cpu] = false;
interrupt_controller_hal_enable_interrupts(non_iram_ints);
esp_cpu_intr_enable(non_iram_ints);
rtc_isr_noniram_enable(cpu);
portEXIT_CRITICAL_SAFE(&spinlock);
}
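Both functions above reduce to a save/disable/restore pattern on the per-core interrupt enable mask. A hedged standalone sketch of that pattern with the new esp_cpu calls (illustrative only, not part of the diff):

    #include <stdint.h>
    #include "esp_cpu.h"

    // Temporarily mask a set of CPU interrupt lines around a callback, then
    // restore exactly the lines that were enabled beforehand.
    static void run_with_ints_masked(uint32_t mask_to_disable, void (*fn)(void))
    {
        uint32_t enabled_before = esp_cpu_intr_get_enabled_mask();
        esp_cpu_intr_disable(mask_to_disable);

        fn();

        // Re-enable only lines that were both enabled before and masked here.
        esp_cpu_intr_enable(enabled_before & mask_to_disable);
    }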
@@ -825,19 +890,19 @@ void IRAM_ATTR esp_intr_noniram_enable(void)
void IRAM_ATTR ets_isr_unmask(uint32_t mask) {
interrupt_controller_hal_enable_interrupts(mask);
esp_cpu_intr_enable(mask);
}
void IRAM_ATTR ets_isr_mask(uint32_t mask) {
interrupt_controller_hal_disable_interrupts(mask);
esp_cpu_intr_disable(mask);
}
void esp_intr_enable_source(int inum)
{
interrupt_controller_hal_enable_interrupts(1 << inum);
esp_cpu_intr_enable(1 << inum);
}
void esp_intr_disable_source(int inum)
{
interrupt_controller_hal_disable_interrupts(1 << inum);
esp_cpu_intr_disable(1 << inum);
}
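For completeness, the usual non-shared allocation path above is driven like this from a driver (a hedged sketch; the source number and handler are placeholders):

    #include "esp_attr.h"
    #include "esp_err.h"
    #include "esp_intr_alloc.h"

    static void IRAM_ATTR my_isr(void *arg)
    {
        // Keep this short; it runs on the core that called esp_intr_alloc().
    }

    esp_err_t install_and_remove(int source)
    {
        intr_handle_t handle;
        // Allocate a low/medium priority interrupt on the calling core and keep
        // the ISR callable while flash cache is disabled (ESP_INTR_FLAG_IRAM).
        esp_err_t err = esp_intr_alloc(source, ESP_INTR_FLAG_IRAM, my_isr, NULL, &handle);
        if (err != ESP_OK) {
            return err;
        }
        // esp_intr_free() must run on the core that allocated the interrupt,
        // otherwise it is proxied via esp_ipc_call_blocking() as shown above.
        return esp_intr_free(handle);
    }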

View File

@@ -15,7 +15,6 @@
#include "hal/efuse_ll.h"
#include "hal/efuse_hal.h"
#include "soc/gpio_struct.h"
#include "hal/cpu_hal.h"
#include "hal/gpio_ll.h"
#include "esp_hw_log.h"
#include "sdkconfig.h"

View File

@@ -10,11 +10,11 @@
#include <stdlib.h>
#include "esp32/rom/rtc.h"
#include "esp_rom_uart.h"
#include "esp_cpu.h"
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/sens_periph.h"
#include "soc/efuse_periph.h"
#include "hal/cpu_hal.h"
#include "hal/clk_tree_ll.h"
#include "hal/regi2c_ctrl_ll.h"
#include "esp_hw_log.h"
@@ -122,7 +122,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
clk_ll_ref_tick_set_divider(SOC_CPU_CLK_SRC_PLL, new_config.freq_mhz);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
// We will not power off RC_FAST in bootloader stage even if it is not being used as any
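The rescaling above keeps time derived from the cycle counter continuous across the frequency switch. A hedged worked example with illustrative numbers (not taken from the diff):

    #include <stdint.h>

    // Switching the CPU from 80 MHz to 160 MHz while the cycle counter reads
    // 1000000, i.e. 12.5 ms of elapsed time at 80 MHz.
    static uint32_t rescaled_ccount_example(void)
    {
        uint32_t freq_before_mhz = 80;
        uint32_t new_freq_mhz = 160;
        uint32_t ccount = 1000000;   // what esp_cpu_get_cycle_count() returned
        // Same scaling as above: 1000000 * 160 / 80 == 2000000, which is still
        // 12.5 ms at 160 MHz, so derived time does not jump at the switch.
        return (uint32_t)((uint64_t)ccount * new_freq_mhz / freq_before_mhz);
    }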

View File

@@ -14,9 +14,9 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/efuse_periph.h"
#include "hal/cpu_hal.h"
#include "hal/regi2c_ctrl_ll.h"
#include "esp_hw_log.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
#include "esp_rom_uart.h"
@@ -64,7 +64,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
rtc_clk_cpu_freq_set_config(&new_config);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
// We will not power off RC_FAST in bootloader stage even if it is not being used as any

View File

@@ -649,8 +649,8 @@ esp_err_t esp_mprot_set_prot(const esp_memp_config_t *memp_config)
//debugger connected:
// 1.check the signal repeatedly to avoid possible glitching attempt
// 2.leave the Memprot unset to allow debug operations
if (esp_cpu_in_ocd_debug_mode()) {
ESP_FAULT_ASSERT(esp_cpu_in_ocd_debug_mode());
if (esp_cpu_dbgr_is_attached()) {
ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
return ESP_OK;
}
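The double check above (an if on esp_cpu_dbgr_is_attached() immediately re-asserted with ESP_FAULT_ASSERT) is an anti-glitching measure: a single corrupted branch cannot silently skip the protection decision. A hedged sketch of the same pattern in isolation (esp_fault.h is assumed to be the header providing ESP_FAULT_ASSERT):

    #include <stdbool.h>
    #include "esp_cpu.h"
    #include "esp_fault.h"   // assumed header for ESP_FAULT_ASSERT

    // Skip enabling memory protection while a debugger is attached, re-checking
    // the condition so a glitched branch cannot skip it unnoticed.
    static bool should_skip_memprot(void)
    {
        if (esp_cpu_dbgr_is_attached()) {
            ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
            return true;
        }
        return false;
    }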

View File

@@ -14,9 +14,9 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/efuse_periph.h"
#include "hal/cpu_hal.h"
#include "hal/regi2c_ctrl_ll.h"
#include "esp_hw_log.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
#include "esp_rom_uart.h"
@@ -64,7 +64,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
rtc_clk_cpu_freq_set_config(&new_config);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
// We will not power off RC_FAST in bootloader stage even if it is not being used as any

View File

@@ -17,8 +17,8 @@
#include "soc/rtc_periph.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/efuse_periph.h"
#include "hal/cpu_hal.h"
#include "esp_hw_log.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
#include "esp_rom_uart.h"
#include "soc/system_reg.h"
@@ -72,7 +72,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
rtc_clk_cpu_freq_set_config(&new_config);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
if (cfg.slow_clk_src == SOC_RTC_SLOW_CLK_SRC_XTAL32K) {

View File

@@ -802,8 +802,8 @@ esp_err_t esp_memprot_set_prot(bool invoke_panic_handler, bool lock_feature, uin
}
//if being debugged, check we are not glitched and don't enable Memprot
if (esp_cpu_in_ocd_debug_mode()) {
ESP_FAULT_ASSERT(esp_cpu_in_ocd_debug_mode());
if (esp_cpu_dbgr_is_attached()) {
ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
} else {
//initialize for specific buses (any memory type does the job)
if (invoke_panic_handler) {

View File

@@ -15,9 +15,9 @@
#include "soc/sens_periph.h"
#include "soc/efuse_periph.h"
#include "soc/syscon_reg.h"
#include "hal/cpu_hal.h"
#include "hal/regi2c_ctrl_ll.h"
#include "esp_hw_log.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
static const char* TAG = "rtc_clk_init";
@@ -64,7 +64,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
rtc_clk_cpu_freq_set_config(&new_config);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
// We will not power off RC_FAST in bootloader stage even if it is not being used as any

View File

@@ -875,8 +875,8 @@ esp_err_t esp_mprot_set_prot(const esp_memp_config_t *memp_config)
// 1.check the signal repeatedly to avoid possible glitching attempt
// 2.leave the Memprot unset to allow debug operations
if (esp_cpu_in_ocd_debug_mode()) {
ESP_FAULT_ASSERT(esp_cpu_in_ocd_debug_mode());
if (esp_cpu_dbgr_is_attached()) {
ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
return ESP_OK;
}

View File

@@ -12,9 +12,9 @@
#include "esp32s3/rom/rtc.h"
#include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h"
#include "hal/cpu_hal.h"
#include "hal/regi2c_ctrl_ll.h"
#include "esp_hw_log.h"
#include "esp_cpu.h"
static const char *TAG = "rtc_clk_init";
@@ -60,7 +60,7 @@ void rtc_clk_init(rtc_clk_config_t cfg)
rtc_clk_cpu_freq_set_config(&new_config);
/* Re-calculate the ccount to make time calculation correct. */
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * cfg.cpu_freq_mhz / freq_before );
/* Slow & fast clocks setup */
// We will not power off RC_FAST in bootloader stage even if it is not being used as any

View File

@@ -10,6 +10,7 @@
#include <sys/param.h>
#include "esp_attr.h"
#include "esp_memory_utils.h"
#include "esp_sleep.h"
#include "esp_private/esp_timer_private.h"
#include "esp_private/system_internal.h"
@@ -534,7 +535,7 @@ static uint32_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags)
rtc_clk_cpu_freq_set_config(&cpu_freq_config);
if (!deep_sleep) {
s_config.ccount_ticks_record = cpu_ll_get_cycle_count();
s_config.ccount_ticks_record = esp_cpu_get_cycle_count();
misc_modules_wake_prepare();
}
@@ -655,7 +656,7 @@ static inline bool can_power_down_vddsdio(const uint32_t vddsdio_pd_sleep_durati
esp_err_t esp_light_sleep_start(void)
{
s_config.ccount_ticks_record = cpu_ll_get_cycle_count();
s_config.ccount_ticks_record = esp_cpu_get_cycle_count();
static portMUX_TYPE light_sleep_lock = portMUX_INITIALIZER_UNLOCKED;
portENTER_CRITICAL(&light_sleep_lock);
/* We will be calling esp_timer_private_set inside DPORT access critical
@@ -665,7 +666,7 @@ esp_err_t esp_light_sleep_start(void)
esp_timer_private_lock();
s_config.rtc_ticks_at_sleep_start = rtc_time_get();
uint32_t ccount_at_sleep_start = cpu_ll_get_cycle_count();
uint32_t ccount_at_sleep_start = esp_cpu_get_cycle_count();
uint64_t high_res_time_at_start = esp_timer_get_time();
uint32_t sleep_time_overhead_in = (ccount_at_sleep_start - s_config.ccount_ticks_record) / (esp_clk_cpu_freq() / 1000000ULL);
@@ -787,7 +788,7 @@ esp_err_t esp_light_sleep_start(void)
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
}
portEXIT_CRITICAL(&light_sleep_lock);
s_config.sleep_time_overhead_out = (cpu_ll_get_cycle_count() - s_config.ccount_ticks_record) / (esp_clk_cpu_freq() / 1000000ULL);
s_config.sleep_time_overhead_out = (esp_cpu_get_cycle_count() - s_config.ccount_ticks_record) / (esp_clk_cpu_freq() / 1000000ULL);
return err;
}

View File

@@ -25,7 +25,7 @@
#include "soc/dport_reg.h"
#include "dport_access.h"
#include "soc/rtc.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
@@ -365,7 +365,7 @@ static void accessDPORT2_stall_other_cpu(void *pvParameters)
dport_test_result = true;
while (exit_flag == false) {
DPORT_STALL_OTHER_CPU_START();
XTHAL_SET_CCOMPARE(2, cpu_hal_get_cycle_count());
XTHAL_SET_CCOMPARE(2, esp_cpu_get_cycle_count());
xt_highint5_read_apb = 1;
for (int i = 0; i < 200; ++i) {
if (_DPORT_REG_READ(DPORT_DATE_REG) != _DPORT_REG_READ(DPORT_DATE_REG)) {
@@ -402,7 +402,7 @@ static void accessDPORT2(void *pvParameters)
TEST_ESP_OK(esp_intr_alloc(ETS_INTERNAL_TIMER2_INTR_SOURCE, ESP_INTR_FLAG_LEVEL5 | ESP_INTR_FLAG_IRAM, NULL, NULL, &inth));
while (exit_flag == false) {
XTHAL_SET_CCOMPARE(2, cpu_hal_get_cycle_count() + 21);
XTHAL_SET_CCOMPARE(2, esp_cpu_get_cycle_count() + 21);
for (int i = 0; i < 200; ++i) {
if (DPORT_REG_READ(DPORT_DATE_REG) != DPORT_REG_READ(DPORT_DATE_REG)) {
dport_test_result = false;
@@ -450,7 +450,7 @@ static uint32_t IRAM_ATTR test_dport_access_reg_read(uint32_t reg)
#else
uint32_t apb;
unsigned int intLvl;
XTHAL_SET_CCOMPARE(2, cpu_hal_get_cycle_count() + s_shift_counter);
XTHAL_SET_CCOMPARE(2, esp_cpu_get_cycle_count() + s_shift_counter);
__asm__ __volatile__ (\
/* "movi %[APB], "XTSTR(0x3ff40078)"\n" */ /* (1) uncomment for reproduce issue */ \
"bnez %[APB], kl1\n" /* this branch command helps get good reproducing */ \

View File

@@ -13,11 +13,11 @@
#include "esp_err.h"
#include "esp_pm.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_private/crosscore_int.h"
#include "soc/rtc.h"
#include "hal/cpu_hal.h"
#include "hal/uart_ll.h"
#include "hal/uart_types.h"
@@ -532,7 +532,7 @@ static void IRAM_ATTR update_ccompare(void)
/* disable level 4 and below */
uint32_t irq_status = XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2);
#endif
uint32_t ccount = cpu_hal_get_cycle_count();
uint32_t ccount = esp_cpu_get_cycle_count();
uint32_t ccompare = XTHAL_GET_CCOMPARE(XT_TIMER_INDEX);
if ((ccompare - CCOMPARE_MIN_CYCLES_IN_FUTURE) - ccount < UINT32_MAX / 2) {
uint32_t diff = ccompare - ccount;
@@ -658,7 +658,7 @@ void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
* work for timer interrupt, and changing CCOMPARE would clear
* the interrupt flag.
*/
cpu_hal_set_cycle_count(XTHAL_GET_CCOMPARE(XT_TIMER_INDEX) - 16);
esp_cpu_set_cycle_count(XTHAL_GET_CCOMPARE(XT_TIMER_INDEX) - 16);
while (!(XTHAL_GET_INTERRUPT() & BIT(XT_TIMER_INTNUM))) {
;
}
@@ -845,7 +845,7 @@ void esp_pm_impl_waiti(void)
#if CONFIG_FREERTOS_USE_TICKLESS_IDLE
int core_id = xPortGetCoreID();
if (s_skipped_light_sleep[core_id]) {
cpu_hal_waiti();
esp_cpu_wait_for_intr();
/* Interrupt took the CPU out of waiti and s_rtos_lock_handle[core_id]
* is now taken. However since we are back to idle task, we can release
* the lock so that vApplicationSleep can attempt to enter light sleep.
@@ -854,7 +854,7 @@ void esp_pm_impl_waiti(void)
}
s_skipped_light_sleep[core_id] = true;
#else
cpu_hal_waiti();
esp_cpu_wait_for_intr();
#endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE
}
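esp_cpu_wait_for_intr() is the one-for-one replacement for cpu_hal_waiti(): it halts the calling core until any enabled interrupt fires. A minimal hedged sketch of the resulting idle pattern (illustrative only):

    #include <stdbool.h>
    #include "esp_cpu.h"

    // Sleep the core between interrupts instead of busy-spinning; the flag is
    // expected to be set from an ISR.
    static void idle_until_work_available(volatile bool *work_pending)
    {
        while (!*work_pending) {
            esp_cpu_wait_for_intr();   // returns once an enabled interrupt fires
        }
    }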

View File

@@ -8,6 +8,7 @@
#include <sys/param.h>
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "driver/spi_master.h"
#include "esp_private/periph_ctrl.h"
#include "essl_internal.h"

View File

@@ -6,11 +6,12 @@
#include <stdint.h>
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "esp_debug_helpers.h"
#include "soc/periph_defs.h"
#include "hal/cpu_hal.h"
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
@@ -51,7 +52,7 @@ static void IRAM_ATTR esp_crosscore_isr(void *arg) {
//Clear the interrupt first.
#if CONFIG_IDF_TARGET_ESP32
if (cpu_hal_get_core_id()==0) {
if (esp_cpu_get_core_id()==0) {
DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
} else {
DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
@@ -59,7 +60,7 @@ static void IRAM_ATTR esp_crosscore_isr(void *arg) {
#elif CONFIG_IDF_TARGET_ESP32S2
DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
#elif CONFIG_IDF_TARGET_ESP32S3
if (cpu_hal_get_core_id()==0) {
if (esp_cpu_get_core_id()==0) {
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0);
} else {
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0);
@@ -100,11 +101,11 @@ static void IRAM_ATTR esp_crosscore_isr(void *arg) {
//on each active core.
void esp_crosscore_int_init(void) {
portENTER_CRITICAL(&reason_spinlock);
reason[cpu_hal_get_core_id()]=0;
reason[esp_cpu_get_core_id()]=0;
portEXIT_CRITICAL(&reason_spinlock);
esp_err_t err __attribute__((unused)) = ESP_OK;
#if portNUM_PROCESSORS > 1
if (cpu_hal_get_core_id()==0) {
if (esp_cpu_get_core_id()==0) {
err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[0], NULL);
} else {
err = esp_intr_alloc(ETS_FROM_CPU_INTR1_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[1], NULL);

View File

@@ -11,6 +11,7 @@
#include "freertos/FreeRTOS.h"
#include "esp_attr.h"
#include "esp_freertos_hooks.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
@@ -55,7 +56,7 @@ void esp_vApplicationIdleHook(void)
esp_pm_impl_idle_hook();
esp_pm_impl_waiti();
#else
cpu_hal_waiti();
esp_cpu_wait_for_intr();
#endif

View File

@@ -9,9 +9,9 @@
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_bit_defs.h"
#include "esp_cpu.h"
#include "soc/soc_caps.h"
#include "hal/cpu_hal.h"
#include "sdkconfig.h"
@@ -32,7 +32,7 @@ extern sys_startup_fn_t const g_startup_fn[1];
#endif
// Utility to execute `sys_startup_fn_t` for the current core.
#define SYS_STARTUP_FN() ((*g_startup_fn[(cpu_hal_get_core_id())])())
#define SYS_STARTUP_FN() ((*g_startup_fn[(esp_cpu_get_core_id())])())
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
void startup_resume_other_cores(void);

View File

@@ -9,10 +9,9 @@
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/soc_caps.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "hal/interrupt_controller_hal.h"
#include "freertos/FreeRTOS.h"
#include "esp_cpu.h"
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_log.h"
@@ -49,7 +48,7 @@ volatile bool int_wdt_cpu1_ticked = false;
static void IRAM_ATTR tick_hook(void)
{
#if CONFIG_ESP_INT_WDT_CHECK_CPU1
if (cpu_hal_get_core_id() != 0) {
if (esp_cpu_get_core_id() != 0) {
int_wdt_cpu1_ticked = true;
} else {
// Only feed wdt if app cpu also ticked.
@@ -71,7 +70,7 @@ static void IRAM_ATTR tick_hook(void)
}
}
#else // CONFIG_ESP_INT_WDT_CHECK_CPU1
if (cpu_hal_get_core_id() != 0) {
if (esp_cpu_get_core_id() != 0) {
return;
} else {
// Todo: Check if there's a way to avoid reconfiguring the stages on each feed.
@@ -135,16 +134,16 @@ void esp_int_wdt_cpu_init(void)
#if SOC_TIMER_GROUPS > 1
assert((CONFIG_ESP_INT_WDT_TIMEOUT_MS >= (portTICK_PERIOD_MS << 1)) && "Interrupt watchdog timeout needs to be at least twice the RTOS tick period!");
// Register tick hook for current CPU to feed the INT WDT
esp_register_freertos_tick_hook_for_cpu(tick_hook, cpu_hal_get_core_id());
esp_register_freertos_tick_hook_for_cpu(tick_hook, esp_cpu_get_core_id());
/*
* Register INT WDT interrupt for current CPU. We do this manually as the timeout interrupt should call an assembly
* panic handler (see riscv/vector.S and xtensa_vectors.S).
*/
esp_intr_disable_source(WDT_INT_NUM);
esp_rom_route_intr_matrix(cpu_hal_get_core_id(), ETS_TG1_WDT_LEVEL_INTR_SOURCE, WDT_INT_NUM);
esp_rom_route_intr_matrix(esp_cpu_get_core_id(), ETS_TG1_WDT_LEVEL_INTR_SOURCE, WDT_INT_NUM);
#if SOC_CPU_HAS_FLEXIBLE_INTC
interrupt_controller_hal_set_int_type(WDT_INT_NUM, INTR_TYPE_LEVEL);
interrupt_controller_hal_set_int_level(WDT_INT_NUM, SOC_INTERRUPT_LEVEL_MEDIUM);
esp_cpu_intr_set_type(WDT_INT_NUM, INTR_TYPE_LEVEL);
esp_cpu_intr_set_priority(WDT_INT_NUM, SOC_INTERRUPT_LEVEL_MEDIUM);
#endif
#if CONFIG_ESP32_ECO3_CACHE_LOCK_FIX
/*

View File

@@ -15,7 +15,6 @@
#include "esp_cpu.h"
#include "soc/rtc.h"
#include "hal/timer_hal.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_types.h"
#include "hal/wdt_hal.h"
@@ -277,7 +276,7 @@ void esp_panic_handler(panic_info_t *info)
// If on-chip-debugger is attached, and system is configured to be aware of this,
// then only print up to details. Users should be able to probe for the other information
// in debug mode.
if (esp_cpu_in_ocd_debug_mode()) {
if (esp_cpu_dbgr_is_attached()) {
panic_print_str("Setting breakpoint at 0x");
panic_print_hex((uint32_t)info->addr);
panic_print_str(" and returning...\r\n");
@@ -291,7 +290,7 @@ void esp_panic_handler(panic_info_t *info)
#endif
#endif
cpu_hal_set_breakpoint(0, info->addr); // use breakpoint 0
esp_cpu_set_breakpoint(0, info->addr); // use breakpoint 0
return;
}
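esp_cpu_set_breakpoint() replaces cpu_hal_set_breakpoint() one-for-one. A hedged sketch of the debugger-aware pattern used by the panic handler above (the helper and its name are illustrative):

    #include <stdbool.h>
    #include "esp_cpu.h"

    // If a debugger is attached, plant a hardware breakpoint at the faulting
    // address and report success so the caller can simply return to it;
    // otherwise let the normal panic path continue.
    static bool try_break_into_debugger(const void *fault_addr)
    {
        if (!esp_cpu_dbgr_is_attached()) {
            return false;
        }
        esp_cpu_set_breakpoint(0, fault_addr);   // hardware breakpoint slot 0
        return true;
    }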

View File

@@ -18,7 +18,7 @@ const static char *TAG = "esp_dbg_stubs";
/* Advertises apptrace control block address to host */
static int esp_dbg_stubs_advertise_table(void *stub_table_addr)
{
if (!esp_cpu_in_ocd_debug_mode()) {
if (!esp_cpu_dbgr_is_attached()) {
return 0;
}
return (int) semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_DBG_STUBS_INIT, (long*)stub_table_addr);

View File

@@ -14,11 +14,11 @@
#include "esp_private/spi_flash_os.h"
#include "esp_rom_sys.h"
#include "esp_cpu.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "soc/rtc_periph.h"
#include "hal/cpu_hal.h"
#include "esp_attr.h"
#include "bootloader_flash.h"
#include "esp_intr_alloc.h"
@@ -42,7 +42,7 @@ IRAM_ATTR static void rtc_brownout_isr_handler(void *arg)
*/
brownout_hal_intr_clear();
// Stop the other core.
esp_cpu_stall(!cpu_hal_get_core_id());
esp_cpu_stall(!esp_cpu_get_core_id());
esp_reset_reason_set_hint(ESP_RST_BROWNOUT);
#if CONFIG_SPI_FLASH_BROWNOUT_RESET
if (spi_flash_brownout_need_reset()) {

View File

@@ -137,7 +137,7 @@ static volatile bool s_resume_cores;
static void core_intr_matrix_clear(void)
{
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
for (int i = 0; i < ETS_MAX_INTR_SOURCE; i++) {
esp_rom_route_intr_matrix(core_id, i, ETS_INVALID_INUM);
@@ -152,7 +152,7 @@ void startup_resume_other_cores(void)
void IRAM_ATTR call_start_cpu1(void)
{
cpu_hal_set_vecbase(&_vector_table);
esp_cpu_intr_set_ivt_addr(&_vector_table);
ets_set_appcpu_boot_addr(0);
@@ -266,7 +266,7 @@ void IRAM_ATTR call_start_cpu0(void)
#endif
#ifdef __riscv
if (cpu_hal_is_debugger_attached()) {
if (esp_cpu_dbgr_is_attached()) {
/* Give the debugger some time to detect that the target started, halt it, enable ebreaks and resume.
500ms should be enough. */
for (uint32_t ms_num = 0; ms_num < 2; ms_num++) {
@@ -285,7 +285,7 @@ void IRAM_ATTR call_start_cpu0(void)
#endif
// Move exception vectors to IRAM
cpu_hal_set_vecbase(&_vector_table);
esp_cpu_intr_set_ivt_addr(&_vector_table);
rst_reas[0] = esp_rom_get_reset_reason(0);
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE

View File

@@ -17,7 +17,6 @@
#include "soc/rtc.h"
#include "hal/soc_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
@@ -102,7 +101,7 @@ static void print_state(const void *f)
static void frame_to_panic_info(void *frame, panic_info_t *info, bool pseudo_excause)
{
info->core = cpu_hal_get_core_id();
info->core = esp_cpu_get_core_id();
info->exception = PANIC_EXCEPTION_FAULT;
info->details = NULL;
info->reason = "Unknown";
@@ -126,7 +125,7 @@ static void panic_handler(void *frame, bool pseudo_excause)
* Setup environment and perform necessary architecture/chip specific
* steps here prior to the system panic handler.
* */
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
// If multiple cores arrive at panic handler, save frames for all of them
g_exc_frames[core_id] = frame;
@@ -164,9 +163,9 @@ static void panic_handler(void *frame, bool pseudo_excause)
esp_ipc_isr_stall_abort();
if (esp_cpu_in_ocd_debug_mode()) {
if (esp_cpu_dbgr_is_attached()) {
#if __XTENSA__
if (!(esp_ptr_executable(cpu_ll_pc_to_ptr(panic_get_address(frame))) && (panic_get_address(frame) & 0xC0000000U))) {
if (!(esp_ptr_executable(esp_cpu_pc_to_addr(panic_get_address(frame))) && (panic_get_address(frame) & 0xC0000000U))) {
/* Xtensa ABI sets the 2 MSBs of the PC according to the windowed call size
* In case the PC is invalid, GDB will fail to translate addresses to function names
* Hence we replace the PC with a placeholder address in case of an invalid PC
@@ -198,7 +197,7 @@ static void panic_handler(void *frame, bool pseudo_excause)
*/
static void IRAM_ATTR panic_enable_cache(void)
{
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
if (!spi_flash_cache_enabled()) {
esp_ipc_isr_stall_abort();

View File

@@ -18,10 +18,10 @@
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "soc/dport_reg.h"
#include "hal/cpu_hal.h"
#include "esp_rom_sys.h"
@@ -29,7 +29,7 @@
void esp_cache_err_int_init(void)
{
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);
// We do not register a handler for the interrupt because it is interrupt

View File

@@ -7,7 +7,6 @@
#include "soc/rtc.h"
#include "soc/dport_reg.h"
#include "soc/i2s_reg.h"
#include "hal/cpu_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
#include "bootloader_clock.h"
@@ -16,6 +15,7 @@
#include "esp_private/spi_common_internal.h" // [refactor-todo]: for spicommon_periph_in_use
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_rom_uart.h"
#include "esp_rom_sys.h"
@@ -189,7 +189,7 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)
}
// Re-calculate the ccount to make time calculation correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
/* This function is not exposed as an API at this point.

View File

@@ -21,7 +21,6 @@
#include "esp_cpu.h"
#include "soc/rtc.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "freertos/xtensa_api.h"
#include "soc/soc_memory_layout.h"
#include "esp_private/cache_err_int.h"
@@ -53,7 +52,7 @@ void IRAM_ATTR esp_restart_noos(void)
// CPU must be reset before stalling, in case it was running a s32c1i
// instruction. This would cause memory pool to be locked by arbiter
// to the stalled CPU, preventing current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);
esp_cpu_stall(other_core_id);

View File

@@ -11,6 +11,7 @@
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_private/esp_clk.h"
#include "esp_clk_internal.h"
#include "esp32c2/rom/ets_sys.h"
@@ -20,7 +21,6 @@
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "bootloader_clock.h"
@@ -127,7 +127,7 @@ static const char *TAG = "clk";
}
// Re-calculate the ccount to make time calculation correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)

View File

@@ -23,7 +23,6 @@
#include "soc/syscon_reg.h"
#include "soc/system_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32c2/rom/cache.h"
@@ -52,7 +51,7 @@ void IRAM_ATTR esp_restart_noos(void)
// CPU must be reset before stalling, in case it was running a s32c1i
// instruction. This would cause memory pool to be locked by arbiter
// to the stalled CPU, preventing current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);

View File

@@ -11,6 +11,7 @@
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_clk_internal.h"
#include "esp32c3/rom/ets_sys.h"
#include "esp32c3/rom/uart.h"
@@ -19,7 +20,6 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -135,7 +135,7 @@ static const char *TAG = "clk";
}
// Re-calculate the ccount to make time calculation correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)

View File

@@ -24,7 +24,6 @@
#include "soc/system_reg.h"
#include "soc/uart_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32c3/rom/cache.h"
@@ -53,7 +52,7 @@ void IRAM_ATTR esp_restart_noos(void)
// The CPU must be reset before stalling, in case it was running an s32c1i
// instruction. That would leave the memory pool locked by the arbiter to the
// stalled CPU, preventing the current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);

View File

@@ -11,6 +11,7 @@
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_clk_internal.h"
#include "esp32h2/rom/ets_sys.h"
#include "esp32h2/rom/uart.h"
@@ -20,7 +21,6 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -127,7 +127,7 @@ static const char *TAG = "clk";
}
// Recalculate the ccount to keep time calculations correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)

View File

@@ -23,7 +23,6 @@
#include "soc/syscon_reg.h"
#include "soc/system_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32h2/rom/cache.h"
@@ -52,7 +51,7 @@ void IRAM_ATTR esp_restart_noos(void)
// The CPU must be reset before stalling, in case it was running an s32c1i
// instruction. That would leave the memory pool locked by the arbiter to the
// stalled CPU, preventing the current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);

View File

@@ -18,13 +18,13 @@
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "soc/extmem_reg.h"
#include "soc/dport_reg.h"
#include "soc/periph_defs.h"
#include "hal/cpu_hal.h"
#include "esp_rom_sys.h"
@@ -32,7 +32,7 @@
void esp_cache_err_int_init(void)
{
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);
// We do not register a handler for the interrupt because it is interrupt

View File

@@ -11,6 +11,7 @@
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_clk_internal.h"
#include "esp_rom_uart.h"
#include "esp_rom_sys.h"
@@ -20,7 +21,6 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -137,7 +137,7 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk);
}
// Recalculate the ccount to keep time calculations correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)

View File

@@ -22,10 +22,8 @@
#include "soc/syscon_reg.h"
#include "soc/rtc_periph.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "freertos/xtensa_api.h"
#include "soc/soc_memory_layout.h"
#include "hal/cpu_hal.h"
#include "esp32s2/rom/rtc.h"
@@ -57,7 +55,7 @@ void IRAM_ATTR esp_restart_noos(void)
// The CPU must be reset before stalling, in case it was running an s32c1i
// instruction. That would leave the memory pool locked by the arbiter to the
// stalled CPU, preventing the current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
//Todo: Refactor to use Interrupt or Task Watchdog API, and a system level WDT context
// Disable TG0/TG1 watchdogs

View File

@@ -17,10 +17,10 @@
#include "esp_err.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_intr_alloc.h"
#include "soc/soc.h"
#include "soc/periph_defs.h"
#include "hal/cpu_hal.h"
#include "esp_rom_sys.h"
#include "hal/cache_ll.h"
@@ -28,7 +28,7 @@ static const char *TAG = "CACHE_ERR";
void esp_cache_err_int_init(void)
{
uint32_t core_id = cpu_hal_get_core_id();
uint32_t core_id = esp_cpu_get_core_id();
ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
// We do not register a handler for the interrupt because it is interrupt

View File

@@ -11,6 +11,7 @@
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_clk_internal.h"
#include "esp_rom_uart.h"
#include "esp_rom_sys.h"
@@ -19,7 +20,6 @@
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "hal/cpu_hal.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -129,7 +129,7 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk);
}
// Recalculate the ccount to keep time calculations correct.
cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)

View File

@@ -21,7 +21,6 @@
#include "soc/syscon_reg.h"
#include "soc/rtc_periph.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "freertos/xtensa_api.h"
#include "soc/soc_memory_layout.h"
@@ -86,7 +85,7 @@ void IRAM_ATTR esp_restart_noos(void)
// The CPU must be reset before stalling, in case it was running an s32c1i
// instruction. That would leave the memory pool locked by the arbiter to the
// stalled CPU, preventing the current CPU from accessing this pool.
const uint32_t core_id = cpu_hal_get_core_id();
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);

View File

@@ -31,6 +31,7 @@
#include "esp_flash_encrypt.h"
#include "esp_secure_boot.h"
#include "esp_xt_wdt.h"
#include "esp_cpu.h"
#if __has_include("esp_ota_ops.h")
#include "esp_ota_ops.h"
@@ -200,7 +201,7 @@ static void do_system_init_fn(void)
esp_system_init_fn_t *p;
int core_id = cpu_hal_get_core_id();
int core_id = esp_cpu_get_core_id();
for (p = &_esp_system_init_fn_array_start; p < &_esp_system_init_fn_array_end; ++p) {
if (p->cores & BIT(core_id)) {
ESP_LOGD(TAG, "calling init function: %p on core: %d", p->fn, core_id);
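do_system_init_fn() walks the linker-generated array and runs an entry only when the calling core's bit is set in its cores mask. A simplified dispatch sketch; the init_entry_t/run_init_table names are hypothetical stand-ins for the real esp_system_init_fn_t array handling:

    #include <stddef.h>
    #include <stdint.h>
    #include "esp_bit_defs.h"   // BIT()
    #include "esp_cpu.h"

    typedef struct {
        void (*fn)(void);   // init routine
        uint32_t cores;     // bitmask of cores the routine should run on
    } init_entry_t;         // hypothetical stand-in for esp_system_init_fn_t

    static void run_init_table(const init_entry_t *table, size_t len)
    {
        const int core_id = esp_cpu_get_core_id();
        for (size_t i = 0; i < len; i++) {
            // Skip entries not meant for the calling core.
            if (table[i].cores & BIT(core_id)) {
                table[i].fn();
            }
        }
    }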

View File

@@ -11,6 +11,7 @@
#include "freertos/semphr.h"
#include "unity.h"
#include "test_utils.h"
#include "esp_cpu.h"
#include "esp_rom_sys.h"
#include "esp_ipc_isr.h"
@@ -46,7 +47,7 @@ TEST_CASE("Test ipc_isr blocking IPC function calls get_cycle_count_other_cpu",
{
int val = 0x5a5a;
esp_ipc_isr_asm_call_blocking(esp_test_ipc_isr_get_cycle_count_other_cpu, &val);
esp_rom_printf("CCOUNT CPU0 = %d\n", cpu_ll_get_cycle_count());
esp_rom_printf("CCOUNT CPU0 = %d\n", esp_cpu_get_cycle_count());
esp_rom_printf("CCOUNT CPU1 = %d\n", val);
}

View File

@@ -32,7 +32,7 @@
#include <string.h>
#include "esp_system.h"
#include "esp_rom_sys.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
struct source_location {
@@ -138,8 +138,8 @@ void __ubsan_handle_invalid_builtin(void *data_);
static void __ubsan_maybe_debugbreak(void)
{
if (cpu_hal_is_debugger_attached()) {
cpu_hal_break();
if (esp_cpu_dbgr_is_attached()) {
esp_cpu_dbgr_break();
}
}
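The UBSan hook above shows the replacement pattern for the old cpu_hal debugger helpers: only issue a break when a debugger is actually attached, since an unattended BREAK would otherwise end up as a fatal exception. A minimal sketch using the same esp_cpu_dbgr_* calls:

    #include "esp_cpu.h"

    // Drop into the debugger at an interesting point, but only if one is attached.
    static inline void maybe_debug_break(void)
    {
        if (esp_cpu_dbgr_is_attached()) {
            esp_cpu_dbgr_break();
        }
    }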

View File

@@ -28,12 +28,12 @@
#include "esp_event.h"
#include "esp_heap_caps.h"
#include "esp_timer.h"
#include "esp_cpu.h"
#include "esp_private/wifi_os_adapter.h"
#include "esp_private/wifi.h"
#include "esp_phy_init.h"
#include "soc/dport_reg.h"
#include "soc/syscon_reg.h"
#include "hal/interrupt_controller_hal.h"
#include "phy_init_data.h"
#include "esp_private/periph_ctrl.h"
#include "nvs.h"
@@ -737,8 +737,8 @@ wifi_osi_funcs_t g_wifi_osi_funcs = {
._set_intr = set_intr_wrapper,
._clear_intr = clear_intr_wrapper,
._set_isr = set_isr_wrapper,
._ints_on = interrupt_controller_hal_enable_interrupts,
._ints_off = interrupt_controller_hal_disable_interrupts,
._ints_on = esp_cpu_intr_enable,
._ints_off = esp_cpu_intr_disable,
._is_from_isr = is_from_isr_wrapper,
._spin_lock_create = spin_lock_create_wrapper,
._spin_lock_delete = free,
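With this change the Wi-Fi OS adapter's _ints_on/_ints_off hooks call the esp_cpu interrupt helpers directly; both take a bitmask of CPU interrupt numbers rather than going through the interrupt controller HAL. A small usage sketch, assuming the esp_cpu_intr_enable()/esp_cpu_intr_disable() signatures used above and a hypothetical interrupt number:

    #include <stdint.h>
    #include "esp_cpu.h"

    #define EXAMPLE_INTR_NUM 1   // hypothetical CPU interrupt number, for illustration

    static void toggle_example_intr(void)
    {
        // Mask the interrupt by its bit position, do the critical work, unmask it.
        esp_cpu_intr_disable(1u << EXAMPLE_INTR_NUM);
        /* ... work with the interrupt masked ... */
        esp_cpu_intr_enable(1u << EXAMPLE_INTR_NUM);
    }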

View File

@@ -28,13 +28,13 @@
#include "esp_event.h"
#include "esp_heap_caps.h"
#include "esp_timer.h"
#include "esp_cpu.h"
#include "esp_private/wifi_os_adapter.h"
#include "esp_private/wifi.h"
#include "esp_phy_init.h"
#include "soc/dport_reg.h"
#include "soc/rtc.h"
#include "soc/syscon_reg.h"
#include "hal/interrupt_controller_hal.h"
#include "phy_init_data.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -738,8 +738,8 @@ wifi_osi_funcs_t g_wifi_osi_funcs = {
._set_intr = set_intr_wrapper,
._clear_intr = clear_intr_wrapper,
._set_isr = set_isr_wrapper,
._ints_on = interrupt_controller_hal_enable_interrupts,
._ints_off = interrupt_controller_hal_disable_interrupts,
._ints_on = esp_cpu_intr_enable,
._ints_off = esp_cpu_intr_disable,
._is_from_isr = is_from_isr_wrapper,
._spin_lock_create = spin_lock_create_wrapper,
._spin_lock_delete = free,

View File

@@ -28,13 +28,13 @@
#include "esp_event.h"
#include "esp_heap_caps.h"
#include "esp_timer.h"
#include "esp_cpu.h"
#include "esp_private/wifi_os_adapter.h"
#include "esp_private/wifi.h"
#include "esp_phy_init.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/rtc.h"
#include "soc/syscon_reg.h"
#include "hal/interrupt_controller_hal.h"
#include "phy_init_data.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
@@ -755,8 +755,8 @@ wifi_osi_funcs_t g_wifi_osi_funcs = {
._set_intr = set_intr_wrapper,
._clear_intr = clear_intr_wrapper,
._set_isr = set_isr_wrapper,
._ints_on = interrupt_controller_hal_enable_interrupts,
._ints_off = interrupt_controller_hal_disable_interrupts,
._ints_on = esp_cpu_intr_enable,
._ints_off = esp_cpu_intr_disable,
._is_from_isr = is_from_isr_wrapper,
._spin_lock_create = spin_lock_create_wrapper,
._spin_lock_delete = free,

View File

@@ -145,10 +145,10 @@ void esp_core_dump_to_uart(panic_info_t *info)
ESP_COREDUMP_LOGI("Press Enter to print core dump to UART...");
const int cpu_ticks_per_ms = esp_clk_cpu_freq() / 1000;
tm_end = esp_cpu_get_ccount() / cpu_ticks_per_ms + CONFIG_ESP_COREDUMP_UART_DELAY;
tm_end = esp_cpu_get_cycle_count() / cpu_ticks_per_ms + CONFIG_ESP_COREDUMP_UART_DELAY;
ch = esp_core_dump_uart_get_char();
while (!(ch == '\n' || ch == '\r')) {
tm_cur = esp_cpu_get_ccount() / cpu_ticks_per_ms;
tm_cur = esp_cpu_get_cycle_count() / cpu_ticks_per_ms;
if (tm_cur >= tm_end){
break;
}
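The UART prompt above converts the free-running cycle counter into a millisecond deadline: cycles divided by (CPU frequency / 1000) gives elapsed milliseconds. Worked out in isolation, assuming the esp_clk_cpu_freq() and esp_cpu_get_cycle_count() calls used above and a hypothetical timeout constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include "esp_cpu.h"
    #include "esp_private/esp_clk.h"

    #define EXAMPLE_TIMEOUT_MS 1000   // hypothetical timeout, in milliseconds

    // Return true once EXAMPLE_TIMEOUT_MS has elapsed since start_cycles.
    static bool prompt_timed_out(uint32_t start_cycles)
    {
        const uint32_t cycles_per_ms = esp_clk_cpu_freq() / 1000;
        const uint32_t elapsed_ms = (esp_cpu_get_cycle_count() - start_cycles) / cycles_per_ms;
        return elapsed_ms >= EXAMPLE_TIMEOUT_MS;
    }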

View File

@@ -21,6 +21,7 @@
#include "test_fatfs_common.h"
#include "wear_levelling.h"
#include "esp_partition.h"
#include "esp_memory_utils.h"
#if !TEMPORARY_DISABLED_FOR_TARGETS(ESP32C2)

View File

@@ -11,8 +11,9 @@
#include "spinlock.h"
#include "soc/interrupt_core0_reg.h"
#include "esp_macros.h"
#include "hal/cpu_hal.h"
#include "esp_cpu.h"
#include "esp_private/crosscore_int.h"
#include "esp_memory_utils.h"
#ifdef __cplusplus
extern "C" {
@@ -225,7 +226,7 @@ static inline void __attribute__((always_inline)) vPortYieldCore( BaseType_t xCo
static inline BaseType_t __attribute__((always_inline)) xPortGetCoreID( void )
{
return (BaseType_t) cpu_hal_get_core_id();
return (BaseType_t) esp_cpu_get_core_id();
}
/* ------------------------------------------------ IDF Compatibility --------------------------------------------------
@@ -241,42 +242,6 @@ static inline BaseType_t xPortInIsrContext(void)
// Added for backward compatibility with IDF
#define xPortInterruptedFromISRContext() xPortInIsrContext()
// ---------------------- Spinlocks ------------------------
/**
* @brief Wrapper for atomic compare-and-set instruction
*
* @note Isn't a real atomic CAS.
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
/**
* @brief Wrapper for atomic compare-and-set instruction in external RAM
*
* @note Isn't a real atomic CAS.
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#if defined(CONFIG_SPIRAM)
compare_and_set_extram(addr, compare, set);
#endif
}
// ------------------ Critical Sections --------------------
/*

View File

@@ -26,7 +26,7 @@
#include "FreeRTOS.h" /* This pulls in portmacro.h */
#include "task.h"
#include "portmacro.h"
#include "esp_memory_utils.h"
#ifdef CONFIG_FREERTOS_SYSTICK_USES_SYSTIMER
#include "soc/periph_defs.h"
#include "soc/system_reg.h"

View File

@@ -16,9 +16,9 @@
#include "xt_instr_macros.h"
#include "portbenchmark.h"
#include "esp_macros.h"
#include "hal/cpu_hal.h"
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
#include "esp_cpu.h"
#include "esp_private/crosscore_int.h"
#include "esp_memory_utils.h"
/*
Note: We should not include any FreeRTOS headers (directly or indirectly) here as this will create a reverse dependency
@@ -253,7 +253,7 @@ static inline void __attribute__((always_inline)) vPortYieldFromISR( void )
static inline BaseType_t __attribute__((always_inline)) xPortGetCoreID( void )
{
return (BaseType_t) cpu_hal_get_core_id();
return (BaseType_t) esp_cpu_get_core_id();
}
/* ------------------------------------------------ IDF Compatibility --------------------------------------------------
@@ -285,18 +285,6 @@ static inline void vPortClearInterruptMaskFromISR(UBaseType_t prev_level)
// ---------------------- Spinlocks ------------------------
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#if defined(CONFIG_SPIRAM)
compare_and_set_extram(addr, compare, set);
#endif
}
static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout)
{
return (spinlock_acquire(mux, timeout));
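The uxPortCompareSet()/uxPortCompareSetExtram() wrappers removed above were thin shims around the architecture's compare-and-set, and the port now leans on spinlock_acquire() instead, as shown in this hunk. If equivalent raw compare-and-set access is still wanted at the application level, a sketch built on esp_cpu_compare_and_set() (assumed here to be the bool-returning helper in esp_cpu.h) could look like:

    #include <stdbool.h>
    #include <stdint.h>
    #include "esp_cpu.h"

    #define EXAMPLE_UNLOCKED 0   // hypothetical "free" marker, for illustration

    // Spin until *lock transitions from EXAMPLE_UNLOCKED to this core's ID + 1.
    // esp_cpu_compare_and_set() writes the new value only if *lock still holds
    // the compare value, and reports whether the write happened.
    static void busy_acquire(volatile uint32_t *lock)
    {
        const uint32_t owner = (uint32_t)esp_cpu_get_core_id() + 1;
        while (!esp_cpu_compare_and_set(lock, EXAMPLE_UNLOCKED, owner)) {
            // Keep spinning until the current owner releases the lock.
        }
    }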

View File

@@ -28,6 +28,7 @@
#include "esp_heap_caps_init.h"
#include "esp_freertos_hooks.h"
#include "esp_intr_alloc.h"
#include "esp_memory_utils.h"
#if CONFIG_SPIRAM
/* Required by esp_psram_extram_reserve_dma_pool() */
#include "esp_psram.h"

View File

@@ -46,11 +46,11 @@
#include "soc/interrupt_core0_reg.h"
#include "esp_macros.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_rom_sys.h"
#include "esp_heap_caps.h"
#include "esp_system.h" /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
#include "esp_newlib.h"
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
/* [refactor-todo] These includes are not directly used in this file. They are kept in to prevent a breaking change. Remove these. */
#include <limits.h>
@@ -182,32 +182,6 @@ typedef struct {
(mux)->count = 0; \
})
/**
* @brief Wrapper for atomic compare-and-set instruction
*
* @note Isn't a real atomic CAS.
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/**
* @brief Wrapper for atomic compare-and-set instruction in external RAM
*
* @note Isn't a real atomic CAS.
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
// ------------------ Critical Sections --------------------
/**
@@ -317,7 +291,7 @@ void vPortSetStackWatchpoint(void *pxStackStart);
*/
FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
{
return (BaseType_t) cpu_hal_get_core_id();
return (BaseType_t) esp_cpu_get_core_id();
}
@@ -434,22 +408,6 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
// --------------------- Interrupts ------------------------
// ---------------------- Spinlocks ------------------------
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#if defined(CONFIG_SPIRAM)
compare_and_set_extram(addr, compare, set);
#endif
}
// ---------------------- Yielding -------------------------
FORCE_INLINE_ATTR bool xPortCanYield(void)

View File

@@ -55,6 +55,7 @@
#include "task.h"
#include "portmacro.h"
#include "port_systick.h"
#include "esp_memory_utils.h"

View File

@@ -72,15 +72,15 @@
#include <xtensa/xtruntime.h> /* required for XTOS_SET_INTLEVEL. [refactor-todo] add common intr functions to esp_hw_support */
#include "xt_instr_macros.h"
#include "spinlock.h"
#include "hal/cpu_hal.h"
#include "esp_private/crosscore_int.h"
#include "esp_macros.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "esp_newlib.h" /* required for esp_reent_init() in tasks.c */
#include "esp_heap_caps.h"
#include "esp_rom_sys.h"
#include "esp_system.h" /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
#include "portbenchmark.h"
/* [refactor-todo] These includes are not directly used in this file. They are kept in to prevent a breaking change. Remove these. */
@@ -405,38 +405,6 @@ void vPortSetStackWatchpoint( void *pxStackStart );
*/
FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void);
/**
* @brief Wrapper for atomic compare-and-set instruction
*
* This subroutine will atomically compare *addr to 'compare'. If *addr == compare, *addr is set to *set. *set is
* updated with the previous value of *addr (either 'compare' or some other value.)
*
* @warning From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the "bitwise inverse" of
* the old mem if the mem wasn't written. This doesn't seem to happen on the ESP32 (portMUX assertions would
* fail).
*
* @note [refactor-todo] Check if this can be deprecated
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/**
* @brief Wrapper for atomic compare-and-set instruction in external RAM
*
* Atomic compare-and-set but the target address is placed in external RAM
*
* @note [refactor-todo] Check if this can be deprecated
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/* ------------------------------------------- FreeRTOS Porting Interface ----------------------------------------------
@@ -658,19 +626,7 @@ FORCE_INLINE_ATTR bool xPortCanYield(void)
FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
{
return (BaseType_t) cpu_hal_get_core_id();
}
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
static inline void __attribute__((always_inline)) uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#ifdef CONFIG_SPIRAM
compare_and_set_extram(addr, compare, set);
#endif
return (BaseType_t) esp_cpu_get_core_id();
}

Some files were not shown because too many files have changed in this diff.