components/bt: using high level interrupt in lc

baohongde
2020-04-17 23:16:59 +08:00
parent f490b4ddfe
commit a172605af4
11 changed files with 306 additions and 392 deletions

View File

@@ -1,6 +1,7 @@
if(CONFIG_BT_ENABLED) if(CONFIG_BT_ENABLED)
set(COMPONENT_SRCS "bt.c") set(COMPONENT_SRCS "bt.c"
"hli_api.c")
set(COMPONENT_ADD_INCLUDEDIRS include) set(COMPONENT_ADD_INCLUDEDIRS include)
if(CONFIG_BLUEDROID_ENABLED) if(CONFIG_BLUEDROID_ENABLED)
@@ -294,4 +295,6 @@ if(CONFIG_BT_ENABLED)
target_link_libraries(${COMPONENT_TARGET} "-L${CMAKE_CURRENT_LIST_DIR}/lib") target_link_libraries(${COMPONENT_TARGET} "-L${CMAKE_CURRENT_LIST_DIR}/lib")
target_link_libraries(${COMPONENT_TARGET} btdm_app) target_link_libraries(${COMPONENT_TARGET} btdm_app)
target_link_libraries(${COMPONENT_TARGET} "-u ld_include_hli_vectors_bt")
endif() endif()

View File

@@ -44,10 +44,10 @@
#include "soc/soc_memory_layout.h" #include "soc/soc_memory_layout.h"
#include "esp_clk.h" #include "esp_clk.h"
#include "esp_coexist_internal.h" #include "esp_coexist_internal.h"
#include "hli_api.h"
#if CONFIG_BT_ENABLED #if CONFIG_BT_ENABLED
#define CONFIG_BT_HLIGH_LEVEL_INT
/* Macro definition /* Macro definition
************************************************************************ ************************************************************************
*/ */
@@ -90,14 +90,9 @@ do{\
} while(0) } while(0)
#define OSI_FUNCS_TIME_BLOCKING 0xffffffff #define OSI_FUNCS_TIME_BLOCKING 0xffffffff
#define OSI_VERSION 0x00010002 #define OSI_VERSION 0x00010003
#define OSI_MAGIC_VALUE 0xFADEBEAD #define OSI_MAGIC_VALUE 0xFADEBEAD
/* SPIRAM Configuration */
#if CONFIG_SPIRAM_USE_MALLOC
#define BTDM_MAX_QUEUE_NUM (5)
#endif
/* Types definition /* Types definition
************************************************************************ ************************************************************************
*/ */
@@ -115,15 +110,6 @@ typedef struct {
intptr_t end; intptr_t end;
} btdm_dram_available_region_t; } btdm_dram_available_region_t;
/* PSRAM configuration */
#if CONFIG_SPIRAM_USE_MALLOC
typedef struct {
QueueHandle_t handle;
void *storage;
void *buffer;
} btdm_queue_item_t;
#endif
/* OSI function */ /* OSI function */
struct osi_funcs_t { struct osi_funcs_t {
uint32_t _version; uint32_t _version;
@@ -143,7 +129,7 @@ struct osi_funcs_t {
void (*_mutex_delete)(void *mutex); void (*_mutex_delete)(void *mutex);
int32_t (*_mutex_lock)(void *mutex); int32_t (*_mutex_lock)(void *mutex);
int32_t (*_mutex_unlock)(void *mutex); int32_t (*_mutex_unlock)(void *mutex);
void *(* _queue_create)(uint32_t queue_len, uint32_t item_size); void *(* _queue_create)(uint32_t queue_len, uint32_t item_size, int flag);
void (* _queue_delete)(void *queue); void (* _queue_delete)(void *queue);
int32_t (* _queue_send)(void *queue, void *item, uint32_t block_time_ms); int32_t (* _queue_send)(void *queue, void *item, uint32_t block_time_ms);
int32_t (* _queue_send_from_isr)(void *queue, void *item, void *hptw); int32_t (* _queue_send_from_isr)(void *queue, void *item, void *hptw);
@@ -241,12 +227,10 @@ extern uint32_t _btdm_data_end;
/* Local Function Declare /* Local Function Declare
********************************************************************* *********************************************************************
*/ */
#if CONFIG_SPIRAM_USE_MALLOC static xt_handler set_isr_hlevel_wrapper(int n, xt_handler f, void *arg);
static bool btdm_queue_generic_register(const btdm_queue_item_t *queue); static void IRAM_ATTR interrupt_hlevel_disable(void);
static bool btdm_queue_generic_deregister(btdm_queue_item_t *queue); static void IRAM_ATTR interrupt_hlevel_restore(void);
#endif /* CONFIG_SPIRAM_USE_MALLOC */ static void IRAM_ATTR task_yield(void);
static void IRAM_ATTR interrupt_disable(void);
static void IRAM_ATTR interrupt_restore(void);
static void IRAM_ATTR task_yield_from_isr(void); static void IRAM_ATTR task_yield_from_isr(void);
static void *semphr_create_wrapper(uint32_t max, uint32_t init); static void *semphr_create_wrapper(uint32_t max, uint32_t init);
static void semphr_delete_wrapper(void *semphr); static void semphr_delete_wrapper(void *semphr);
@@ -258,12 +242,12 @@ static void *mutex_create_wrapper(void);
static void mutex_delete_wrapper(void *mutex); static void mutex_delete_wrapper(void *mutex);
static int32_t mutex_lock_wrapper(void *mutex); static int32_t mutex_lock_wrapper(void *mutex);
static int32_t mutex_unlock_wrapper(void *mutex); static int32_t mutex_unlock_wrapper(void *mutex);
static void *queue_create_wrapper(uint32_t queue_len, uint32_t item_size); static void *queue_create_hlevel_wrapper(uint32_t queue_len, uint32_t item_size, int flag);
static void queue_delete_wrapper(void *queue); static void queue_delete_hlevel_wrapper(void *queue);
static int32_t queue_send_wrapper(void *queue, void *item, uint32_t block_time_ms); static int32_t IRAM_ATTR queue_send_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_send_from_isr_wrapper(void *queue, void *item, void *hptw); static int32_t IRAM_ATTR queue_send_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw);
static int32_t queue_recv_wrapper(void *queue, void *item, uint32_t block_time_ms); static int32_t IRAM_ATTR queue_recv_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_recv_from_isr_wrapper(void *queue, void *item, void *hptw); static int32_t IRAM_ATTR queue_recv_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw);
static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id); static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id);
static void task_delete_wrapper(void *task_handle); static void task_delete_wrapper(void *task_handle);
static bool IRAM_ATTR is_in_isr_wrapper(void); static bool IRAM_ATTR is_in_isr_wrapper(void);
@@ -289,11 +273,11 @@ static void coex_bt_wakeup_request_end(void);
/* OSI funcs */ /* OSI funcs */
static const struct osi_funcs_t osi_funcs_ro = { static const struct osi_funcs_t osi_funcs_ro = {
._version = OSI_VERSION, ._version = OSI_VERSION,
._set_isr = xt_set_interrupt_handler, ._set_isr = set_isr_hlevel_wrapper,
._ints_on = xt_ints_on, ._ints_on = xt_ints_on,
._interrupt_disable = interrupt_disable, ._interrupt_disable = interrupt_hlevel_disable,
._interrupt_restore = interrupt_restore, ._interrupt_restore = interrupt_hlevel_restore,
._task_yield = vPortYield, ._task_yield = task_yield,
._task_yield_from_isr = task_yield_from_isr, ._task_yield_from_isr = task_yield_from_isr,
._semphr_create = semphr_create_wrapper, ._semphr_create = semphr_create_wrapper,
._semphr_delete = semphr_delete_wrapper, ._semphr_delete = semphr_delete_wrapper,
@@ -305,12 +289,12 @@ static const struct osi_funcs_t osi_funcs_ro = {
._mutex_delete = mutex_delete_wrapper, ._mutex_delete = mutex_delete_wrapper,
._mutex_lock = mutex_lock_wrapper, ._mutex_lock = mutex_lock_wrapper,
._mutex_unlock = mutex_unlock_wrapper, ._mutex_unlock = mutex_unlock_wrapper,
._queue_create = queue_create_wrapper, ._queue_create = queue_create_hlevel_wrapper,
._queue_delete = queue_delete_wrapper, ._queue_delete = queue_delete_hlevel_wrapper,
._queue_send = queue_send_wrapper, ._queue_send = queue_send_hlevel_wrapper,
._queue_send_from_isr = queue_send_from_isr_wrapper, ._queue_send_from_isr = queue_send_from_isr_hlevel_wrapper,
._queue_recv = queue_recv_wrapper, ._queue_recv = queue_recv_hlevel_wrapper,
._queue_recv_from_isr = queue_recv_from_isr_wrapper, ._queue_recv_from_isr = queue_recv_from_isr_hlevel_wrapper,
._task_create = task_create_wrapper, ._task_create = task_create_wrapper,
._task_delete = task_delete_wrapper, ._task_delete = task_delete_wrapper,
._is_in_isr = is_in_isr_wrapper, ._is_in_isr = is_in_isr_wrapper,
@@ -362,11 +346,6 @@ SOC_RESERVE_MEMORY_REGION(SOC_MEM_BT_DATA_START, SOC_MEM_BT_DATA_END,
static DRAM_ATTR struct osi_funcs_t *osi_funcs_p; static DRAM_ATTR struct osi_funcs_t *osi_funcs_p;
#if CONFIG_SPIRAM_USE_MALLOC
static DRAM_ATTR btdm_queue_item_t btdm_queue_table[BTDM_MAX_QUEUE_NUM];
static DRAM_ATTR SemaphoreHandle_t btdm_queue_table_mux = NULL;
#endif /* #if CONFIG_SPIRAM_USE_MALLOC */
/* Static variable declare */ /* Static variable declare */
// timestamp when PHY/RF was switched on // timestamp when PHY/RF was switched on
static DRAM_ATTR int64_t s_time_phy_rf_just_enabled = 0; static DRAM_ATTR int64_t s_time_phy_rf_just_enabled = 0;
@@ -397,70 +376,50 @@ static inline void btdm_check_and_init_bb(void)
} }
} }
#if CONFIG_SPIRAM_USE_MALLOC struct interrupt_hlevel_cb{
static bool btdm_queue_generic_register(const btdm_queue_item_t *queue) uint32_t status;
uint8_t nested;
};
struct interrupt_hlevel_cb hli_cb = {
.status = 0,
.nested = 0,
};
static xt_handler set_isr_hlevel_wrapper(int mask, xt_handler f, void *arg)
{ {
if (!btdm_queue_table_mux || !queue) { esp_err_t err = hli_intr_register((intr_handler_t) f, arg, DPORT_PRO_INTR_STATUS_0_REG, mask);
return NULL; if (err == ESP_OK) {
} return f;
bool ret = false;
btdm_queue_item_t *item;
xSemaphoreTake(btdm_queue_table_mux, portMAX_DELAY);
for (int i = 0; i < BTDM_MAX_QUEUE_NUM; ++i) {
item = &btdm_queue_table[i];
if (item->handle == NULL) {
memcpy(item, queue, sizeof(btdm_queue_item_t));
ret = true;
break;
}
}
xSemaphoreGive(btdm_queue_table_mux);
return ret;
}
static bool btdm_queue_generic_deregister(btdm_queue_item_t *queue)
{
if (!btdm_queue_table_mux || !queue) {
return false;
}
bool ret = false;
btdm_queue_item_t *item;
xSemaphoreTake(btdm_queue_table_mux, portMAX_DELAY);
for (int i = 0; i < BTDM_MAX_QUEUE_NUM; ++i) {
item = &btdm_queue_table[i];
if (item->handle == queue->handle) {
memcpy(queue, item, sizeof(btdm_queue_item_t));
memset(item, 0, sizeof(btdm_queue_item_t));
ret = true;
break;
}
}
xSemaphoreGive(btdm_queue_table_mux);
return ret;
}
#endif /* CONFIG_SPIRAM_USE_MALLOC */
static void IRAM_ATTR interrupt_disable(void)
{
if (xPortInIsrContext()) {
portENTER_CRITICAL_ISR(&global_int_mux);
} else { } else {
portENTER_CRITICAL(&global_int_mux); return 0;
} }
} }
static void IRAM_ATTR interrupt_restore(void) static void IRAM_ATTR interrupt_hlevel_disable(void)
{ {
if (xPortInIsrContext()) { assert(xPortGetCoreID() == CONFIG_BTDM_CONTROLLER_PINNED_TO_CORE);
portEXIT_CRITICAL_ISR(&global_int_mux); uint32_t status = hli_intr_disable();
} else { if (hli_cb.nested++ == 0) {
portEXIT_CRITICAL(&global_int_mux); hli_cb.status = status;
} }
} }
static void IRAM_ATTR interrupt_hlevel_restore(void)
{
assert(xPortGetCoreID() == CONFIG_BTDM_CONTROLLER_PINNED_TO_CORE);
assert(hli_cb.nested > 0);
if (--hli_cb.nested == 0) {
hli_intr_restore(hli_cb.status);
}
}
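
For reference, a minimal sketch of the nesting pattern that interrupt_hlevel_disable()/interrupt_hlevel_restore() implement above: only the outermost disable records the state returned by hli_intr_disable(), and only the matching outermost restore writes it back, so the pair can be nested. Only hli_intr_disable()/hli_intr_restore() come from hli_api.h in this commit; the demo_* names are illustrative, not part of the change.

#include <assert.h>
#include <stdint.h>
#include "hli_api.h"

static uint32_t s_saved_status;   /* PS state captured by the outermost disable */
static uint8_t s_nested;          /* nesting depth */

static void demo_hlevel_disable(void)
{
    uint32_t status = hli_intr_disable();   /* raise INTLEVEL, get previous state */
    if (s_nested++ == 0) {
        s_saved_status = status;            /* remember state only on first entry */
    }
}

static void demo_hlevel_restore(void)
{
    assert(s_nested > 0);
    if (--s_nested == 0) {
        hli_intr_restore(s_saved_status);   /* lower INTLEVEL only on last exit */
    }
}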
static void IRAM_ATTR task_yield(void)
{
vPortYield();
}
static void IRAM_ATTR task_yield_from_isr(void) static void IRAM_ATTR task_yield_from_isr(void)
{ {
portYIELD_FROM_ISR(); portYIELD_FROM_ISR();
@@ -468,148 +427,62 @@ static void IRAM_ATTR task_yield_from_isr(void)
static void *semphr_create_wrapper(uint32_t max, uint32_t init) static void *semphr_create_wrapper(uint32_t max, uint32_t init)
{ {
#if !CONFIG_SPIRAM_USE_MALLOC SemaphoreHandle_t downstream_semaphore = xSemaphoreCreateCounting(max, init);
return (void *)xSemaphoreCreateCounting(max, init); assert(downstream_semaphore);
#else hli_queue_handle_t s_semaphore = hli_semaphore_create(max, downstream_semaphore);
StaticQueue_t *queue_buffer = NULL; assert(s_semaphore);
QueueHandle_t handle = NULL;
queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT); return s_semaphore;
if (!queue_buffer) {
goto error;
}
handle = xSemaphoreCreateCountingStatic(max, init, queue_buffer);
if (!handle) {
goto error;
}
btdm_queue_item_t item = {
.handle = handle,
.storage = NULL,
.buffer = queue_buffer,
};
if (!btdm_queue_generic_register(&item)) {
goto error;
}
return handle;
error:
if (handle) {
vSemaphoreDelete(handle);
}
if (queue_buffer) {
free(queue_buffer);
}
return NULL;
#endif
} }
static void semphr_delete_wrapper(void *semphr) static void semphr_delete_wrapper(void *semphr)
{ {
#if !CONFIG_SPIRAM_USE_MALLOC if (((hli_queue_handle_t)semphr)->downstream != NULL) {
vSemaphoreDelete(semphr); vSemaphoreDelete(((hli_queue_handle_t)semphr)->downstream);
#else
btdm_queue_item_t item = {
.handle = semphr,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vSemaphoreDelete(item.handle);
free(item.buffer);
} }
return; hli_queue_delete(semphr);
#endif
} }
static int32_t IRAM_ATTR semphr_take_from_isr_wrapper(void *semphr, void *hptw) static int32_t IRAM_ATTR semphr_take_from_isr_wrapper(void *semphr, void *hptw)
{ {
return (int32_t)xSemaphoreTakeFromISR(semphr, hptw); return (int32_t)xSemaphoreTakeFromISR(((hli_queue_handle_t)semphr)->downstream, hptw);
} }
static int32_t IRAM_ATTR semphr_give_from_isr_wrapper(void *semphr, void *hptw) static int32_t IRAM_ATTR semphr_give_from_isr_wrapper(void *semphr, void *hptw)
{ {
return (int32_t)xSemaphoreGiveFromISR(semphr, hptw); if (hptw != NULL) {
*(uint32_t *)hptw = 0;
}
bool ret = hli_semaphore_give(semphr);
return ret;
} }
static int32_t semphr_take_wrapper(void *semphr, uint32_t block_time_ms) static int32_t semphr_take_wrapper(void *semphr, uint32_t block_time_ms)
{ {
bool ret;
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) { if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xSemaphoreTake(semphr, portMAX_DELAY); ret = xSemaphoreTake(((hli_queue_handle_t)semphr)->downstream, portMAX_DELAY);
} else { } else {
return (int32_t)xSemaphoreTake(semphr, block_time_ms / portTICK_PERIOD_MS); ret = xSemaphoreTake(((hli_queue_handle_t)semphr)->downstream, block_time_ms / portTICK_PERIOD_MS);
} }
return (int32_t)ret;
} }
static int32_t semphr_give_wrapper(void *semphr) static int32_t semphr_give_wrapper(void *semphr)
{ {
return (int32_t)xSemaphoreGive(semphr); return (int32_t)xSemaphoreGive(((hli_queue_handle_t)semphr)->downstream);
} }
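
A hedged sketch of the pattern behind the semphr_*_wrapper() functions above: the hli "semaphore" is an hli queue whose downstream member is a FreeRTOS counting semaphore. Gives from the level 4 interrupt go through hli_semaphore_give() and are delivered later by the level 3 software interrupt, while takes use the downstream handle directly. hli_semaphore_create(), hli_semaphore_give() and the downstream field come from hli_api.h in this commit; the demo_* names and counts are illustrative.

#include <assert.h>
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hli_api.h"

static hli_queue_handle_t s_demo_sem;

static void demo_sem_init(void)
{
    SemaphoreHandle_t downstream = xSemaphoreCreateCounting(4, 0);
    assert(downstream);
    s_demo_sem = hli_semaphore_create(4, downstream);   /* wrap it for level 4 use */
    assert(s_demo_sem);
}

static void IRAM_ATTR demo_give_from_hli(void)
{
    hli_semaphore_give(s_demo_sem);   /* queued; given via xSemaphoreGiveFromISR() later */
}

static void demo_take_in_task(void)
{
    xSemaphoreTake((SemaphoreHandle_t) s_demo_sem->downstream, portMAX_DELAY);
}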
static void *mutex_create_wrapper(void) static void *mutex_create_wrapper(void)
{ {
#if CONFIG_SPIRAM_USE_MALLOC
StaticQueue_t *queue_buffer = NULL;
QueueHandle_t handle = NULL;
queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (!queue_buffer) {
goto error;
}
handle = xSemaphoreCreateMutexStatic(queue_buffer);
if (!handle) {
goto error;
}
btdm_queue_item_t item = {
.handle = handle,
.storage = NULL,
.buffer = queue_buffer,
};
if (!btdm_queue_generic_register(&item)) {
goto error;
}
return handle;
error:
if (handle) {
vSemaphoreDelete(handle);
}
if (queue_buffer) {
free(queue_buffer);
}
return NULL;
#else
return (void *)xSemaphoreCreateMutex(); return (void *)xSemaphoreCreateMutex();
#endif
} }
static void mutex_delete_wrapper(void *mutex) static void mutex_delete_wrapper(void *mutex)
{ {
#if !CONFIG_SPIRAM_USE_MALLOC
vSemaphoreDelete(mutex); vSemaphoreDelete(mutex);
#else
btdm_queue_item_t item = {
.handle = mutex,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vSemaphoreDelete(item.handle);
free(item.buffer);
}
return;
#endif
} }
static int32_t mutex_lock_wrapper(void *mutex) static int32_t mutex_lock_wrapper(void *mutex)
@@ -622,104 +495,66 @@ static int32_t mutex_unlock_wrapper(void *mutex)
return (int32_t)xSemaphoreGive(mutex); return (int32_t)xSemaphoreGive(mutex);
} }
static void *queue_create_wrapper(uint32_t queue_len, uint32_t item_size) static void *queue_create_hlevel_wrapper(uint32_t queue_len, uint32_t item_size, int flag)
{ {
#if CONFIG_SPIRAM_USE_MALLOC QueueHandle_t downstream_queue = xQueueCreate(queue_len, item_size);
StaticQueue_t *queue_buffer = NULL; assert(downstream_queue);
uint8_t *queue_storage = NULL; hli_queue_handle_t queue;
QueueHandle_t handle = NULL; /**
* TODO: Should use macro here!
queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT); */
if (!queue_buffer) { if (flag == 0) {
goto error; queue = hli_queue_create(queue_len, item_size, downstream_queue);
} else if (flag == 1) {
queue = hli_customer_queue_create(queue_len, item_size, downstream_queue);
} else {
assert(0);
} }
assert(queue);
queue_storage = heap_caps_malloc((queue_len*item_size), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT); return queue;
if (!queue_storage ) {
goto error;
}
handle = xQueueCreateStatic(queue_len, item_size, queue_storage, queue_buffer);
if (!handle) {
goto error;
}
btdm_queue_item_t item = {
.handle = handle,
.storage = queue_storage,
.buffer = queue_buffer,
};
if (!btdm_queue_generic_register(&item)) {
goto error;
}
return handle;
error:
if (handle) {
vQueueDelete(handle);
}
if (queue_storage) {
free(queue_storage);
}
if (queue_buffer) {
free(queue_buffer);
}
return NULL;
#else
return (void *)xQueueCreate(queue_len, item_size);
#endif
} }
static void queue_delete_wrapper(void *queue) static void queue_delete_hlevel_wrapper(void *queue)
{ {
#if !CONFIG_SPIRAM_USE_MALLOC if (((hli_queue_handle_t)queue)->downstream != NULL) {
vQueueDelete(queue); vQueueDelete(((hli_queue_handle_t)queue)->downstream);
#else
btdm_queue_item_t item = {
.handle = queue,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vQueueDelete(item.handle);
free(item.storage);
free(item.buffer);
} }
return; hli_queue_delete(queue);
#endif
} }
static int32_t queue_send_wrapper(void *queue, void *item, uint32_t block_time_ms) static int32_t queue_send_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms)
{ {
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) { if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xQueueSend(queue, item, portMAX_DELAY); return (int32_t)xQueueSend(((hli_queue_handle_t)queue)->downstream, item, portMAX_DELAY);
} else { } else {
return (int32_t)xQueueSend(queue, item, block_time_ms / portTICK_PERIOD_MS); return (int32_t)xQueueSend(((hli_queue_handle_t)queue)->downstream, item, block_time_ms / portTICK_PERIOD_MS);
} }
} }
static int32_t IRAM_ATTR queue_send_from_isr_wrapper(void *queue, void *item, void *hptw) static int32_t IRAM_ATTR queue_send_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw)
{ {
return (int32_t)xQueueSendFromISR(queue, item, hptw); if (hptw != NULL) {
*(uint32_t *)hptw = 0;
}
return hli_queue_put(queue, item);
} }
static int32_t queue_recv_wrapper(void *queue, void *item, uint32_t block_time_ms) static int32_t queue_recv_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms)
{ {
bool ret;
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) { if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xQueueReceive(queue, item, portMAX_DELAY); ret = (int32_t)xQueueReceive(((hli_queue_handle_t)queue)->downstream, item, portMAX_DELAY);
} else { } else {
return (int32_t)xQueueReceive(queue, item, block_time_ms / portTICK_PERIOD_MS); ret = (int32_t)xQueueReceive(((hli_queue_handle_t)queue)->downstream, item, block_time_ms / portTICK_PERIOD_MS);
} }
return ret;
} }
static int32_t IRAM_ATTR queue_recv_from_isr_wrapper(void *queue, void *item, void *hptw) static int32_t IRAM_ATTR queue_recv_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw)
{ {
return (int32_t)xQueueReceiveFromISR(queue, item, hptw); return (int32_t)xQueueReceiveFromISR(((hli_queue_handle_t)queue)->downstream, item, hptw);
} }
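
Similarly, a hedged sketch of the queue path implemented by the queue_*_hlevel_wrapper() functions above: the level 4 interrupt writes with hli_queue_put(), the level 3 software interrupt copies each item into the downstream FreeRTOS queue, and a task reads it with the normal API. hli_queue_create(), hli_queue_put() and the downstream field come from hli_api.h in this commit; the demo_* names, element type and depth are illustrative.

#include <assert.h>
#include <stdint.h>
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "hli_api.h"

typedef struct {
    uint32_t evt;
} demo_evt_t;

static hli_queue_handle_t s_demo_queue;

static void demo_queue_init(void)
{
    QueueHandle_t downstream = xQueueCreate(8, sizeof(demo_evt_t));
    assert(downstream);
    s_demo_queue = hli_queue_create(8, sizeof(demo_evt_t), downstream);
    assert(s_demo_queue);
}

static void IRAM_ATTR demo_send_from_hli(uint32_t evt)
{
    demo_evt_t item = { .evt = evt };
    hli_queue_put(s_demo_queue, &item);   /* non-blocking, usable at interrupt level 4 */
}

static void demo_recv_in_task(demo_evt_t *out)
{
    xQueueReceive((QueueHandle_t) s_demo_queue->downstream, out, portMAX_DELAY);
}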
static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id) static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id)
@@ -1155,14 +990,6 @@ esp_err_t esp_bt_controller_init(esp_bt_controller_config_t *cfg)
ESP_LOGI(BTDM_LOG_TAG, "BT controller compile version [%s]", btdm_controller_get_compile_version()); ESP_LOGI(BTDM_LOG_TAG, "BT controller compile version [%s]", btdm_controller_get_compile_version());
#if CONFIG_SPIRAM_USE_MALLOC
btdm_queue_table_mux = xSemaphoreCreateMutex();
if (btdm_queue_table_mux == NULL) {
return ESP_ERR_NO_MEM;
}
memset(btdm_queue_table, 0, sizeof(btdm_queue_item_t) * BTDM_MAX_QUEUE_NUM);
#endif
#ifdef CONFIG_PM_ENABLE #ifdef CONFIG_PM_ENABLE
if ((err = esp_pm_lock_create(ESP_PM_NO_LIGHT_SLEEP, 0, "btLS", &s_light_sleep_pm_lock)) != ESP_OK) { if ((err = esp_pm_lock_create(ESP_PM_NO_LIGHT_SLEEP, 0, "btLS", &s_light_sleep_pm_lock)) != ESP_OK) {
goto error; goto error;
@@ -1277,12 +1104,6 @@ esp_err_t esp_bt_controller_deinit(void)
s_pm_lock_sem = NULL; s_pm_lock_sem = NULL;
#endif #endif
#if CONFIG_SPIRAM_USE_MALLOC
vSemaphoreDelete(btdm_queue_table_mux);
btdm_queue_table_mux = NULL;
memset(btdm_queue_table, 0, sizeof(btdm_queue_item_t) * BTDM_MAX_QUEUE_NUM);
#endif
free(osi_funcs_p); free(osi_funcs_p);
osi_funcs_p = NULL; osi_funcs_p = NULL;
@@ -1524,4 +1345,21 @@ esp_err_t esp_ble_scan_dupilcate_list_flush(void)
return ESP_OK; return ESP_OK;
} }
#endif /* CONFIG_BT_ENABLED */ void IRAM_ATTR interrupt_disable_l3(void)
{
if (xPortInIsrContext()) {
portENTER_CRITICAL_ISR(&global_int_mux);
} else {
portENTER_CRITICAL(&global_int_mux);
}
}
void IRAM_ATTR interrupt_restore_l3(void)
{
if (xPortInIsrContext()) {
portEXIT_CRITICAL_ISR(&global_int_mux);
} else {
portEXIT_CRITICAL(&global_int_mux);
}
}
#endif /* CONFIG_BT_ENABLED */
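
For reference, a hedged sketch of what set_isr_hlevel_wrapper() earlier in this file does in place of xt_set_interrupt_handler(): the controller handler is registered with the hli dispatcher, keyed by a status mask in DPORT_PRO_INTR_STATUS_0_REG, so hli_c_handler() can route the level 4 interrupt to it. hli_intr_register() and the register name come from this commit; the demo_* names and the mask value are illustrative.

#include <stdint.h>
#include "esp_err.h"
#include "soc/dport_reg.h"
#include "hli_api.h"

static void demo_controller_isr(void *arg)
{
    (void) arg;
    /* controller interrupt work runs here, at interrupt level 4 */
}

static esp_err_t demo_register_controller_isr(uint32_t status_mask)
{
    return hli_intr_register(demo_controller_isr, NULL,
                             DPORT_PRO_INTR_STATUS_0_REG, status_mask);
}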

View File

@@ -10,6 +10,7 @@ COMPONENT_ADD_INCLUDEDIRS := include
LIBS := btdm_app LIBS := btdm_app
COMPONENT_ADD_LDFLAGS := -lbt -L $(COMPONENT_PATH)/lib \ COMPONENT_ADD_LDFLAGS := -lbt -L $(COMPONENT_PATH)/lib \
-u ld_include_hli_vectors_bt \
$(addprefix -l,$(LIBS)) $(addprefix -l,$(LIBS))
# re-link program if BT binary libs change # re-link program if BT binary libs change

View File

@@ -20,8 +20,34 @@ typedef struct {
uint32_t intr_mask; uint32_t intr_mask;
} hli_handler_info_t; } hli_handler_info_t;
typedef struct {
#define CUSTOMER_TYPE_REQUEST (0)
#define CUSTOMER_TYPE_RELEASE (1)
struct {
uint32_t cb_type;
union {
int (* request)(uint32_t, uint32_t, uint32_t);
int (* release)(uint32_t);
} cb;
} customer_cb;
uint32_t arg0, arg1, arg2;
} customer_swisr_t;
static void IRAM_ATTR customer_swisr_handle(customer_swisr_t *cus_swisr)
{
if (cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_REQUEST) {
if (cus_swisr->customer_cb.cb.request != NULL) {
cus_swisr->customer_cb.cb.request(cus_swisr->arg0, cus_swisr->arg1, cus_swisr->arg2);
}
} else if(cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_RELEASE) {
if (cus_swisr->customer_cb.cb.release != NULL) {
cus_swisr->customer_cb.cb.release(cus_swisr->arg0);
}
}
}
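
A hedged sketch of how a "customer" item is meant to be produced: a high level interrupt handler fills in a customer_swisr_t and puts it on a queue created with hli_customer_queue_create(); queue_isr_handler() further down then runs the recorded callback from the level 3 software interrupt instead of forwarding the bytes downstream. customer_swisr_t is file-local in this commit, so the sketch assumes it sits next to this code; the demo_* names and the callback body are illustrative.

#include <stdint.h>
#include "esp_attr.h"
#include "hli_api.h"

static int demo_request_cb(uint32_t a0, uint32_t a1, uint32_t a2)
{
    /* runs from the level 3 software interrupt, not at level 4 */
    return 0;
}

static void IRAM_ATTR demo_post_request(hli_queue_handle_t customer_queue,
                                        uint32_t a0, uint32_t a1, uint32_t a2)
{
    /* customer_queue must have elem_size == sizeof(customer_swisr_t) */
    customer_swisr_t item = {
        .customer_cb = {
            .cb_type = CUSTOMER_TYPE_REQUEST,
            .cb = { .request = demo_request_cb },
        },
        .arg0 = a0,
        .arg1 = a1,
        .arg2 = a2,
    };
    hli_queue_put(customer_queue, &item);   /* dispatched by the level 3 SW interrupt */
}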
static hli_handler_info_t s_hli_handlers[HLI_MAX_HANDLERS]; static hli_handler_info_t s_hli_handlers[HLI_MAX_HANDLERS];
static const char* TAG = "hli_queue"; // static const char* TAG = "hli_queue";
esp_err_t hli_intr_register(intr_handler_t handler, void* arg, uint32_t intr_reg, uint32_t intr_mask) esp_err_t hli_intr_register(intr_handler_t handler, void* arg, uint32_t intr_reg, uint32_t intr_mask)
{ {
@@ -65,18 +91,18 @@ void IRAM_ATTR hli_c_handler(void)
} }
} }
if (!handled) { if (!handled) {
ets_printf(DRAM_STR("hli_c_handler: no handler found!\n")); // ets_printf(DRAM_STR("hli_c_handler: no handler found!\n"));
abort(); // abort();
} }
} }
uint32_t hli_intr_disable(void) uint32_t IRAM_ATTR hli_intr_disable(void)
{ {
// disable level 4 and below // disable level 4 and below
return XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2); return XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2);
} }
void hli_intr_restore(uint32_t state) void IRAM_ATTR hli_intr_restore(uint32_t state)
{ {
XTOS_RESTORE_JUST_INTLEVEL(state); XTOS_RESTORE_JUST_INTLEVEL(state);
} }
@@ -86,8 +112,10 @@ void hli_intr_restore(uint32_t state)
#define HLI_QUEUE_SW_INT_NUM 29 #define HLI_QUEUE_SW_INT_NUM 29
#define HLI_QUEUE_FLAG_SEMAPHORE BIT(0) #define HLI_QUEUE_FLAG_SEMAPHORE BIT(0)
#define HLI_QUEUE_FLAG_CUSTOMER BIT(1)
struct hli_queue_t *s_meta_queue_ptr; struct hli_queue_t *s_meta_queue_ptr = NULL;
intr_handle_t ret_handle;
static inline char* IRAM_ATTR wrap_ptr(hli_queue_handle_t queue, char *ptr) static inline char* IRAM_ATTR wrap_ptr(hli_queue_handle_t queue, char *ptr)
{ {
@@ -109,17 +137,20 @@ static void IRAM_ATTR queue_isr_handler(void* arg)
int do_yield = pdFALSE; int do_yield = pdFALSE;
XTHAL_SET_INTCLEAR(BIT(HLI_QUEUE_SW_INT_NUM)); XTHAL_SET_INTCLEAR(BIT(HLI_QUEUE_SW_INT_NUM));
hli_queue_handle_t queue; hli_queue_handle_t queue;
while (hli_queue_get(s_meta_queue_ptr, &queue)) { while (hli_queue_get(s_meta_queue_ptr, &queue)) {
static char scratch[HLI_QUEUE_MAX_ELEM_SIZE]; static char scratch[HLI_QUEUE_MAX_ELEM_SIZE];
while (hli_queue_get(queue, scratch)) { while (hli_queue_get(queue, scratch)) {
int res = pdPASS; int res = pdPASS;
if ((queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) != 0) { if ((queue->flags & HLI_QUEUE_FLAG_CUSTOMER) != 0) {
customer_swisr_handle((customer_swisr_t *)scratch);
} else if ((queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) != 0) {
res = xSemaphoreGiveFromISR((SemaphoreHandle_t) queue->downstream, &do_yield); res = xSemaphoreGiveFromISR((SemaphoreHandle_t) queue->downstream, &do_yield);
} else { } else {
res = xQueueSendFromISR(queue->downstream, scratch, &do_yield); res = xQueueSendFromISR(queue->downstream, scratch, &do_yield);
} }
if (res == pdFAIL) { if (res == pdFAIL) {
ESP_EARLY_LOGE(TAG, "Failed to send to %s %p", (queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) == 0 ? "queue" : "semaphore", queue->downstream); // ESP_EARLY_LOGE(TAG, "Failed to send to %s %p", (queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) == 0 ? "queue" : "semaphore", queue->downstream);
} }
} }
} }
@@ -167,9 +198,21 @@ static void queue_init(hli_queue_handle_t queue, size_t buf_size, size_t elem_si
void hli_queue_setup(void) void hli_queue_setup(void)
{ {
s_meta_queue_ptr = hli_queue_create(HLI_META_QUEUE_SIZE, sizeof(void*), NULL); if (s_meta_queue_ptr == NULL) {
ESP_ERROR_CHECK(esp_intr_alloc(ETS_INTERNAL_SW1_INTR_SOURCE, ESP_INTR_FLAG_IRAM, queue_isr_handler, NULL, NULL)); s_meta_queue_ptr = hli_queue_create(HLI_META_QUEUE_SIZE, sizeof(void*), NULL);
xt_ints_on(BIT(HLI_QUEUE_SW_INT_NUM)); ESP_ERROR_CHECK(esp_intr_alloc(ETS_INTERNAL_SW1_INTR_SOURCE, ESP_INTR_FLAG_IRAM, queue_isr_handler, NULL, &ret_handle));
xt_ints_on(BIT(HLI_QUEUE_SW_INT_NUM));
}
}
void hli_queue_shutdown(void)
{
if (s_meta_queue_ptr != NULL) {
hli_queue_delete(s_meta_queue_ptr);
s_meta_queue_ptr = NULL;
esp_intr_free(ret_handle);
xt_ints_off(BIT(HLI_QUEUE_SW_INT_NUM));
}
} }
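
A hedged sketch of the module lifecycle after this change: hli_queue_setup() is now idempotent and hli_queue_shutdown() releases the level 3 software interrupt and the meta queue again. hli_queue_setup/shutdown/create/delete come from this commit; the demo_* name, depth and element type are illustrative.

#include <assert.h>
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "hli_api.h"

static void demo_hli_lifecycle(void)
{
    hli_queue_setup();   /* no-op if the meta queue and SW interrupt already exist */

    QueueHandle_t downstream = xQueueCreate(4, sizeof(uint32_t));
    assert(downstream);
    hli_queue_handle_t q = hli_queue_create(4, sizeof(uint32_t), downstream);
    assert(q);

    /* ... use q from a high level interrupt handler ... */

    hli_queue_delete(q);
    vQueueDelete(downstream);
    hli_queue_shutdown();   /* frees the SW interrupt handle and the meta queue */
}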
hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream) hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
@@ -188,6 +231,16 @@ hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_
return res; return res;
} }
hli_queue_handle_t hli_customer_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
{
hli_queue_handle_t res = hli_queue_create(nelem, elem_size, (QueueHandle_t) downstream);
if (res == NULL) {
return NULL;
}
res->flags |= HLI_QUEUE_FLAG_CUSTOMER;
return res;
}
hli_queue_handle_t hli_semaphore_create(size_t max_count, SemaphoreHandle_t downstream) hli_queue_handle_t hli_semaphore_create(size_t max_count, SemaphoreHandle_t downstream)
{ {
const size_t elem_size = 1; const size_t elem_size = 1;

View File

@@ -66,6 +66,11 @@ typedef struct hli_queue_t* hli_queue_handle_t;
*/ */
void hli_queue_setup(void); void hli_queue_setup(void);
/**
* @brief Shutdown hli_queue module.
*/
void hli_queue_shutdown(void);
/** /**
* @brief Create a hli queue, wrapping a FreeRTOS queue * @brief Create a hli queue, wrapping a FreeRTOS queue
* *
@@ -81,6 +86,21 @@ void hli_queue_setup(void);
*/ */
hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream); hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream);
/**
* @brief Create a customer hli queue, wrapping a FreeRTOS queue
*
* This queue can be used from high level interrupts,
* but **ONLY ON THE CPU WHERE hli_queue_setup WAS CALLED**. Values sent to this
* queue are handled by a level 3 software interrupt, which invokes the callback carried
* in each item instead of copying the item to the "downstream" queue.
*
* @param nelem number of elements in the queue
* @param elem_size size of one element; must match element size of a downstream queue
* @param downstream FreeRTOS queue to send the values to
* @return hli_queue_handle_t handle of the created queue, or NULL on failure
*/
hli_queue_handle_t hli_customer_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream);
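
A hedged usage sketch for the two constructors declared above, mirroring queue_create_hlevel_wrapper() in bt.c: the same downstream FreeRTOS queue backs either a plain hli queue (flag 0) or a customer queue (flag 1). Only the flag values used in this commit are assumed; the demo_* name and sizes are illustrative.

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "hli_api.h"

static hli_queue_handle_t demo_create_hli_queue(uint32_t len, uint32_t item_size, int flag)
{
    QueueHandle_t downstream = xQueueCreate(len, item_size);
    if (downstream == NULL) {
        return NULL;
    }
    return (flag == 1) ? hli_customer_queue_create(len, item_size, downstream)
                       : hli_queue_create(len, item_size, downstream);
}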
/** /**
* @brief Create a hli queue, wrapping a FreeRTOS semaphore * @brief Create a hli queue, wrapping a FreeRTOS semaphore
* *

View File

@@ -11,7 +11,7 @@
/* Interrupt stack size, for C code. /* Interrupt stack size, for C code.
* TODO: reduce and make configurable. * TODO: reduce and make configurable.
*/ */
#define L5_INTR_STACK_SIZE 4096 #define L4_INTR_STACK_SIZE 4096
/* Save area for the CPU state: /* Save area for the CPU state:
* - 64 words for the general purpose registers * - 64 words for the general purpose registers
@@ -25,21 +25,20 @@
#define REG_FILE_SIZE (64 * 4) #define REG_FILE_SIZE (64 * 4)
#define SPECREG_OFFSET REG_FILE_SIZE #define SPECREG_OFFSET REG_FILE_SIZE
#define SPECREG_SIZE (7 * 4) #define SPECREG_SIZE (7 * 4)
#define REG_SAVE_AREA_SIZE (SPECREG_OFFSET * SPECREG_SIZE) #define REG_SAVE_AREA_SIZE (SPECREG_OFFSET + SPECREG_SIZE)
.data .data
_l5_intr_stack: _l4_intr_stack:
.space L5_INTR_STACK_SIZE .space L4_INTR_STACK_SIZE
_l5_save_ctx: _l4_save_ctx:
.space REG_SAVE_AREA_SIZE .space REG_SAVE_AREA_SIZE
.section .iram1,"ax" .section .iram1,"ax"
.global xt_highint5 .global xt_highint4
.type xt_highint5,@function .type xt_highint4,@function
.align 4 .align 4
xt_highint4:
xt_highint5:
/* disable exception mode, window overflow */ /* disable exception mode, window overflow */
movi a0, PS_INTLEVEL(5) | PS_EXCM /*TOCHECK*/ movi a0, PS_INTLEVEL(5) | PS_EXCM /*TOCHECK*/
wsr a0, PS wsr a0, PS
@@ -50,7 +49,7 @@ xt_highint5:
s32i a1, a0, 4 s32i a1, a0, 4
s32i a2, a0, 8 s32i a2, a0, 8
s32i a3, a0, 12 s32i a3, a0, 12
rsr a2, EXCSAVE_5 /* holds the value of a0 */ rsr a2, EXCSAVE_4 /* holds the value of a0 */
s32i a2, a0, 0 s32i a2, a0, 0
/* Save special registers */ /* Save special registers */
@@ -76,7 +75,7 @@ xt_highint5:
* These 60 registers are saved in 5 iterations, 12 registers at a time. * These 60 registers are saved in 5 iterations, 12 registers at a time.
*/ */
movi a1, 5 movi a1, 5
movi a3, _l5_save_ctx + 4 * 4 movi a3, _l4_save_ctx + 4 * 4
/* This is repeated 5 times, each time the window is shifted by 12 registers. /* This is repeated 5 times, each time the window is shifted by 12 registers.
* We come here with a1 = downcounter, a3 = save pointer, a2 and a0 unused. * We come here with a1 = downcounter, a3 = save pointer, a2 and a0 unused.
@@ -100,7 +99,7 @@ xt_highint5:
* At the same time we can decrement the counter and adjust the save area pointer * At the same time we can decrement the counter and adjust the save area pointer
*/ */
/* a0 is constant (_l5_save_ctx), no need to copy */ /* a0 is constant (_l4_save_ctx), no need to copy */
addi a13, a1, -1 /* copy and decrement the downcounter */ addi a13, a1, -1 /* copy and decrement the downcounter */
/* a2 is scratch so no need to copy */ /* a2 is scratch so no need to copy */
addi a15, a3, 48 /* copy and adjust the save area pointer */ addi a15, a3, 48 /* copy and adjust the save area pointer */
@@ -111,7 +110,7 @@ xt_highint5:
/* the loop is complete */ /* the loop is complete */
2: 2:
rotw 4 /* this brings us back to the original window */ rotw 4 /* this brings us back to the original window */
/* a0 still points to _l5_save_ctx */ /* a0 still points to _l4_save_ctx */
/* Can clear WINDOWSTART now, all registers are saved */ /* Can clear WINDOWSTART now, all registers are saved */
rsr a2, WINDOWBASE rsr a2, WINDOWBASE
@@ -123,13 +122,13 @@ xt_highint5:
_highint4_stack_switch: _highint4_stack_switch:
movi a0, 0 movi a0, 0
movi sp, _l5_intr_stack + L5_INTR_STACK_SIZE - 16 movi sp, _l4_intr_stack + L4_INTR_STACK_SIZE - 16
s32e a0, sp, -12 /* For GDB: set null SP */ s32e a0, sp, -12 /* For GDB: set null SP */
s32e a0, sp, -16 /* For GDB: set null PC */ s32e a0, sp, -16 /* For GDB: set null PC */
movi a0, _highint5_stack_switch /* For GDB: cosmetics, for the frame where stack switch happened */ movi a0, _highint4_stack_switch /* For GDB: cosmetics, for the frame where stack switch happened */
/* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */ /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
movi a6, PS_INTLEVEL(5) | PS_UM | PS_WOE movi a6, PS_INTLEVEL(4) | PS_UM | PS_WOE
wsr a6, PS wsr a6, PS
rsync rsync
@@ -147,7 +146,7 @@ _highint4_stack_switch:
/* Restore the special registers. /* Restore the special registers.
* WINDOWSTART will be restored near the end. * WINDOWSTART will be restored near the end.
*/ */
movi a0, _l5_save_ctx + SPECREG_OFFSET movi a0, _l4_save_ctx + SPECREG_OFFSET
l32i a2, a0, 8 l32i a2, a0, 8
wsr a2, SAR wsr a2, SAR
l32i a2, a0, 12 l32i a2, a0, 12
@@ -170,7 +169,7 @@ _highint4_stack_switch:
* To simplify the loop, we put the initial values into a13 and a15. * To simplify the loop, we put the initial values into a13 and a15.
*/ */
rotw -4 rotw -4
movi a15, _l5_save_ctx + 64 * 4 /* point to the end of the save area */ movi a15, _l4_save_ctx + 64 * 4 /* point to the end of the save area */
movi a13, 5 movi a13, 5
1: 1:
@@ -205,7 +204,7 @@ _highint4_stack_switch:
* to be restored. Also need to restore WINDOWSTART, since all the general * to be restored. Also need to restore WINDOWSTART, since all the general
* registers are now in place. * registers are now in place.
*/ */
movi a0, _l5_save_ctx movi a0, _l4_save_ctx
l32i a2, a0, SPECREG_OFFSET + 4 l32i a2, a0, SPECREG_OFFSET + 4
wsr a2, WINDOWSTART wsr a2, WINDOWSTART
@@ -215,7 +214,7 @@ _highint4_stack_switch:
l32i a3, a0, 12 l32i a3, a0, 12
rsr a0, EXCSAVE_4 /* holds the value of a0 before the interrupt handler */ rsr a0, EXCSAVE_4 /* holds the value of a0 before the interrupt handler */
/* Return from the interrupt, restoring PS from EPS_5 */ /* Return from the interrupt, restoring PS from EPS_4 */
rfi 4 rfi 4
/* The linker has no reason to link in this file; all symbols it exports are already defined /* The linker has no reason to link in this file; all symbols it exports are already defined

View File

@@ -31,19 +31,19 @@ Interrupt , a high-priority interrupt, is used for several things:
*/ */
#define L4_INTR_STACK_SIZE 12 #define L5_INTR_STACK_SIZE 12
#define L4_INTR_A2_OFFSET 0 #define L5_INTR_A2_OFFSET 0
#define L4_INTR_A3_OFFSET 4 #define L5_INTR_A3_OFFSET 4
#define L4_INTR_A4_OFFSET 8 #define L5_INTR_A4_OFFSET 8
.data .data
_l4_intr_stack: _l5_intr_stack:
.space L4_INTR_STACK_SIZE .space L5_INTR_STACK_SIZE
.section .iram1,"ax" .section .iram1,"ax"
.global xt_highint4 .global xt_highint5
.type xt_highint4,@function .type xt_highint5,@function
.align 4 .align 4
xt_highint4: xt_highint5:
#ifndef CONFIG_FREERTOS_UNICORE #ifndef CONFIG_FREERTOS_UNICORE
/* See if we're here for the dport access interrupt */ /* See if we're here for the dport access interrupt */
@@ -61,7 +61,7 @@ xt_highint4:
#endif #endif
rsr a0, PS /* save interruptee's PS */ rsr a0, PS /* save interruptee's PS */
s32i a0, sp, XT_STK_PS s32i a0, sp, XT_STK_PS
rsr a0, EPC_4 /* save interruptee's PC */ rsr a0, EPC_5 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC s32i a0, sp, XT_STK_PC
#if XCHAL_HAVE_WINDOWED #if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 /* for debug backtrace */ s32e a0, sp, -16 /* for debug backtrace */
@@ -104,7 +104,7 @@ xt_highint4:
s32i a0, sp, XT_STK_EXCCAUSE s32i a0, sp, XT_STK_EXCCAUSE
/* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */ /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
rsr a0, EXCSAVE_4 /* save interruptee's a0 */ rsr a0, EXCSAVE_5 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0 s32i a0, sp, XT_STK_A0
@@ -120,13 +120,13 @@ xt_highint4:
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, PS wsr a0, PS
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_4 wsr a0, EPC_5
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove exception frame */ l32i sp, sp, XT_STK_A1 /* remove exception frame */
rsync /* ensure PS and EPC written */ rsync /* ensure PS and EPC written */
rsr a0, EXCSAVE_4 /* restore a0 */ rsr a0, EXCSAVE_5 /* restore a0 */
rfi 4 rfi 5
@@ -138,19 +138,19 @@ xt_highint4:
/* This section is for dport access register protection */ /* This section is for dport access register protection */
/* Allocate exception frame and save minimal context. */ /* Allocate exception frame and save minimal context. */
/* Because the interrupt cause code has protection that only /* Because the interrupt cause code has protection that only
allows one cpu to enter in the dport section of the L4 allows one cpu to enter in the dport section of the L4
interrupt at one time, there's no need to have two interrupt at one time, there's no need to have two
_l4_intr_stack for each cpu */ _l5_intr_stack for each cpu */
/* This int is edge-triggered and needs clearing. */ /* This int is edge-triggered and needs clearing. */
movi a0, (1<<ETS_DPORT_INUM) movi a0, (1<<ETS_DPORT_INUM)
wsr a0, INTCLEAR wsr a0, INTCLEAR
/* Save A2, A3, A4 so we can use those registers */ /* Save A2, A3, A4 so we can use those registers */
movi a0, _l4_intr_stack movi a0, _l5_intr_stack
s32i a2, a0, L4_INTR_A2_OFFSET s32i a2, a0, L5_INTR_A2_OFFSET
s32i a3, a0, L4_INTR_A3_OFFSET s32i a3, a0, L5_INTR_A3_OFFSET
s32i a4, a0, L4_INTR_A4_OFFSET s32i a4, a0, L5_INTR_A4_OFFSET
/* handle dport interrupt */ /* handle dport interrupt */
/* get CORE_ID */ /* get CORE_ID */
@@ -185,19 +185,19 @@ xt_highint4:
wsr a4, PS /* restore interrupt level */ wsr a4, PS /* restore interrupt level */
/* Done. Restore registers and return. */ /* Done. Restore registers and return. */
movi a0, _l4_intr_stack movi a0, _l5_intr_stack
l32i a2, a0, L4_INTR_A2_OFFSET l32i a2, a0, L5_INTR_A2_OFFSET
l32i a3, a0, L4_INTR_A3_OFFSET l32i a3, a0, L5_INTR_A3_OFFSET
l32i a4, a0, L4_INTR_A4_OFFSET l32i a4, a0, L5_INTR_A4_OFFSET
rsync /* ensure register restored */ rsync /* ensure register restored */
rsr a0, EXCSAVE_4 /* restore a0 */ rsr a0, EXCSAVE_5 /* restore a0 */
rfi 4 rfi 5
#endif // CONFIG_FREERTOS_UNICORE #endif // CONFIG_FREERTOS_UNICORE
/* The linker has no reason to link in this file; all symbols it exports are already defined /* The linker has no reason to link in this file; all symbols it exports are already defined
(weakly!) in the default int handler. Define a symbol here so we can use it to have the (weakly!) in the default int handler. Define a symbol here so we can use it to have the
linker inspect this anyway. */ linker inspect this anyway. */
.global ld_include_panic_highint_hdl .global ld_include_panic_highint_hdl

View File

@@ -35,8 +35,8 @@
#if CONFIG_INT_WDT #if CONFIG_INT_WDT
#define WDT_INT_NUM 24 // #define WDT_INT_NUM 24
#define WDT_INT_NUM ETS_T1_WDT_INUM
//Take care: the tick hook can also be called before esp_int_wdt_init() is called. //Take care: the tick hook can also be called before esp_int_wdt_init() is called.
#if CONFIG_INT_WDT_CHECK_CPU1 #if CONFIG_INT_WDT_CHECK_CPU1

View File

@@ -34,14 +34,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
medium level interrupts, by calling xt_set_interrupt_handler(). These medium level interrupts, by calling xt_set_interrupt_handler(). These
handlers can be written in C, and must obey C calling convention. The handlers can be written in C, and must obey C calling convention. The
handler table is indexed by the interrupt number. Each handler may be handler table is indexed by the interrupt number. Each handler may be
provided with an argument. provided with an argument.
Note that the system timer interrupt is handled specially, and is Note that the system timer interrupt is handled specially, and is
dispatched to the RTOS-specific handler. This timer cannot be hooked dispatched to the RTOS-specific handler. This timer cannot be hooked
by application code. by application code.
Optional hooks are also provided to install a handler per level at Optional hooks are also provided to install a handler per level at
run-time, made available by compiling this source file with run-time, made available by compiling this source file with
'-DXT_INTEXC_HOOKS' (useful for automated testing). '-DXT_INTEXC_HOOKS' (useful for automated testing).
!! This file is a template that usually needs to be modified to handle !! !! This file is a template that usually needs to be modified to handle !!
@@ -81,10 +81,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This allows more flexibility in locating code without the performance This allows more flexibility in locating code without the performance
overhead of the 'l32r' literal data load in cases where the destination overhead of the 'l32r' literal data load in cases where the destination
is in range of 'call0'. There is an additional benefit in that 'call0' is in range of 'call0'. There is an additional benefit in that 'call0'
has a longer range than 'j' due to the target being word-aligned, so has a longer range than 'j' due to the target being word-aligned, so
the 'l32r' sequence is less likely needed. the 'l32r' sequence is less likely needed.
3. The use of 'call0' with -mlongcalls requires that register a0 not be 3. The use of 'call0' with -mlongcalls requires that register a0 not be
live at the time of the call, which is always the case for a function live at the time of the call, which is always the case for a function
call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'. call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
4. This use of 'call0' is independent of the C function call ABI. 4. This use of 'call0' is independent of the C function call ABI.
@@ -365,7 +365,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* /*
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
Panic handler. Panic handler.
Should be reached by call0 (preferable) or jump only. If call0, a0 says where Should be reached by call0 (preferable) or jump only. If call0, a0 says where
from. If on simulator, display panic message and abort, else loop indefinitely. from. If on simulator, display panic message and abort, else loop indefinitely.
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
*/ */
@@ -410,7 +410,7 @@ _xt_panic:
s32i a0, sp, XT_STK_A0 s32i a0, sp, XT_STK_A0
/* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */ /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM | PS_WOE movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
wsr a0, PS wsr a0, PS
//Call panic handler //Call panic handler
@@ -437,7 +437,7 @@ panic_print_hex_a:
panic_print_hex_ok: panic_print_hex_ok:
s32i a5,a3,0 s32i a5,a3,0
slli a2,a2,4 slli a2,a2,4
addi a4,a4,-1 addi a4,a4,-1
bnei a4,0,panic_print_hex_loop bnei a4,0,panic_print_hex_loop
movi a5,' ' movi a5,' '
@@ -456,12 +456,12 @@ panic_print_hex_ok:
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
Hooks to dynamically install handlers for exceptions and interrupts. Hooks to dynamically install handlers for exceptions and interrupts.
Allows automated regression frameworks to install handlers per test. Allows automated regression frameworks to install handlers per test.
Consists of an array of function pointers indexed by interrupt level, Consists of an array of function pointers indexed by interrupt level,
with index 0 containing the entry for user exceptions. with index 0 containing the entry for user exceptions.
Initialized with all 0s, meaning no handler is installed at each level. Initialized with all 0s, meaning no handler is installed at each level.
See comment in xtensa_rtos.h for more details. See comment in xtensa_rtos.h for more details.
*WARNING* This array is for all CPUs, that is, installing a hook for *WARNING* This array is for all CPUs, that is, installing a hook for
one CPU will install it for all others as well! one CPU will install it for all others as well!
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
*/ */
@@ -491,7 +491,7 @@ _xt_intexc_hooks:
the appropriate stack frame, saves a few vector-specific registers and the appropriate stack frame, saves a few vector-specific registers and
calls XT_RTOS_INT_ENTER to save the rest of the interrupted context calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
and enter the RTOS, then sets up a C environment. It then calls the and enter the RTOS, then sets up a C environment. It then calls the
user's interrupt handler code (which may be coded in C) and finally user's interrupt handler code (which may be coded in C) and finally
calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
While XT_RTOS_INT_EXIT does not return directly to the interruptee, While XT_RTOS_INT_EXIT does not return directly to the interruptee,
@@ -695,9 +695,9 @@ _xt_user_exc:
/* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. */ /* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__ #ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM
#else #else
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM | PS_WOE movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
#endif #endif
wsr a0, PS wsr a0, PS
@@ -867,7 +867,7 @@ _xt_syscall_exc:
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
Co-Processor Exception Handler (jumped to from User Exception Handler). Co-Processor Exception Handler (jumped to from User Exception Handler).
These exceptions are generated by co-processor instructions, which are only These exceptions are generated by co-processor instructions, which are only
allowed in thread code (not in interrupts or kernel code). This restriction is allowed in thread code (not in interrupts or kernel code). This restriction is
deliberately imposed to reduce the burden of state-save/restore in interrupts. deliberately imposed to reduce the burden of state-save/restore in interrupts.
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------
*/ */
@@ -984,7 +984,7 @@ _xt_coproc_exc:
or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */ or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
wsr a4, CPENABLE wsr a4, CPENABLE
/* /*
Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
everywhere): _xt_coproc_release assumes it works like this in order not to need everywhere): _xt_coproc_release assumes it works like this in order not to need
locking. locking.
@@ -1030,8 +1030,8 @@ locking.
/* /*
The config-specific HAL macro invoked below destroys a2-5, preserves a0-1. The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
It is theoretically possible for Xtensa processor designers to write TIE It is theoretically possible for Xtensa processor designers to write TIE
that causes more address registers to be affected, but it is generally that causes more address registers to be affected, but it is generally
unlikely. If that ever happens, more registers needs to be saved/restored unlikely. If that ever happens, more registers needs to be saved/restored
around this macro invocation, and the value in a15 needs to be recomputed. around this macro invocation, and the value in a15 needs to be recomputed.
*/ */
@@ -1056,8 +1056,8 @@ locking.
/* /*
The config-specific HAL macro invoked below destroys a2-5, preserves a0-1. The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
It is theoretically possible for Xtensa processor designers to write TIE It is theoretically possible for Xtensa processor designers to write TIE
that causes more address registers to be affected, but it is generally that causes more address registers to be affected, but it is generally
unlikely. If that ever happens, more registers needs to be saved/restored unlikely. If that ever happens, more registers needs to be saved/restored
around this macro invocation. around this macro invocation.
*/ */
@@ -1138,12 +1138,12 @@ _xt_lowint1:
/* Save rest of interrupt context and enter RTOS. */ /* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */ /* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */ /* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__ #ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(1) | PS_UM movi a0, PS_INTLEVEL(1) | PS_UM
#else #else
movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
#endif #endif
wsr a0, PS wsr a0, PS
@@ -1175,7 +1175,7 @@ _xt_lowint1:
the appropriate stack frame, saves a few vector-specific registers and the appropriate stack frame, saves a few vector-specific registers and
calls XT_RTOS_INT_ENTER to save the rest of the interrupted context calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
and enter the RTOS, then sets up a C environment. It then calls the and enter the RTOS, then sets up a C environment. It then calls the
user's interrupt handler code (which may be coded in C) and finally user's interrupt handler code (which may be coded in C) and finally
calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
While XT_RTOS_INT_EXIT does not return directly to the interruptee, While XT_RTOS_INT_EXIT does not return directly to the interruptee,
@@ -1602,7 +1602,7 @@ and used for purposes requiring very short service times.
Here are templates for high priority (level 2+) interrupt vectors. Here are templates for high priority (level 2+) interrupt vectors.
They assume only one interrupt per level to avoid the burden of identifying They assume only one interrupt per level to avoid the burden of identifying
which interrupts at this level are pending and enabled. This allows for which interrupts at this level are pending and enabled. This allows for
minimum latency and avoids having to save/restore a2 in addition to a0. minimum latency and avoids having to save/restore a2 in addition to a0.
If more than one interrupt per high priority level is configured, this burden If more than one interrupt per high priority level is configured, this burden
is on the handler which in any case must provide a way to save and restore is on the handler which in any case must provide a way to save and restore
@@ -1727,12 +1727,12 @@ _NMIExceptionVector:
WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER
Here is the code for each window overflow/underflow exception vector and Here is the code for each window overflow/underflow exception vector and
(interspersed) efficient code for handling the alloca exception cause. (interspersed) efficient code for handling the alloca exception cause.
Window exceptions are handled entirely in the vector area and are very Window exceptions are handled entirely in the vector area and are very
tight for performance. The alloca exception is also handled entirely in tight for performance. The alloca exception is also handled entirely in
the window vector area so comes at essentially no cost in code size. the window vector area so comes at essentially no cost in code size.
Users should never need to modify them and Cadence Design Systems recommends Users should never need to modify them and Cadence Design Systems recommends
they do not. they do not.
Window handlers go at predetermined vector locations according to the Window handlers go at predetermined vector locations according to the

View File

@@ -402,7 +402,7 @@
* 2 1 extern level * 2 1 extern level
* 3 1 extern level * 3 1 extern level
* 4 1 extern level WBB * 4 1 extern level WBB
* 5 1 extern level BT/BLE Controller BT/BLE Controller * 5 1 extern level
* 6 1 timer FreeRTOS Tick(L1) FreeRTOS Tick(L1) * 6 1 timer FreeRTOS Tick(L1) FreeRTOS Tick(L1)
* 7 1 software BT/BLE VHCI BT/BLE VHCI * 7 1 software BT/BLE VHCI BT/BLE VHCI
* 8 1 extern level BT/BLE BB(RX/TX) BT/BLE BB(RX/TX) * 8 1 extern level BT/BLE BB(RX/TX) BT/BLE BB(RX/TX)
@@ -421,14 +421,14 @@
* 21 2 extern level * 21 2 extern level
* 22 3 extern edge * 22 3 extern edge
* 23 3 extern level * 23 3 extern level
* 24 4 extern level TG1_WDT * 24 4 extern level
* 25 4 extern level CACHEERR * 25 4 extern level BT/BLE Controller BT/BLE Controller
* 26 5 extern level * 26 5 extern Level CACHEERR
* 27 3 extern level Reserved Reserved * 27 3 extern level Reserved Reserved
* 28 4 extern edge DPORT ACCESS DPORT ACCESS * 28 4 extern edge DPORT ACCESS DPORT ACCESS
* 29 3 software Reserved Reserved * 29 3 software BT/BLE hli BT/BLE hli
* 30 4 extern edge Reserved Reserved * 30 4 extern edge Reserved Reserved
* 31 5 extern level * 31 5 extern level TG1_WDT
************************************************************************************************************* *************************************************************************************************************
*/ */
@@ -438,8 +438,8 @@
#define ETS_WBB_INUM 4 #define ETS_WBB_INUM 4
#define ETS_TG0_T1_INUM 10 /**< use edge interrupt*/ #define ETS_TG0_T1_INUM 10 /**< use edge interrupt*/
#define ETS_FRC1_INUM 22 #define ETS_FRC1_INUM 22
#define ETS_T1_WDT_INUM 24 #define ETS_T1_WDT_INUM 31
#define ETS_CACHEERR_INUM 25 #define ETS_CACHEERR_INUM 26
#define ETS_DPORT_INUM 28 #define ETS_DPORT_INUM 28
//CPU0 Interrupt number used in ROM, should be cancelled in SDK //CPU0 Interrupt number used in ROM, should be cancelled in SDK