components/bt: using high level interrupt in lc

baohongde
2020-04-17 23:16:59 +08:00
parent f490b4ddfe
commit a172605af4
11 changed files with 306 additions and 392 deletions

View File

@@ -1,6 +1,7 @@
if(CONFIG_BT_ENABLED)
set(COMPONENT_SRCS "bt.c")
set(COMPONENT_SRCS "bt.c"
"hli_api.c")
set(COMPONENT_ADD_INCLUDEDIRS include)
if(CONFIG_BLUEDROID_ENABLED)
@@ -294,4 +295,6 @@ if(CONFIG_BT_ENABLED)
target_link_libraries(${COMPONENT_TARGET} "-L${CMAKE_CURRENT_LIST_DIR}/lib")
target_link_libraries(${COMPONENT_TARGET} btdm_app)
target_link_libraries(${COMPONENT_TARGET} "-u ld_include_hli_vectors_bt")
endif()
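The new "-u ld_include_hli_vectors_bt" option (mirrored in component.mk later in this commit) is the usual force-link trick: it plants an undefined reference so the linker pulls in the object built from the high level interrupt vector file even though nothing else refers to it, keeping the xt_highint4 override instead of the default weak vector. The C sketch below only illustrates that pattern; where and how the real symbol is defined (presumably in hli_vectors.S) is an assumption, since the definition is not shown in this diff.

/* Force-link pattern, illustration only. Nothing references this symbol;
 * passing "-u ld_include_hli_vectors_bt" to the linker creates an undefined
 * reference, so the object file that defines it (and the interrupt vector
 * override it contains) is pulled in from the static library. */
void ld_include_hli_vectors_bt(void)
{
    /* intentionally empty: only the symbol's existence matters */
}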

View File

@@ -44,10 +44,10 @@
#include "soc/soc_memory_layout.h"
#include "esp_clk.h"
#include "esp_coexist_internal.h"
#include "hli_api.h"
#if CONFIG_BT_ENABLED
#define CONFIG_BT_HLIGH_LEVEL_INT
/* Macro definition
************************************************************************
*/
@@ -90,14 +90,9 @@ do{\
} while(0)
#define OSI_FUNCS_TIME_BLOCKING 0xffffffff
#define OSI_VERSION 0x00010002
#define OSI_VERSION 0x00010003
#define OSI_MAGIC_VALUE 0xFADEBEAD
/* SPIRAM Configuration */
#if CONFIG_SPIRAM_USE_MALLOC
#define BTDM_MAX_QUEUE_NUM (5)
#endif
/* Types definition
************************************************************************
*/
@@ -115,15 +110,6 @@ typedef struct {
intptr_t end;
} btdm_dram_available_region_t;
/* PSRAM configuration */
#if CONFIG_SPIRAM_USE_MALLOC
typedef struct {
QueueHandle_t handle;
void *storage;
void *buffer;
} btdm_queue_item_t;
#endif
/* OSI function */
struct osi_funcs_t {
uint32_t _version;
@@ -143,7 +129,7 @@ struct osi_funcs_t {
void (*_mutex_delete)(void *mutex);
int32_t (*_mutex_lock)(void *mutex);
int32_t (*_mutex_unlock)(void *mutex);
void *(* _queue_create)(uint32_t queue_len, uint32_t item_size);
void *(* _queue_create)(uint32_t queue_len, uint32_t item_size, int flag);
void (* _queue_delete)(void *queue);
int32_t (* _queue_send)(void *queue, void *item, uint32_t block_time_ms);
int32_t (* _queue_send_from_isr)(void *queue, void *item, void *hptw);
@@ -241,12 +227,10 @@ extern uint32_t _btdm_data_end;
/* Local Function Declare
*********************************************************************
*/
#if CONFIG_SPIRAM_USE_MALLOC
static bool btdm_queue_generic_register(const btdm_queue_item_t *queue);
static bool btdm_queue_generic_deregister(btdm_queue_item_t *queue);
#endif /* CONFIG_SPIRAM_USE_MALLOC */
static void IRAM_ATTR interrupt_disable(void);
static void IRAM_ATTR interrupt_restore(void);
static xt_handler set_isr_hlevel_wrapper(int n, xt_handler f, void *arg);
static void IRAM_ATTR interrupt_hlevel_disable(void);
static void IRAM_ATTR interrupt_hlevel_restore(void);
static void IRAM_ATTR task_yield(void);
static void IRAM_ATTR task_yield_from_isr(void);
static void *semphr_create_wrapper(uint32_t max, uint32_t init);
static void semphr_delete_wrapper(void *semphr);
@@ -258,12 +242,12 @@ static void *mutex_create_wrapper(void);
static void mutex_delete_wrapper(void *mutex);
static int32_t mutex_lock_wrapper(void *mutex);
static int32_t mutex_unlock_wrapper(void *mutex);
static void *queue_create_wrapper(uint32_t queue_len, uint32_t item_size);
static void queue_delete_wrapper(void *queue);
static int32_t queue_send_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_send_from_isr_wrapper(void *queue, void *item, void *hptw);
static int32_t queue_recv_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_recv_from_isr_wrapper(void *queue, void *item, void *hptw);
static void *queue_create_hlevel_wrapper(uint32_t queue_len, uint32_t item_size, int flag);
static void queue_delete_hlevel_wrapper(void *queue);
static int32_t IRAM_ATTR queue_send_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_send_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw);
static int32_t IRAM_ATTR queue_recv_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms);
static int32_t IRAM_ATTR queue_recv_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw);
static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id);
static void task_delete_wrapper(void *task_handle);
static bool IRAM_ATTR is_in_isr_wrapper(void);
@@ -289,11 +273,11 @@ static void coex_bt_wakeup_request_end(void);
/* OSI funcs */
static const struct osi_funcs_t osi_funcs_ro = {
._version = OSI_VERSION,
._set_isr = xt_set_interrupt_handler,
._set_isr = set_isr_hlevel_wrapper,
._ints_on = xt_ints_on,
._interrupt_disable = interrupt_disable,
._interrupt_restore = interrupt_restore,
._task_yield = vPortYield,
._interrupt_disable = interrupt_hlevel_disable,
._interrupt_restore = interrupt_hlevel_restore,
._task_yield = task_yield,
._task_yield_from_isr = task_yield_from_isr,
._semphr_create = semphr_create_wrapper,
._semphr_delete = semphr_delete_wrapper,
@@ -305,12 +289,12 @@ static const struct osi_funcs_t osi_funcs_ro = {
._mutex_delete = mutex_delete_wrapper,
._mutex_lock = mutex_lock_wrapper,
._mutex_unlock = mutex_unlock_wrapper,
._queue_create = queue_create_wrapper,
._queue_delete = queue_delete_wrapper,
._queue_send = queue_send_wrapper,
._queue_send_from_isr = queue_send_from_isr_wrapper,
._queue_recv = queue_recv_wrapper,
._queue_recv_from_isr = queue_recv_from_isr_wrapper,
._queue_create = queue_create_hlevel_wrapper,
._queue_delete = queue_delete_hlevel_wrapper,
._queue_send = queue_send_hlevel_wrapper,
._queue_send_from_isr = queue_send_from_isr_hlevel_wrapper,
._queue_recv = queue_recv_hlevel_wrapper,
._queue_recv_from_isr = queue_recv_from_isr_hlevel_wrapper,
._task_create = task_create_wrapper,
._task_delete = task_delete_wrapper,
._is_in_isr = is_in_isr_wrapper,
@@ -362,11 +346,6 @@ SOC_RESERVE_MEMORY_REGION(SOC_MEM_BT_DATA_START, SOC_MEM_BT_DATA_END,
static DRAM_ATTR struct osi_funcs_t *osi_funcs_p;
#if CONFIG_SPIRAM_USE_MALLOC
static DRAM_ATTR btdm_queue_item_t btdm_queue_table[BTDM_MAX_QUEUE_NUM];
static DRAM_ATTR SemaphoreHandle_t btdm_queue_table_mux = NULL;
#endif /* #if CONFIG_SPIRAM_USE_MALLOC */
/* Static variable declare */
// timestamp when PHY/RF was switched on
static DRAM_ATTR int64_t s_time_phy_rf_just_enabled = 0;
@@ -397,70 +376,50 @@ static inline void btdm_check_and_init_bb(void)
}
}
-#if CONFIG_SPIRAM_USE_MALLOC
-static bool btdm_queue_generic_register(const btdm_queue_item_t *queue)
+struct interrupt_hlevel_cb{
+uint32_t status;
+uint8_t nested;
+};
+struct interrupt_hlevel_cb hli_cb = {
+.status = 0,
+.nested = 0,
+};
+static xt_handler set_isr_hlevel_wrapper(int mask, xt_handler f, void *arg)
{
-if (!btdm_queue_table_mux || !queue) {
-return NULL;
-}
-bool ret = false;
-btdm_queue_item_t *item;
-xSemaphoreTake(btdm_queue_table_mux, portMAX_DELAY);
-for (int i = 0; i < BTDM_MAX_QUEUE_NUM; ++i) {
-item = &btdm_queue_table[i];
-if (item->handle == NULL) {
-memcpy(item, queue, sizeof(btdm_queue_item_t));
-ret = true;
-break;
-}
-}
-xSemaphoreGive(btdm_queue_table_mux);
-return ret;
-}
-static bool btdm_queue_generic_deregister(btdm_queue_item_t *queue)
-{
-if (!btdm_queue_table_mux || !queue) {
-return false;
-}
-bool ret = false;
-btdm_queue_item_t *item;
-xSemaphoreTake(btdm_queue_table_mux, portMAX_DELAY);
-for (int i = 0; i < BTDM_MAX_QUEUE_NUM; ++i) {
-item = &btdm_queue_table[i];
-if (item->handle == queue->handle) {
-memcpy(queue, item, sizeof(btdm_queue_item_t));
-memset(item, 0, sizeof(btdm_queue_item_t));
-ret = true;
-break;
-}
-}
-xSemaphoreGive(btdm_queue_table_mux);
-return ret;
-}
-#endif /* CONFIG_SPIRAM_USE_MALLOC */
-static void IRAM_ATTR interrupt_disable(void)
-{
-if (xPortInIsrContext()) {
-portENTER_CRITICAL_ISR(&global_int_mux);
+esp_err_t err = hli_intr_register((intr_handler_t) f, arg, DPORT_PRO_INTR_STATUS_0_REG, mask);
+if (err == ESP_OK) {
+return f;
} else {
-portENTER_CRITICAL(&global_int_mux);
+return 0;
}
}
-static void IRAM_ATTR interrupt_restore(void)
+static void IRAM_ATTR interrupt_hlevel_disable(void)
{
-if (xPortInIsrContext()) {
-portEXIT_CRITICAL_ISR(&global_int_mux);
-} else {
-portEXIT_CRITICAL(&global_int_mux);
+assert(xPortGetCoreID() == CONFIG_BTDM_CONTROLLER_PINNED_TO_CORE);
+uint32_t status = hli_intr_disable();
+if (hli_cb.nested++ == 0) {
+hli_cb.status = status;
}
}
+static void IRAM_ATTR interrupt_hlevel_restore(void)
+{
+assert(xPortGetCoreID() == CONFIG_BTDM_CONTROLLER_PINNED_TO_CORE);
+assert(hli_cb.nested > 0);
+if (--hli_cb.nested == 0) {
+hli_intr_restore(hli_cb.status);
+}
+}
+static void IRAM_ATTR task_yield(void)
+{
+vPortYield();
+}
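A minimal usage sketch (not part of the commit) of the contract the wrappers above implement: disable/restore calls may nest, and only the outermost pair saves and restores the interrupt level kept in hli_cb.

/* Illustration only: nested critical sections around state that is shared
 * with the level 4 BT controller interrupt. */
static void example_nested_hlevel_critical(void)
{
    interrupt_hlevel_disable();   /* outermost: stores the state returned by hli_intr_disable() */
    interrupt_hlevel_disable();   /* nested: only increments hli_cb.nested */
    /* ... touch data also used by the high level interrupt ... */
    interrupt_hlevel_restore();   /* nested: only decrements the counter */
    interrupt_hlevel_restore();   /* outermost: hli_intr_restore(hli_cb.status) */
}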
static void IRAM_ATTR task_yield_from_isr(void)
{
portYIELD_FROM_ISR();
@@ -468,148 +427,62 @@ static void IRAM_ATTR task_yield_from_isr(void)
static void *semphr_create_wrapper(uint32_t max, uint32_t init)
{
#if !CONFIG_SPIRAM_USE_MALLOC
return (void *)xSemaphoreCreateCounting(max, init);
#else
StaticQueue_t *queue_buffer = NULL;
QueueHandle_t handle = NULL;
SemaphoreHandle_t downstream_semaphore = xSemaphoreCreateCounting(max, init);
assert(downstream_semaphore);
hli_queue_handle_t s_semaphore = hli_semaphore_create(max, downstream_semaphore);
assert(s_semaphore);
queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (!queue_buffer) {
goto error;
}
handle = xSemaphoreCreateCountingStatic(max, init, queue_buffer);
if (!handle) {
goto error;
}
btdm_queue_item_t item = {
.handle = handle,
.storage = NULL,
.buffer = queue_buffer,
};
if (!btdm_queue_generic_register(&item)) {
goto error;
}
return handle;
error:
if (handle) {
vSemaphoreDelete(handle);
}
if (queue_buffer) {
free(queue_buffer);
}
return NULL;
#endif
return s_semaphore;
}
static void semphr_delete_wrapper(void *semphr)
{
#if !CONFIG_SPIRAM_USE_MALLOC
vSemaphoreDelete(semphr);
#else
btdm_queue_item_t item = {
.handle = semphr,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vSemaphoreDelete(item.handle);
free(item.buffer);
if (((hli_queue_handle_t)semphr)->downstream != NULL) {
vSemaphoreDelete(((hli_queue_handle_t)semphr)->downstream);
}
return;
#endif
hli_queue_delete(semphr);
}
static int32_t IRAM_ATTR semphr_take_from_isr_wrapper(void *semphr, void *hptw)
{
return (int32_t)xSemaphoreTakeFromISR(semphr, hptw);
return (int32_t)xSemaphoreTakeFromISR(((hli_queue_handle_t)semphr)->downstream, hptw);
}
static int32_t IRAM_ATTR semphr_give_from_isr_wrapper(void *semphr, void *hptw)
{
return (int32_t)xSemaphoreGiveFromISR(semphr, hptw);
if (hptw != NULL) {
*(uint32_t *)hptw = 0;
}
bool ret = hli_semaphore_give(semphr);
return ret;
}
static int32_t semphr_take_wrapper(void *semphr, uint32_t block_time_ms)
{
bool ret;
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xSemaphoreTake(semphr, portMAX_DELAY);
ret = xSemaphoreTake(((hli_queue_handle_t)semphr)->downstream, portMAX_DELAY);
} else {
return (int32_t)xSemaphoreTake(semphr, block_time_ms / portTICK_PERIOD_MS);
ret = xSemaphoreTake(((hli_queue_handle_t)semphr)->downstream, block_time_ms / portTICK_PERIOD_MS);
}
return (int32_t)ret;
}
static int32_t semphr_give_wrapper(void *semphr)
{
return (int32_t)xSemaphoreGive(semphr);
return (int32_t)xSemaphoreGive(((hli_queue_handle_t)semphr)->downstream);
}
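For context, a hedged sketch of the round trip these semaphore wrappers implement; the names, counts and call sites below are invented, and it assumes hli_queue_setup() has already run on this core. A give from the level 4 interrupt goes through hli_semaphore_give() and is forwarded to the plain FreeRTOS semaphore by the level 3 software interrupt, while a task-level take blocks on the downstream handle directly.

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hli_api.h"

static hli_queue_handle_t s_evt;              /* hli wrapper, usable from level 4 */
static SemaphoreHandle_t s_evt_downstream;    /* plain FreeRTOS semaphore underneath */

void example_sem_init(void)
{
    s_evt_downstream = xSemaphoreCreateCounting(10, 0);
    s_evt = hli_semaphore_create(10, s_evt_downstream);
}

/* called from the high level interrupt's C handler */
void example_sem_give_from_hli(void)
{
    hli_semaphore_give(s_evt);    /* queued, delivered by the level 3 SW interrupt */
}

/* called from a task */
void example_sem_wait(void)
{
    xSemaphoreTake(s_evt_downstream, portMAX_DELAY);
}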
static void *mutex_create_wrapper(void)
{
#if CONFIG_SPIRAM_USE_MALLOC
StaticQueue_t *queue_buffer = NULL;
QueueHandle_t handle = NULL;
queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (!queue_buffer) {
goto error;
}
handle = xSemaphoreCreateMutexStatic(queue_buffer);
if (!handle) {
goto error;
}
btdm_queue_item_t item = {
.handle = handle,
.storage = NULL,
.buffer = queue_buffer,
};
if (!btdm_queue_generic_register(&item)) {
goto error;
}
return handle;
error:
if (handle) {
vSemaphoreDelete(handle);
}
if (queue_buffer) {
free(queue_buffer);
}
return NULL;
#else
return (void *)xSemaphoreCreateMutex();
#endif
}
static void mutex_delete_wrapper(void *mutex)
{
#if !CONFIG_SPIRAM_USE_MALLOC
vSemaphoreDelete(mutex);
#else
btdm_queue_item_t item = {
.handle = mutex,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vSemaphoreDelete(item.handle);
free(item.buffer);
}
return;
#endif
}
static int32_t mutex_lock_wrapper(void *mutex)
@@ -622,104 +495,66 @@ static int32_t mutex_unlock_wrapper(void *mutex)
return (int32_t)xSemaphoreGive(mutex);
}
-static void *queue_create_wrapper(uint32_t queue_len, uint32_t item_size)
+static void *queue_create_hlevel_wrapper(uint32_t queue_len, uint32_t item_size, int flag)
{
-#if CONFIG_SPIRAM_USE_MALLOC
-StaticQueue_t *queue_buffer = NULL;
-uint8_t *queue_storage = NULL;
-QueueHandle_t handle = NULL;
-queue_buffer = heap_caps_malloc(sizeof(StaticQueue_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
-if (!queue_buffer) {
-goto error;
+QueueHandle_t downstream_queue = xQueueCreate(queue_len, item_size);
+assert(downstream_queue);
+hli_queue_handle_t queue;
+/**
+* TODO: Should use macro here!
+*/
+if (flag == 0) {
+queue = hli_queue_create(queue_len, item_size, downstream_queue);
+} else if (flag == 1) {
+queue = hli_customer_queue_create(queue_len, item_size, downstream_queue);
+} else {
+assert(0);
}
-queue_storage = heap_caps_malloc((queue_len*item_size), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
-if (!queue_storage ) {
-goto error;
-}
-handle = xQueueCreateStatic(queue_len, item_size, queue_storage, queue_buffer);
-if (!handle) {
-goto error;
-}
-btdm_queue_item_t item = {
-.handle = handle,
-.storage = queue_storage,
-.buffer = queue_buffer,
-};
-if (!btdm_queue_generic_register(&item)) {
-goto error;
-}
-return handle;
-error:
-if (handle) {
-vQueueDelete(handle);
-}
-if (queue_storage) {
-free(queue_storage);
-}
-if (queue_buffer) {
-free(queue_buffer);
-}
-return NULL;
-#else
-return (void *)xQueueCreate(queue_len, item_size);
-#endif
+assert(queue);
+return queue;
}
static void queue_delete_wrapper(void *queue)
static void queue_delete_hlevel_wrapper(void *queue)
{
#if !CONFIG_SPIRAM_USE_MALLOC
vQueueDelete(queue);
#else
btdm_queue_item_t item = {
.handle = queue,
.storage = NULL,
.buffer = NULL,
};
if (btdm_queue_generic_deregister(&item)) {
vQueueDelete(item.handle);
free(item.storage);
free(item.buffer);
if (((hli_queue_handle_t)queue)->downstream != NULL) {
vQueueDelete(((hli_queue_handle_t)queue)->downstream);
}
return;
#endif
hli_queue_delete(queue);
}
static int32_t queue_send_wrapper(void *queue, void *item, uint32_t block_time_ms)
static int32_t queue_send_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms)
{
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xQueueSend(queue, item, portMAX_DELAY);
return (int32_t)xQueueSend(((hli_queue_handle_t)queue)->downstream, item, portMAX_DELAY);
} else {
return (int32_t)xQueueSend(queue, item, block_time_ms / portTICK_PERIOD_MS);
return (int32_t)xQueueSend(((hli_queue_handle_t)queue)->downstream, item, block_time_ms / portTICK_PERIOD_MS);
}
}
static int32_t IRAM_ATTR queue_send_from_isr_wrapper(void *queue, void *item, void *hptw)
static int32_t IRAM_ATTR queue_send_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw)
{
return (int32_t)xQueueSendFromISR(queue, item, hptw);
if (hptw != NULL) {
*(uint32_t *)hptw = 0;
}
return hli_queue_put(queue, item);
}
static int32_t queue_recv_wrapper(void *queue, void *item, uint32_t block_time_ms)
static int32_t queue_recv_hlevel_wrapper(void *queue, void *item, uint32_t block_time_ms)
{
bool ret;
if (block_time_ms == OSI_FUNCS_TIME_BLOCKING) {
return (int32_t)xQueueReceive(queue, item, portMAX_DELAY);
ret = (int32_t)xQueueReceive(((hli_queue_handle_t)queue)->downstream, item, portMAX_DELAY);
} else {
return (int32_t)xQueueReceive(queue, item, block_time_ms / portTICK_PERIOD_MS);
ret =(int32_t)xQueueReceive(((hli_queue_handle_t)queue)->downstream, item, block_time_ms / portTICK_PERIOD_MS);
}
return ret;
}
static int32_t IRAM_ATTR queue_recv_from_isr_wrapper(void *queue, void *item, void *hptw)
static int32_t IRAM_ATTR queue_recv_from_isr_hlevel_wrapper(void *queue, void *item, void *hptw)
{
return (int32_t)xQueueReceiveFromISR(queue, item, hptw);
return (int32_t)xQueueReceiveFromISR(((hli_queue_handle_t)queue)->downstream, item, hptw);
}
static int32_t task_create_wrapper(void *task_func, const char *name, uint32_t stack_depth, void *param, uint32_t prio, void *task_handle, uint32_t core_id)
@@ -1155,14 +990,6 @@ esp_err_t esp_bt_controller_init(esp_bt_controller_config_t *cfg)
ESP_LOGI(BTDM_LOG_TAG, "BT controller compile version [%s]", btdm_controller_get_compile_version());
#if CONFIG_SPIRAM_USE_MALLOC
btdm_queue_table_mux = xSemaphoreCreateMutex();
if (btdm_queue_table_mux == NULL) {
return ESP_ERR_NO_MEM;
}
memset(btdm_queue_table, 0, sizeof(btdm_queue_item_t) * BTDM_MAX_QUEUE_NUM);
#endif
#ifdef CONFIG_PM_ENABLE
if ((err = esp_pm_lock_create(ESP_PM_NO_LIGHT_SLEEP, 0, "btLS", &s_light_sleep_pm_lock)) != ESP_OK) {
goto error;
@@ -1277,12 +1104,6 @@ esp_err_t esp_bt_controller_deinit(void)
s_pm_lock_sem = NULL;
#endif
#if CONFIG_SPIRAM_USE_MALLOC
vSemaphoreDelete(btdm_queue_table_mux);
btdm_queue_table_mux = NULL;
memset(btdm_queue_table, 0, sizeof(btdm_queue_item_t) * BTDM_MAX_QUEUE_NUM);
#endif
free(osi_funcs_p);
osi_funcs_p = NULL;
@@ -1524,4 +1345,21 @@ esp_err_t esp_ble_scan_dupilcate_list_flush(void)
return ESP_OK;
}
void IRAM_ATTR interrupt_disable_l3(void)
{
if (xPortInIsrContext()) {
portENTER_CRITICAL_ISR(&global_int_mux);
} else {
portENTER_CRITICAL(&global_int_mux);
}
}
void IRAM_ATTR interrupt_restore_l3(void)
{
if (xPortInIsrContext()) {
portEXIT_CRITICAL_ISR(&global_int_mux);
} else {
portEXIT_CRITICAL(&global_int_mux);
}
}
#endif /* CONFIG_BT_ENABLED */

View File

@@ -10,6 +10,7 @@ COMPONENT_ADD_INCLUDEDIRS := include
LIBS := btdm_app
COMPONENT_ADD_LDFLAGS := -lbt -L $(COMPONENT_PATH)/lib \
-u ld_include_hli_vectors_bt \
$(addprefix -l,$(LIBS))
# re-link program if BT binary libs change

View File

@@ -20,8 +20,34 @@ typedef struct {
uint32_t intr_mask;
} hli_handler_info_t;
typedef struct {
#define CUSTOMER_TYPE_REQUEST (0)
#define CUSTOMER_TYPE_RELEASE (1)
struct {
uint32_t cb_type;
union {
int (* request)(uint32_t, uint32_t, uint32_t);
int (* release)(uint32_t);
} cb;
} customer_cb;
uint32_t arg0, arg1, arg2;
} customer_swisr_t;
static void IRAM_ATTR customer_swisr_handle(customer_swisr_t *cus_swisr)
{
if (cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_REQUEST) {
if (cus_swisr->customer_cb.cb.request != NULL) {
cus_swisr->customer_cb.cb.request(cus_swisr->arg0, cus_swisr->arg1, cus_swisr->arg2);
}
} else if(cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_RELEASE) {
if (cus_swisr->customer_cb.cb.release != NULL) {
cus_swisr->customer_cb.cb.release(cus_swisr->arg0);
}
}
}
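To show how a customer element is meant to be produced, here is a hedged sketch that would sit in this file (it is not part of the commit; the coex_request_cb callback and the queue below are invented). The element carries its own callback, hli_queue_put() hands it to the level 3 software interrupt, and queue_isr_handler() further down dispatches it through customer_swisr_handle() instead of copying it into a downstream queue.

/* hypothetical callback matching the customer_cb.cb.request signature */
extern int coex_request_cb(uint32_t event, uint32_t latency, uint32_t duration);

static hli_queue_handle_t s_customer_q;

void example_customer_setup(void)
{
    /* a downstream queue is still passed to satisfy the create call, although
     * customer elements are consumed by their callback rather than forwarded */
    QueueHandle_t downstream = xQueueCreate(4, sizeof(customer_swisr_t));
    s_customer_q = hli_customer_queue_create(4, sizeof(customer_swisr_t), downstream);
}

/* runs in a level 4 interrupt: defer coex_request_cb() to level 3 */
void example_defer_request(uint32_t event, uint32_t latency, uint32_t duration)
{
    customer_swisr_t item = {
        .customer_cb.cb_type = CUSTOMER_TYPE_REQUEST,
        .customer_cb.cb.request = coex_request_cb,
        .arg0 = event,
        .arg1 = latency,
        .arg2 = duration,
    };
    hli_queue_put(s_customer_q, &item);
}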
static hli_handler_info_t s_hli_handlers[HLI_MAX_HANDLERS];
static const char* TAG = "hli_queue";
// static const char* TAG = "hli_queue";
esp_err_t hli_intr_register(intr_handler_t handler, void* arg, uint32_t intr_reg, uint32_t intr_mask)
{
@@ -65,18 +91,18 @@ void IRAM_ATTR hli_c_handler(void)
}
}
if (!handled) {
ets_printf(DRAM_STR("hli_c_handler: no handler found!\n"));
abort();
// ets_printf(DRAM_STR("hli_c_handler: no handler found!\n"));
// abort();
}
}
uint32_t hli_intr_disable(void)
uint32_t IRAM_ATTR hli_intr_disable(void)
{
// disable level 4 and below
return XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2);
}
void hli_intr_restore(uint32_t state)
void IRAM_ATTR hli_intr_restore(uint32_t state)
{
XTOS_RESTORE_JUST_INTLEVEL(state);
}
@@ -86,8 +112,10 @@ void hli_intr_restore(uint32_t state)
#define HLI_QUEUE_SW_INT_NUM 29
#define HLI_QUEUE_FLAG_SEMAPHORE BIT(0)
#define HLI_QUEUE_FLAG_CUSTOMER BIT(1)
struct hli_queue_t *s_meta_queue_ptr;
struct hli_queue_t *s_meta_queue_ptr = NULL;
intr_handle_t ret_handle;
static inline char* IRAM_ATTR wrap_ptr(hli_queue_handle_t queue, char *ptr)
{
@@ -109,17 +137,20 @@ static void IRAM_ATTR queue_isr_handler(void* arg)
int do_yield = pdFALSE;
XTHAL_SET_INTCLEAR(BIT(HLI_QUEUE_SW_INT_NUM));
hli_queue_handle_t queue;
while (hli_queue_get(s_meta_queue_ptr, &queue)) {
static char scratch[HLI_QUEUE_MAX_ELEM_SIZE];
while (hli_queue_get(queue, scratch)) {
int res = pdPASS;
if ((queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) != 0) {
if ((queue->flags & HLI_QUEUE_FLAG_CUSTOMER) != 0) {
customer_swisr_handle((customer_swisr_t *)scratch);
} else if ((queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) != 0) {
res = xSemaphoreGiveFromISR((SemaphoreHandle_t) queue->downstream, &do_yield);
} else {
res = xQueueSendFromISR(queue->downstream, scratch, &do_yield);
}
if (res == pdFAIL) {
ESP_EARLY_LOGE(TAG, "Failed to send to %s %p", (queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) == 0 ? "queue" : "semaphore", queue->downstream);
// ESP_EARLY_LOGE(TAG, "Failed to send to %s %p", (queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) == 0 ? "queue" : "semaphore", queue->downstream);
}
}
}
@@ -167,9 +198,21 @@ static void queue_init(hli_queue_handle_t queue, size_t buf_size, size_t elem_si
void hli_queue_setup(void)
{
if (s_meta_queue_ptr == NULL) {
s_meta_queue_ptr = hli_queue_create(HLI_META_QUEUE_SIZE, sizeof(void*), NULL);
ESP_ERROR_CHECK(esp_intr_alloc(ETS_INTERNAL_SW1_INTR_SOURCE, ESP_INTR_FLAG_IRAM, queue_isr_handler, NULL, NULL));
ESP_ERROR_CHECK(esp_intr_alloc(ETS_INTERNAL_SW1_INTR_SOURCE, ESP_INTR_FLAG_IRAM, queue_isr_handler, NULL, &ret_handle));
xt_ints_on(BIT(HLI_QUEUE_SW_INT_NUM));
}
}
void hli_queue_shutdown(void)
{
if (s_meta_queue_ptr != NULL) {
hli_queue_delete(s_meta_queue_ptr);
s_meta_queue_ptr = NULL;
esp_intr_free(ret_handle);
xt_ints_off(BIT(HLI_QUEUE_SW_INT_NUM));
}
}
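The call sites are not shown in this diff, but presumably the controller's init/deinit path is expected to bracket its hli usage with these two functions; a minimal lifecycle sketch, under that assumption:

/* Sketch only: where bt.c actually calls these is an assumption. It must run
 * on the core that services the level 4 BT interrupt, because the level 3
 * forwarding interrupt (SW interrupt 29) is allocated on the calling core. */
void example_hli_lifecycle(void)
{
    hli_queue_setup();       /* create the meta queue, hook the SW interrupt */
    /* ... hli_queue_create() / hli_semaphore_create(), run the controller ... */
    hli_queue_shutdown();    /* delete the meta queue, free the interrupt */
}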
hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
@@ -188,6 +231,16 @@ hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_
return res;
}
hli_queue_handle_t hli_customer_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
{
hli_queue_handle_t res = hli_queue_create(nelem, elem_size, (QueueHandle_t) downstream);
if (res == NULL) {
return NULL;
}
res->flags |= HLI_QUEUE_FLAG_CUSTOMER;
return res;
}
hli_queue_handle_t hli_semaphore_create(size_t max_count, SemaphoreHandle_t downstream)
{
const size_t elem_size = 1;

View File

@@ -66,6 +66,11 @@ typedef struct hli_queue_t* hli_queue_handle_t;
*/
void hli_queue_setup(void);
/**
* @brief Shutdown hli_queue module.
*/
void hli_queue_shutdown(void);
/**
* @brief Create a hli queue, wrapping a FreeRTOS queue
*
@@ -81,6 +86,21 @@ void hli_queue_setup(void);
*/
hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream);
/**
* @brief Create a customer hli queue, wrapping a FreeRTOS queue
*
* This queue can be used from high level interrupts,
* but **ONLY ON THE CPU WHERE hli_queue_setup WAS CALLED**. Unlike a plain hli
* queue, elements put into a customer queue are not copied into the "downstream"
* FreeRTOS queue; the level 3 software interrupt instead runs the customer callback
* carried inside each element (see customer_swisr_t in hli_api.c).
*
* @param nelem number of elements in the queue
* @param elem_size size of one element; must match element size of a downstream queue
* @param downstream FreeRTOS queue to send the values to
* @return hli_queue_handle_t handle of the created queue, or NULL on failure
*/
hli_queue_handle_t hli_customer_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream);
/**
* @brief Create a hli queue, wrapping a FreeRTOS semaphore
*

View File

@@ -11,7 +11,7 @@
/* Interrupt stack size, for C code.
* TODO: reduce and make configurable.
*/
#define L5_INTR_STACK_SIZE 4096
#define L4_INTR_STACK_SIZE 4096
/* Save area for the CPU state:
* - 64 words for the general purpose registers
@@ -25,21 +25,20 @@
#define REG_FILE_SIZE (64 * 4)
#define SPECREG_OFFSET REG_FILE_SIZE
#define SPECREG_SIZE (7 * 4)
#define REG_SAVE_AREA_SIZE (SPECREG_OFFSET * SPECREG_SIZE)
#define REG_SAVE_AREA_SIZE (SPECREG_OFFSET + SPECREG_SIZE)
.data
_l5_intr_stack:
.space L5_INTR_STACK_SIZE
_l4_intr_stack:
.space L4_INTR_STACK_SIZE
_l5_save_ctx:
_l4_save_ctx:
.space REG_SAVE_AREA_SIZE
.section .iram1,"ax"
.global xt_highint5
.type xt_highint5,@function
.global xt_highint4
.type xt_highint4,@function
.align 4
xt_highint5:
xt_highint4:
/* disable exception mode, window overflow */
movi a0, PS_INTLEVEL(5) | PS_EXCM /*TOCHECK*/
wsr a0, PS
@@ -50,7 +49,7 @@ xt_highint5:
s32i a1, a0, 4
s32i a2, a0, 8
s32i a3, a0, 12
rsr a2, EXCSAVE_5 /* holds the value of a0 */
rsr a2, EXCSAVE_4 /* holds the value of a0 */
s32i a2, a0, 0
/* Save special registers */
@@ -76,7 +75,7 @@ xt_highint5:
* These 60 registers are saved in 5 iterations, 12 registers at a time.
*/
movi a1, 5
movi a3, _l5_save_ctx + 4 * 4
movi a3, _l4_save_ctx + 4 * 4
/* This is repeated 5 times, each time the window is shifted by 12 registers.
* We come here with a1 = downcounter, a3 = save pointer, a2 and a0 unused.
@@ -100,7 +99,7 @@ xt_highint5:
* At the same time we can decrement the counter and adjust the save area pointer
*/
/* a0 is constant (_l5_save_ctx), no need to copy */
/* a0 is constant (_l4_save_ctx), no need to copy */
addi a13, a1, -1 /* copy and decrement the downcounter */
/* a2 is scratch so no need to copy */
addi a15, a3, 48 /* copy and adjust the save area pointer */
@@ -111,7 +110,7 @@ xt_highint5:
/* the loop is complete */
2:
rotw 4 /* this brings us back to the original window */
/* a0 still points to _l5_save_ctx */
/* a0 still points to _l4_save_ctx */
/* Can clear WINDOWSTART now, all registers are saved */
rsr a2, WINDOWBASE
@@ -123,13 +122,13 @@ xt_highint5:
_highint4_stack_switch:
movi a0, 0
movi sp, _l5_intr_stack + L5_INTR_STACK_SIZE - 16
movi sp, _l4_intr_stack + L4_INTR_STACK_SIZE - 16
s32e a0, sp, -12 /* For GDB: set null SP */
s32e a0, sp, -16 /* For GDB: set null PC */
movi a0, _highint5_stack_switch /* For GDB: cosmetics, for the frame where stack switch happened */
movi a0, _highint4_stack_switch /* For GDB: cosmetics, for the frame where stack switch happened */
/* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
movi a6, PS_INTLEVEL(5) | PS_UM | PS_WOE
movi a6, PS_INTLEVEL(4) | PS_UM | PS_WOE
wsr a6, PS
rsync
@@ -147,7 +146,7 @@ _highint4_stack_switch:
/* Restore the special registers.
* WINDOWSTART will be restored near the end.
*/
movi a0, _l5_save_ctx + SPECREG_OFFSET
movi a0, _l4_save_ctx + SPECREG_OFFSET
l32i a2, a0, 8
wsr a2, SAR
l32i a2, a0, 12
@@ -170,7 +169,7 @@ _highint4_stack_switch:
* To simplify the loop, we put the initial values into a13 and a15.
*/
rotw -4
movi a15, _l5_save_ctx + 64 * 4 /* point to the end of the save area */
movi a15, _l4_save_ctx + 64 * 4 /* point to the end of the save area */
movi a13, 5
1:
@@ -205,7 +204,7 @@ _highint4_stack_switch:
* to be restored. Also need to restore WINDOWSTART, since all the general
* registers are now in place.
*/
movi a0, _l5_save_ctx
movi a0, _l4_save_ctx
l32i a2, a0, SPECREG_OFFSET + 4
wsr a2, WINDOWSTART
@@ -215,7 +214,7 @@ _highint4_stack_switch:
l32i a3, a0, 12
rsr a0, EXCSAVE_4 /* holds the value of a0 before the interrupt handler */
/* Return from the interrupt, restoring PS from EPS_5 */
/* Return from the interrupt, restoring PS from EPS_4 */
rfi 4
/* The linker has no reason to link in this file; all symbols it exports are already defined

View File

@@ -31,19 +31,19 @@ Interrupt , a high-priority interrupt, is used for several things:
*/
#define L4_INTR_STACK_SIZE 12
#define L4_INTR_A2_OFFSET 0
#define L4_INTR_A3_OFFSET 4
#define L4_INTR_A4_OFFSET 8
#define L5_INTR_STACK_SIZE 12
#define L5_INTR_A2_OFFSET 0
#define L5_INTR_A3_OFFSET 4
#define L5_INTR_A4_OFFSET 8
.data
_l4_intr_stack:
.space L4_INTR_STACK_SIZE
_l5_intr_stack:
.space L5_INTR_STACK_SIZE
.section .iram1,"ax"
.global xt_highint4
.type xt_highint4,@function
.global xt_highint5
.type xt_highint5,@function
.align 4
xt_highint4:
xt_highint5:
#ifndef CONFIG_FREERTOS_UNICORE
/* See if we're here for the dport access interrupt */
@@ -61,7 +61,7 @@ xt_highint4:
#endif
rsr a0, PS /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_4 /* save interruptee's PC */
rsr a0, EPC_5 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 /* for debug backtrace */
@@ -104,7 +104,7 @@ xt_highint4:
s32i a0, sp, XT_STK_EXCCAUSE
/* _xt_context_save seems to save the current a0, but we need the interuptees a0. Fix this. */
rsr a0, EXCSAVE_4 /* save interruptee's a0 */
rsr a0, EXCSAVE_5 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
@@ -120,13 +120,13 @@ xt_highint4:
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, PS
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_4
wsr a0, EPC_5
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove exception frame */
rsync /* ensure PS and EPC written */
rsr a0, EXCSAVE_4 /* restore a0 */
rfi 4
rsr a0, EXCSAVE_5 /* restore a0 */
rfi 5
@@ -140,17 +140,17 @@ xt_highint4:
/* Because the interrupt cause code has protection that only
allows one cpu to enter in the dport section of the L4
interrupt at one time, there's no need to have two
_l4_intr_stack for each cpu */
_l5_intr_stack for each cpu */
/* This int is edge-triggered and needs clearing. */
movi a0, (1<<ETS_DPORT_INUM)
wsr a0, INTCLEAR
/* Save A2, A3, A4 so we can use those registers */
movi a0, _l4_intr_stack
s32i a2, a0, L4_INTR_A2_OFFSET
s32i a3, a0, L4_INTR_A3_OFFSET
s32i a4, a0, L4_INTR_A4_OFFSET
movi a0, _l5_intr_stack
s32i a2, a0, L5_INTR_A2_OFFSET
s32i a3, a0, L5_INTR_A3_OFFSET
s32i a4, a0, L5_INTR_A4_OFFSET
/* handle dport interrupt */
/* get CORE_ID */
@@ -185,14 +185,14 @@ xt_highint4:
wsr a4, PS /* restore iterrupt level */
/* Done. Restore registers and return. */
movi a0, _l4_intr_stack
l32i a2, a0, L4_INTR_A2_OFFSET
l32i a3, a0, L4_INTR_A3_OFFSET
l32i a4, a0, L4_INTR_A4_OFFSET
movi a0, _l5_intr_stack
l32i a2, a0, L5_INTR_A2_OFFSET
l32i a3, a0, L5_INTR_A3_OFFSET
l32i a4, a0, L5_INTR_A4_OFFSET
rsync /* ensure register restored */
rsr a0, EXCSAVE_4 /* restore a0 */
rfi 4
rsr a0, EXCSAVE_5 /* restore a0 */
rfi 5
#endif // CONFIG_FREERTOS_UNICORE

View File

@@ -35,8 +35,8 @@
#if CONFIG_INT_WDT
#define WDT_INT_NUM 24
// #define WDT_INT_NUM 24
#define WDT_INT_NUM ETS_T1_WDT_INUM
//Take care: the tick hook can also be called before esp_int_wdt_init() is called.
#if CONFIG_INT_WDT_CHECK_CPU1

View File

@@ -410,7 +410,7 @@ _xt_panic:
s32i a0, sp, XT_STK_A0
/* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM | PS_WOE
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
wsr a0, PS
//Call panic handler
@@ -695,9 +695,9 @@ _xt_user_exc:
/* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM
#else
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 1) | PS_UM | PS_WOE
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
#endif
wsr a0, PS

View File

@@ -402,7 +402,7 @@
* 2 1 extern level
* 3 1 extern level
* 4 1 extern level WBB
* 5 1 extern level BT/BLE Controller BT/BLE Controller
* 5 1 extern level
* 6 1 timer FreeRTOS Tick(L1) FreeRTOS Tick(L1)
* 7 1 software BT/BLE VHCI BT/BLE VHCI
* 8 1 extern level BT/BLE BB(RX/TX) BT/BLE BB(RX/TX)
@@ -421,14 +421,14 @@
* 21 2 extern level
* 22 3 extern edge
* 23 3 extern level
* 24 4 extern level TG1_WDT
* 25 4 extern level CACHEERR
* 26 5 extern level
* 24 4 extern level
* 25 4 extern level BT/BLE Controller BT/BLE Controller
* 26 5 extern Level CACHEERR
* 27 3 extern level Reserved Reserved
* 28 4 extern edge DPORT ACCESS DPORT ACCESS
* 29 3 software Reserved Reserved
* 29 3 software BT/BLE hli BT/BLE hli
* 30 4 extern edge Reserved Reserved
* 31 5 extern level
* 31 5 extern level TG1_WDT
*************************************************************************************************************
*/
@@ -438,8 +438,8 @@
#define ETS_WBB_INUM 4
#define ETS_TG0_T1_INUM 10 /**< use edge interrupt*/
#define ETS_FRC1_INUM 22
#define ETS_T1_WDT_INUM 24
#define ETS_CACHEERR_INUM 25
#define ETS_T1_WDT_INUM 31
#define ETS_CACHEERR_INUM 26
#define ETS_DPORT_INUM 28
//CPU0 Interrupt number used in ROM, should be cancelled in SDK