Merge branch 'apptrace_changes' into 'master'

Apptrace improvements

Closes IDF-11982

See merge request espressif/esp-idf!40407
Erhan Kurubas
2025-07-10 15:07:36 +02:00
20 changed files with 198 additions and 332 deletions

View File

@@ -191,15 +191,6 @@ menu "Application Level Tracing"
help
Size of the memory buffer for trace data in bytes.
config APPTRACE_PENDING_DATA_SIZE_MAX
int "Size of the pending data buffer"
depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
default 0
help
Size of the buffer for events in bytes. It is useful for buffering events from
the time critical code (scheduler, ISRs etc). If this parameter is 0 then
events will be discarded when main HW buffer is full.
menu "FreeRTOS SystemView Tracing"
depends on APPTRACE_ENABLE
config APPTRACE_SV_ENABLE
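For projects migrating over this change, the removed option is the kind of line that now has to be dropped from `sdkconfig` / `sdkconfig.defaults` (the 256-byte value below is only an illustrative figure, not taken from this commit):

```
# No longer supported after this commit -- remove from sdkconfig/sdkconfig.defaults:
CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX=256
```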

View File

@@ -52,7 +52,6 @@ const static char *TAG = "esp_apptrace";
static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size);
esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, const esp_apptrace_mem_block_t blocks_cfg[2])
{
// disabled by default
@@ -64,10 +63,6 @@ esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, co
proto->state.markers[i] = 0;
}
proto->state.in_block = 0;
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
esp_apptrace_rb_init(&proto->rb_pend, proto->pending_data,
sizeof(proto->pending_data));
#endif
return ESP_OK;
}
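For context on what is being deleted here and in the hunks below: the pending-data path was a small ring buffer fed from time-critical writers and drained on block switches. The following is a minimal sketch assembled only from the helper calls visible in this diff (`esp_apptrace_rb_init`, `esp_apptrace_rb_produce`, `esp_apptrace_rb_consume`, `esp_apptrace_rb_read_size_get`); the names, sizes and surrounding glue are illustrative, not the exact removed code.

```c
#include <string.h>
#include "esp_app_trace_util.h"   // esp_apptrace_rb_t and the rb helpers

// Illustrative stand-in for CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX
#define PEND_SZ_EXAMPLE 256

static uint8_t s_pend_storage[PEND_SZ_EXAMPLE + 1];
static esp_apptrace_rb_t s_rb_pend;

static void pend_init(void)
{
    esp_apptrace_rb_init(&s_rb_pend, s_pend_storage, sizeof(s_pend_storage));
}

// Writer side: when the current HW block is full, stash the packet instead of dropping it.
static uint8_t *pend_stash(uint32_t size)
{
    return esp_apptrace_rb_produce(&s_rb_pend, size);   // NULL when the pending buffer is full
}

// Block-switch side: drain whatever fits into the freshly acquired block.
static void pend_drain(uint8_t *block, uint32_t block_sz, uint32_t *marker)
{
    uint32_t chunk;
    while ((chunk = esp_apptrace_rb_read_size_get(&s_rb_pend)) > 0 && *marker < block_sz) {
        if (chunk > block_sz - *marker) {
            chunk = block_sz - *marker;
        }
        uint8_t *ptr = esp_apptrace_rb_consume(&s_rb_pend, chunk);
        if (ptr == NULL) {
            break;
        }
        memcpy(block + *marker, ptr, chunk);
        *marker += chunk;
    }
}
```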
@@ -81,10 +76,10 @@ static esp_err_t esp_apptrace_membufs_swap(esp_apptrace_membufs_proto_data_t *pr
{
int prev_block_num = proto->state.in_block % 2;
int new_block_num = prev_block_num ? (0) : (1);
esp_err_t res = ESP_OK;
res = proto->hw->swap_start(proto->state.in_block);
esp_err_t res = proto->hw->swap_start(proto->state.in_block);
if (res != ESP_OK) {
ESP_APPTRACE_LOGE("Failed to swap to new block: %d", res);
return res;
}
@@ -113,28 +108,6 @@ static esp_err_t esp_apptrace_membufs_swap(esp_apptrace_membufs_proto_data_t *pr
}
hdr->block_sz = 0;
}
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// copy pending data to block if any
while (proto->state.markers[new_block_num] < proto->blocks[new_block_num].sz) {
uint32_t read_sz = esp_apptrace_rb_read_size_get(&proto->rb_pend);
if (read_sz == 0) {
break; // no more data in pending buffer
}
if (read_sz > proto->blocks[new_block_num].sz - proto->state.markers[new_block_num]) {
read_sz = proto->blocks[new_block_num].sz - proto->state.markers[new_block_num];
}
uint8_t *ptr = esp_apptrace_rb_consume(&proto->rb_pend, read_sz);
if (!ptr) {
assert(false && "Failed to consume pended bytes!!");
break;
}
ESP_APPTRACE_LOGD("Pump %d pend bytes [%x %x %x %x : %x %x %x %x : %x %x %x %x : %x %x...%x %x]",
read_sz, *(ptr+0), *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7), *(ptr+8), *(ptr+9), *(ptr+10), *(ptr+11), *(ptr+12), *(ptr+13), *(ptr+read_sz-2), *(ptr+read_sz-1));
memcpy(proto->blocks[new_block_num].start + proto->state.markers[new_block_num], ptr, read_sz);
proto->state.markers[new_block_num] += read_sz;
}
#endif
proto->hw->swap_end(proto->state.in_block, proto->state.markers[prev_block_num]);
return res;
}
@@ -231,45 +204,6 @@ static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membu
return total_sz;
}
static inline uint8_t *esp_apptrace_membufs_wait4buf(esp_apptrace_membufs_proto_data_t *proto, uint16_t size, esp_apptrace_tmo_t *tmo, int *pended)
{
uint8_t *ptr = NULL;
int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
return NULL;
}
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// check if we still have pending data
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if after block switch we still have pending data (not all pending data have been pumped to block)
// alloc new pending buffer
*pended = 1;
ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
if (!ptr) {
ESP_APPTRACE_LOGE("Failed to alloc pend buf 1: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
}
} else
#endif
{
// update block pointers
if (ESP_APPTRACE_INBLOCK_MARKER(proto) + size > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
*pended = 1;
ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
if (ptr == NULL) {
ESP_APPTRACE_LOGE("Failed to alloc pend buf 2: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
}
#endif
} else {
*pended = 0;
ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
}
}
return ptr;
}
static inline uint8_t *esp_apptrace_membufs_pkt_start(uint8_t *ptr, uint16_t size)
{
// it is safe to use esp_cpu_get_core_id() in macro call because arg is used only once inside it
@@ -287,63 +221,23 @@ static inline void esp_apptrace_membufs_pkt_end(uint8_t *ptr)
uint8_t *esp_apptrace_membufs_up_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t size, esp_apptrace_tmo_t *tmo)
{
uint8_t *buf_ptr = NULL;
if (size > ESP_APPTRACE_USR_DATA_LEN_MAX(proto)) {
ESP_APPTRACE_LOGE("Too large user data size %" PRIu32 "!", size);
return NULL;
}
// check for data in the pending buffer
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if we have buffered data try to switch block
esp_apptrace_membufs_swap(proto);
// if switch was successful, part or all pended data have been copied to block
}
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if we have buffered data alloc new pending buffer
ESP_APPTRACE_LOGD("Get %d bytes from PEND buffer", size);
buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
if (buf_ptr == NULL) {
int pended_buf;
buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
if (buf_ptr && !pended_buf) {
ESP_APPTRACE_LOGD("Get %d bytes from block", size);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
} else {
#else
if (1) {
#endif
if (ESP_APPTRACE_INBLOCK_MARKER(proto) + ESP_APPTRACE_USR_BLOCK_RAW_SZ(size) > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
ESP_APPTRACE_LOGD("Block full. Get %" PRIu32 " bytes from PEND buffer", size);
buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
#endif
if (buf_ptr == NULL) {
int pended_buf;
ESP_APPTRACE_LOGD(" full. Get %" PRIu32 " bytes from pend buffer", size);
buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
if (buf_ptr && !pended_buf) {
ESP_APPTRACE_LOGD("Got %" PRIu32 " bytes from block", size);
int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
return NULL;
}
}
uint8_t *buf_ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
} else {
ESP_APPTRACE_LOGD("Get %" PRIu32 " bytes from buffer", size);
// fit to curr block
buf_ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
if (buf_ptr) {
buf_ptr = esp_apptrace_membufs_pkt_start(buf_ptr, size);
}
ESP_APPTRACE_LOGD("Got %" PRIu32 " bytes from block", size);
return buf_ptr;
}
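The up-buffer path rewritten above sits behind the public buffer API in `esp_app_trace.h`; as a reminder of how application code reaches it, a short sketch (destination, payload and timeout are arbitrary example choices):

```c
#include <string.h>
#include "esp_app_trace.h"

// Send one 32-bit sample to the host over the TRAX/JTAG destination,
// waiting at most 100 ms (in microseconds) for space in the current block.
static void trace_sample(uint32_t value)
{
    uint8_t *buf = esp_apptrace_buffer_get(ESP_APPTRACE_DEST_TRAX, sizeof(value), 100000);
    if (buf == NULL) {
        return;   // no space within the timeout: the sample is dropped
    }
    memcpy(buf, &value, sizeof(value));
    esp_apptrace_buffer_put(ESP_APPTRACE_DEST_TRAX, buf, 100000);
}
```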
@@ -367,15 +261,16 @@ esp_err_t esp_apptrace_membufs_flush_nolock(esp_apptrace_membufs_proto_data_t *p
ESP_APPTRACE_LOGI("Ignore flush request for min %" PRIu32 " bytes. Bytes in block: %" PRIu32, min_sz, ESP_APPTRACE_INBLOCK_MARKER(proto));
return ESP_OK;
}
// switch block while size of data (including that in pending buffer) is more than min size
// switch block while size of data is more than min size
while (ESP_APPTRACE_INBLOCK_MARKER(proto) > min_sz) {
ESP_APPTRACE_LOGD("Try to flush %" PRIu32 " bytes. Wait until block switch for %" PRIi64 " us", ESP_APPTRACE_INBLOCK_MARKER(proto), tmo->tmo);
ESP_APPTRACE_LOGD("Try to flush %" PRIu32 " bytes", ESP_APPTRACE_INBLOCK_MARKER(proto));
res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE)
ESP_APPTRACE_LOGW("Failed to switch to another block in %lld us!", tmo->tmo);
else
ESP_APPTRACE_LOGE("Failed to switch to another block in %lld us!", tmo->tmo);
if (res == ESP_ERR_TIMEOUT) {
ESP_APPTRACE_LOGW("Failed to switch to another block in %" PRIi32 " us!", (int32_t)tmo->elapsed);
} else {
ESP_APPTRACE_LOGE("Failed to switch to another block, res: %d", res);
}
return res;
}
}
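The flush loop above is normally reached through the public flush call; a minimal usage sketch (the timeout value is an arbitrary example):

```c
#include "esp_app_trace.h"

// Push buffered trace data for the TRAX/JTAG destination to the host,
// allowing up to one second (in microseconds) for the block switches.
void trace_flush_example(void)
{
    if (esp_apptrace_flush(ESP_APPTRACE_DEST_TRAX, 1000000) != ESP_OK) {
        // Timed out or failed: matches the warning/error paths added above.
    }
}
```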

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2017-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2017-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2017-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2017-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2017-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -32,7 +32,7 @@ typedef struct {
* @brief Initializes timeout structure.
*
* @param tmo Pointer to timeout structure to be initialized.
* @param user_tmo Timeout value (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinetly.
* @param user_tmo Timeout value (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
*/
static inline void esp_apptrace_tmo_init(esp_apptrace_tmo_t *tmo, uint32_t user_tmo)
{
@@ -94,7 +94,7 @@ esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock);
/** Ring buffer control structure.
*
* @note For purposes of application tracing module if there is no enough space for user data and write pointer can be wrapped
* current ring buffer size can be temporarily shrinked in order to provide buffer with requested size.
* current ring buffer size can be temporarily shrunk in order to provide buffer with requested size.
*/
typedef struct {
uint8_t *data; ///< pointer to data storage
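To tie the utilities in this header together, a small sketch of how the timeout object is used by callers. `esp_apptrace_tmo_check()` is assumed to come from the same utility header; it is not shown in this diff.

```c
#include <stdbool.h>
#include "esp_app_trace_util.h"

// Poll a condition under a 100 ms budget (timeouts are in microseconds).
// Passing ESP_APPTRACE_TMO_INFINITE instead would wait indefinitely.
static esp_err_t wait_until_ready(bool (*ready)(void))
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, 100000);
    while (!ready()) {
        // Assumed helper: updates tmo->elapsed and returns ESP_ERR_TIMEOUT once expired.
        esp_err_t res = esp_apptrace_tmo_check(&tmo);
        if (res != ESP_OK) {
            return res;
        }
    }
    return ESP_OK;
}
```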

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2017-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2017-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -14,7 +14,6 @@
#include "string.h"
#include "driver/gpio.h"
#define APPTRACE_DEST_UART (CONFIG_APPTRACE_DEST_UART0 | CONFIG_APPTRACE_DEST_UART1 | CONFIG_APPTRACE_DEST_UART2)
#define APP_TRACE_MAX_TX_BUFF_UART CONFIG_APPTRACE_UART_TX_BUFF_SIZE
@@ -109,7 +108,6 @@ static inline void esp_apptrace_uart_hw_init(void)
ESP_APPTRACE_LOGI("Initialized UART on CPU%d", esp_cpu_get_core_id());
}
/*****************************************************************************************/
/***************************** Apptrace HW iface *****************************************/
/*****************************************************************************************/
@@ -128,8 +126,7 @@ static esp_err_t esp_apptrace_send_uart_data(esp_apptrace_uart_data_t *hw_data,
len_free = out_position - hw_data->tx_data_buff_in;
}
int check_len = APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in;
if (size <= len_free)
{
if (size <= len_free) {
if (check_len >= size) {
memcpy(&hw_data->tx_data_buff[hw_data->tx_data_buff_in], data, size);
hw_data->tx_data_buff_in += size;
@@ -183,13 +180,11 @@ static void esp_apptrace_send_uart_tx_task(void *arg)
while (1) {
send_buff_data(hw_data, &tmo);
vTaskDelay(10);
if (hw_data->circular_buff_overflow == true)
{
if (hw_data->circular_buff_overflow == true) {
hw_data->circular_buff_overflow = false;
ESP_LOGE(TAG, "Buffer overflow. Please increase UART baudrate, or increase UART TX ring buffer size in menuconfig.");
}
if (hw_data->message_buff_overflow == true)
{
if (hw_data->message_buff_overflow == true) {
hw_data->message_buff_overflow = false;
ESP_LOGE(TAG, "Message size more then message buffer!");
}
@@ -209,8 +204,7 @@ static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data)
hw_data->tx_data_buff_in = 0;
hw_data->tx_data_buff_out = 0;
hw_data->tx_msg_buff = (uint8_t *)heap_caps_malloc(APP_TRACE_MAX_TX_MSG_UART, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
if (hw_data->tx_msg_buff == NULL)
{
if (hw_data->tx_msg_buff == NULL) {
return ESP_ERR_NO_MEM;
}
hw_data->tx_msg_buff_size = 0;
@@ -222,8 +216,9 @@ static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data)
int source_clk = UART_SCLK_DEFAULT;
#if SOC_UART_LP_NUM > 0
if (hw_data->port_num >= SOC_UART_HP_NUM)
if (hw_data->port_num >= SOC_UART_HP_NUM) {
source_clk = LP_UART_SCLK_DEFAULT;
}
#endif
const uart_config_t uart_config = {
@@ -244,7 +239,9 @@ static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data)
assert((err == ESP_OK) && "Not possible to configure UART RX/TX pins. Please check and change menuconfig parameters!");
int uart_prio = CONFIG_APPTRACE_UART_TASK_PRIO;
if (uart_prio >= (configMAX_PRIORITIES-1)) uart_prio = configMAX_PRIORITIES - 1;
if (uart_prio >= (configMAX_PRIORITIES - 1)) {
uart_prio = configMAX_PRIORITIES - 1;
}
err = xTaskCreate(esp_apptrace_send_uart_tx_task, "app_trace_uart_tx_task", 2500, hw_data, uart_prio, NULL);
assert((err == pdPASS) && "Not possible to configure UART. Not possible to create task!");
@@ -266,8 +263,7 @@ static uint8_t *esp_apptrace_uart_up_buffer_get(esp_apptrace_uart_data_t *hw_dat
hw_data->message_buff_overflow = true;
return NULL;
}
if (hw_data->tx_msg_buff_size != 0)
{
if (hw_data->tx_msg_buff_size != 0) {
// A previous message was not sent.
return NULL;
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*/
@@ -54,7 +54,6 @@ static esp_err_t esp_apptrace_riscv_buffer_swap(uint32_t new_block_id, uint32_t
static esp_err_t esp_apptrace_riscv_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len);
static bool esp_apptrace_riscv_host_data_pending(void);
const static char *TAG = "esp_apptrace";
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE

View File

@@ -106,16 +106,7 @@
// Data which are transferred from host to target are also prepended with a header. Down channel data header is simple and consists of one two-byte field
// containing length of host data following the header.
// 4.3 Data Buffering
// ------------------
// It takes some time for the host to read TRAX memory block via JTAG. In streaming mode it can happen that target has filled its TRAX block, but host
// has not completed reading of the previous one yet. So in this case time critical tracing calls (which can not be delayed for too long time due to
// the lack of free memory in TRAX block) can be dropped. To avoid such scenarios tracing module implements data buffering. Buffered data will be sent
// to the host later when TRAX block switch occurs. The maximum size of the buffered data is controlled by menuconfig option
// CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX.
// 4.4 Target Connection/Disconnection
// 4.3 Target Connection/Disconnection
// -----------------------------------
// When host is going to start tracing in streaming mode it needs to put both ESP32 cores into initial state when 'host connected' bit is set
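The down-channel framing mentioned near the top of this hunk (a single two-byte length field followed by that many bytes of host data) can be pictured as below; the struct name and packed layout are illustrative only, not taken from the source.

```c
#include <stdint.h>

// Hypothetical illustration of the down-channel header described above:
// one 16-bit length field, then `length` bytes of host data.
typedef struct __attribute__((packed)) {
    uint16_t length;        // number of host payload bytes following this header
    // uint8_t payload[];   // the host data itself
} apptrace_down_hdr_sketch_t;
```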
@@ -197,7 +188,6 @@ typedef struct {
esp_apptrace_membufs_proto_data_t membufs;
} esp_apptrace_trax_data_t;
static esp_err_t esp_apptrace_trax_init(esp_apptrace_trax_data_t *hw_data);
static esp_err_t esp_apptrace_trax_flush(esp_apptrace_trax_data_t *hw_data, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_trax_flush_nolock(esp_apptrace_trax_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo);
@@ -212,7 +202,6 @@ static esp_err_t esp_apptrace_trax_buffer_swap(uint32_t new_block_id, uint32_t p
static esp_err_t esp_apptrace_trax_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len);
static bool esp_apptrace_trax_host_data_pending(void);
const static char *TAG = "esp_apptrace";
static uint8_t * const s_trax_blocks[] = {

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -38,12 +38,6 @@ typedef struct {
esp_apptrace_membufs_proto_hw_t * hw;
volatile esp_apptrace_membufs_state_t state; // state
esp_apptrace_mem_block_t blocks[2]; // memory blocks
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// ring buffer control struct for pending user blocks
esp_apptrace_rb_t rb_pend;
// storage for pending user blocks
uint8_t pending_data[CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX + 1];
#endif
// ring buffer control struct for data from host (down buffer)
esp_apptrace_rb_t rb_down;
} esp_apptrace_membufs_proto_data_t;

View File

@@ -8,7 +8,6 @@ CONFIG_ESP32_APPTRACE_ENABLE CONFIG_APPTRACE_ENABLE
CONFIG_ESP32_APPTRACE_LOCK_ENABLE CONFIG_APPTRACE_LOCK_ENABLE
CONFIG_ESP32_APPTRACE_ONPANIC_HOST_FLUSH_TMO CONFIG_APPTRACE_ONPANIC_HOST_FLUSH_TMO
CONFIG_ESP32_APPTRACE_POSTMORTEM_FLUSH_TRAX_THRESH CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH
CONFIG_ESP32_APPTRACE_PENDING_DATA_SIZE_MAX CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX
CONFIG_ESP32_GCOV_ENABLE CONFIG_APPTRACE_GCOV_ENABLE
CONFIG_SYSVIEW_ENABLE CONFIG_APPTRACE_SV_ENABLE

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2017-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -61,7 +61,7 @@ static uint8_t s_down_buf[SYSVIEW_DOWN_BUF_SIZE];
*
* Parameters
* min_sz Threshold for flushing data. If current filling level is above this value, data will be flushed. TRAX destinations only.
* tmo Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinetly.
* tmo Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
*
* Return value
* None.
@@ -92,7 +92,7 @@ void SEGGER_RTT_ESP_FlushNoLock(unsigned long min_sz, unsigned long tmo)
*
* Parameters
* min_sz Threshold for flushing data. If current filling level is above this value, data will be flushed. TRAX destinations only.
* tmo Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinetly.
* tmo Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
*
* Return value
* None.
@@ -121,7 +121,8 @@ void SEGGER_RTT_ESP_Flush(unsigned long min_sz, unsigned long tmo)
* Return value
* Number of bytes that have been read.
*/
unsigned SEGGER_RTT_ReadNoLock(unsigned BufferIndex, void* pData, unsigned BufferSize) {
unsigned SEGGER_RTT_ReadNoLock(unsigned BufferIndex, void* pData, unsigned BufferSize)
{
uint32_t size = BufferSize;
esp_err_t res = esp_apptrace_read(ESP_APPTRACE_DEST_SYSVIEW, pData, &size, 0);
if (res != ESP_OK) {
@@ -154,7 +155,8 @@ unsigned SEGGER_RTT_ReadNoLock(unsigned BufferIndex, void* pData, unsigned Buffe
* and may only be called after RTT has been initialized.
* Either by calling SEGGER_RTT_Init() or calling another RTT API function first.
*/
unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, unsigned NumBytes) {
unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, unsigned NumBytes)
{
uint8_t *pbuf = (uint8_t *)pBuffer;
uint8_t event_id = *pbuf;
#if CONFIG_APPTRACE_SV_DEST_UART
@@ -216,15 +218,13 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
#if CONFIG_APPTRACE_SV_DEST_UART
esp_err_t res = esp_apptrace_write(ESP_APPTRACE_DEST_SYSVIEW, pBuffer, NumBytes, SEGGER_HOST_WAIT_TMO);
if (res != ESP_OK)
{
if (res != ESP_OK) {
return 0; // skip current data buffer only, accumulated events are kept
}
s_events_buf_filled = 0;
#endif
if (event_id == SYSVIEW_EVTID_TRACE_STOP)
{
if (event_id == SYSVIEW_EVTID_TRACE_STOP) {
SEGGER_RTT_ESP_FlushNoLock(0, SEGGER_STOP_WAIT_TMO);
}
return NumBytes;
@@ -255,7 +255,8 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
* May only be called once per buffer.
* Buffer name and flags can be reconfigured using the appropriate functions.
*/
int SEGGER_RTT_ConfigUpBuffer(unsigned BufferIndex, const char* sName, void* pBuffer, unsigned BufferSize, unsigned Flags) {
int SEGGER_RTT_ConfigUpBuffer(unsigned BufferIndex, const char* sName, void* pBuffer, unsigned BufferSize, unsigned Flags)
{
s_events_buf_filled = 0;
return 0;
}
@@ -285,7 +286,8 @@ int SEGGER_RTT_ConfigUpBuffer(unsigned BufferIndex, const char* sName, void* pBu
* May only be called once per buffer.
* Buffer name and flags can be reconfigured using the appropriate functions.
*/
int SEGGER_RTT_ConfigDownBuffer(unsigned BufferIndex, const char* sName, void* pBuffer, unsigned BufferSize, unsigned Flags) {
int SEGGER_RTT_ConfigDownBuffer(unsigned BufferIndex, const char* sName, void* pBuffer, unsigned BufferSize, unsigned Flags)
{
esp_apptrace_down_buffer_config(s_down_buf, sizeof(s_down_buf));
return 0;
}
@@ -302,5 +304,4 @@ ESP_SYSTEM_INIT_FN(sysview_init, SECONDARY, BIT(0), 120)
return ESP_OK;
}
/*************************** End of file ****************************/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2018-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -27,7 +27,6 @@ static SEGGER_SYSVIEW_MODULE s_esp_sysview_heap_module = {
static bool s_mod_registered;
esp_err_t esp_sysview_heap_trace_start(uint32_t tmo)
{
uint32_t tmo_ticks = tmo / (1000 * portTICK_PERIOD_MS);

View File

@@ -31,7 +31,7 @@ The library supports two modes of operation:
**Post-mortem mode:** This is the default mode. The mode does not need interaction with the host side. In this mode, tracing module does not check whether the host has read all the data from *HW UP BUFFER*, but directly overwrites old data with the new ones. This mode is useful when only the latest trace data is interesting to the user, e.g., for analyzing program's behavior just before the crash. The host can read the data later on upon user request, e.g., via special OpenOCD command in case of working via JTAG interface.
**Streaming mode:** Tracing module enters this mode when the host connects to {IDF_TARGET_NAME}. In this mode, before writing new data to *HW UP BUFFER*, the tracing module checks whether there is enough space in it and if necessary, waits for the host to read data and free enough memory. Maximum waiting time is controlled via timeout values passed by users to corresponding API routines. So when application tries to write data to the trace buffer using the finite value of the maximum waiting time, it is possible that this data will be dropped. This is especially true for tracing from time critical code (ISRs, OS scheduler code, etc.) where infinite timeouts can lead to system malfunction. In order to avoid loss of such critical data, developers can enable additional data buffering via menuconfig option :ref:`CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX`. This macro specifies the size of data which can be buffered in above conditions. The option can also help to overcome situation when data transfer to the host is temporarily slowed down, e.g., due to USB bus congestions. But it will not help when the average bitrate of the trace data stream exceeds the hardware interface capabilities.
**Streaming mode:** Tracing module enters this mode when the host connects to {IDF_TARGET_NAME}. In this mode, before writing new data to *HW UP BUFFER*, the tracing module checks whether there is enough space in it and if necessary, waits for the host to read data and free enough memory. Maximum waiting time is controlled via timeout values passed by users to corresponding API routines. So when application tries to write data to the trace buffer using the finite value of the maximum waiting time, it is possible that this data will be dropped. This is especially true for tracing from time critical code (ISRs, OS scheduler code, etc.) where infinite timeouts can lead to system malfunction.
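To make the timeout behaviour described above concrete now that the extra buffering option is gone, a hedged sketch of a write with a finite timeout (destination and timeout are arbitrary example choices):

```c
#include "esp_app_trace.h"

// From time-critical code, use a short finite timeout: if the host has not
// freed space in time, the call returns an error and the data is simply
// dropped instead of blocking the caller.
static void log_event(const void *event, uint32_t size)
{
    if (esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, event, size, 100 /* us */) != ESP_OK) {
        // Data dropped; without CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX there is no fallback buffer.
    }
}
```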
Configuration Options and Dependencies

View File

@@ -43,3 +43,8 @@ Bootloader
----------
Removed option for compiling bootloader with no optimization level (-O0, `CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_NONE`). On most targets it was no longer possible to compile the bootloader with -O0, as IRAM sections would overflow. For debugging purposes, it is recommended to use the -Og (:ref:`CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_DEBUG<CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_DEBUG>`) optimization level instead. This provides a good balance between optimization and debuggability.
App Trace
----------
Removed extra data buffering option. `CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX` is no longer supported.

View File

@@ -31,7 +31,7 @@ ESP-IDF provides an application-level tracing feature for analyzing application behavior.
**Post-mortem mode:** Post-mortem mode is the default mode and requires no interaction with the host. In this mode, the tracing module does not check whether the host has already read all the data from the *HW UP BUFFER*, but directly overwrites the old data with new data. This mode is recommended if the user is only interested in the latest trace data, e.g., for analyzing the program's behavior just before a crash. The host can read the data later upon the user's request, e.g., via a special OpenOCD command when the JTAG interface is used.
**Streaming mode:** The tracing module enters this mode when the host connects to {IDF_TARGET_NAME}. In this mode, before new data is written to the *HW UP BUFFER*, the tracing module checks whether there is enough space in it and, if necessary, waits for the host to read the data and free enough memory. The maximum waiting time is determined by the timeout parameter passed by the user to the corresponding API function. Therefore, when the application tries to write data to the trace buffer with a finite maximum waiting time, this data may be dropped. Note in particular that specifying an infinite timeout in time-critical code (such as interrupt handlers or the OS scheduler) will lead to system failure. To avoid losing such critical data, developers can enable the :ref:`CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX` option in menuconfig to enable an additional data buffer. This macro also specifies the amount of data that can be buffered under the above conditions; it helps mitigate situations where data transfer to the host is temporarily slowed down, e.g., due to USB bus congestion. However, it cannot help when the average bitrate of the trace data stream exceeds the capabilities of the hardware interface.
**Streaming mode:** The tracing module enters this mode when the host connects to {IDF_TARGET_NAME}. In this mode, before new data is written to the *HW UP BUFFER*, the tracing module checks whether there is enough space in it and, if necessary, waits for the host to read the data and free enough memory. The maximum waiting time is determined by the timeout parameter passed by the user to the corresponding API function. Therefore, when the application tries to write data to the trace buffer with a finite maximum waiting time, this data may be dropped. Note in particular that specifying an infinite timeout in time-critical code (such as interrupt handlers or the OS scheduler) will lead to system failure.
Configuration Options and Dependencies

View File

@@ -4,5 +4,4 @@ CONFIG_APPTRACE_ENABLE=y
CONFIG_APPTRACE_LOCK_ENABLE=y
CONFIG_APPTRACE_ONPANIC_HOST_FLUSH_TMO=-1
CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH=0
CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX=0
CONFIG_APPTRACE_GCOV_ENABLE=y

View File

@@ -43,7 +43,6 @@ components_not_formatted_temporary:
# 2. If no, move it to 'components_not_formatted_permanent' section below.
check: false
include:
- "/components/app_trace/"
- "/components/app_update/"
- "/components/bootloader_support/"
- "/components/bootloader/"