Merge branch 'test/parlio_rx_cache_safe_v5.3' into 'release/v5.3'

Reenable parlio rx cache safe test (v5.3)

See merge request espressif/esp-idf!38684
morris
2025-04-24 18:47:42 +08:00
6 changed files with 31 additions and 24 deletions

View File

@@ -298,15 +298,14 @@ esp_err_t parlio_new_tx_unit(const parlio_tx_unit_config_t *config, parlio_tx_un
     ESP_RETURN_ON_FALSE(config->flags.clk_gate_en == 0, ESP_ERR_NOT_SUPPORTED, TAG, "clock gating is not supported");
 #endif // SOC_PARLIO_TX_CLK_SUPPORT_GATING
-    // malloc unit memory
-    uint32_t mem_caps = PARLIO_MEM_ALLOC_CAPS;
-    unit = heap_caps_calloc(1, sizeof(parlio_tx_unit_t) + sizeof(parlio_tx_trans_desc_t) * config->trans_queue_depth, mem_caps);
+    // allocate unit from internal memory because it contains atomic member
+    unit = heap_caps_calloc(1, sizeof(parlio_tx_unit_t) + sizeof(parlio_tx_trans_desc_t) * config->trans_queue_depth, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
     ESP_GOTO_ON_FALSE(unit, ESP_ERR_NO_MEM, err, TAG, "no memory for tx unit");
     unit->max_transfer_bits = config->max_transfer_size * 8;
     unit->base.dir = PARLIO_DIR_TX;
     unit->data_width = data_width;
-    //create transaction queue
+    // create transaction queue
     ESP_GOTO_ON_ERROR(parlio_tx_create_trans_queue(unit, config), err, TAG, "create transaction queue failed");
     // register the unit to a group
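
The allocation change above follows a general rule for cache-safe drivers: a control block that holds a C11 atomic member and may be touched from an ISR while the flash cache is disabled has to live in internal RAM, so it is allocated with explicit capability flags instead of a configurable default. A minimal sketch of that pattern, using a hypothetical control struct and helper (not the driver's real types):

    #include <stdint.h>
    #include <stdatomic.h>
    #include "esp_check.h"
    #include "esp_heap_caps.h"

    typedef struct {
        atomic_uint fsm;       // atomic member -> keep the whole struct in internal RAM
        uint32_t queue_depth;
    } hypothetical_unit_t;

    static esp_err_t hypothetical_unit_create(uint32_t queue_depth, hypothetical_unit_t **ret_unit)
    {
        // MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT keeps the allocation out of external RAM
        hypothetical_unit_t *unit = heap_caps_calloc(1, sizeof(hypothetical_unit_t),
                                                     MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_RETURN_ON_FALSE(unit, ESP_ERR_NO_MEM, "example", "no memory for unit");
        unit->queue_depth = queue_depth;
        *ret_unit = unit;
        return ESP_OK;
    }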

View File

@@ -2,11 +2,6 @@ set(srcs "test_app_main.c"
          "test_parlio_rx.c"
          "test_parlio_tx.c")
-# TODO: IDF-7840, semaphore in 'spi_bus_lock.c' is not IRAM safe
-if(CONFIG_PARLIO_ISR_IRAM_SAFE)
-    list(REMOVE_ITEM srcs "test_parlio_rx.c")
-endif()
 # In order for the cases defined by `TEST_CASE` to be linked into the final elf,
 # the component can be registered as WHOLE_ARCHIVE
 idf_component_register(SRCS ${srcs}

View File

@@ -5,8 +5,8 @@
  */
 #include <string.h>
+#include <stdatomic.h>
 #include "sdkconfig.h"
-#include "stdatomic.h"
 #include "esp_types.h"
 #include "esp_attr.h"
 #include "esp_check.h"
@@ -31,6 +31,12 @@
 #include "esp_private/gdma.h"
 #endif
 
+#if CONFIG_SPI_MASTER_ISR_IN_IRAM || CONFIG_SPI_SLAVE_ISR_IN_IRAM
+#define SPI_COMMON_MALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
+#else
+#define SPI_COMMON_MALLOC_CAPS (MALLOC_CAP_DEFAULT)
+#endif
+
 static const char *SPI_TAG = "spi";
 #define SPI_CHECK(a, str, ret_val) ESP_RETURN_ON_FALSE(a, ret_val, SPI_TAG, str)
@@ -265,7 +271,7 @@ esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma
 #endif
     esp_err_t ret = ESP_OK;
-    spi_dma_ctx_t *dma_ctx = (spi_dma_ctx_t *)calloc(1, sizeof(spi_dma_ctx_t));
+    spi_dma_ctx_t *dma_ctx = (spi_dma_ctx_t *)heap_caps_calloc(1, sizeof(spi_dma_ctx_t), SPI_COMMON_MALLOC_CAPS);
     if (!dma_ctx) {
         ret = ESP_ERR_NO_MEM;
         goto cleanup;
@@ -814,7 +820,7 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
     SPI_CHECK(spi_chan_claimed, "host_id already in use", ESP_ERR_INVALID_STATE);
 
     //clean and initialize the context
-    ctx = (spicommon_bus_context_t *)calloc(1, sizeof(spicommon_bus_context_t));
+    ctx = (spicommon_bus_context_t *)heap_caps_calloc(1, sizeof(spicommon_bus_context_t), SPI_COMMON_MALLOC_CAPS);
     if (!ctx) {
         err = ESP_ERR_NO_MEM;
         goto cleanup;
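
The new SPI_COMMON_MALLOC_CAPS macro gates the allocation capabilities on the Kconfig options that place the SPI ISRs in IRAM: only in that case are the driver contexts pinned to internal RAM; otherwise the default heap is used, which, depending on the PSRAM configuration, may include external RAM. A minimal sketch of the same pattern for an arbitrary driver, with CONFIG_MY_DRIVER_ISR_IN_IRAM, MY_DRIVER_MALLOC_CAPS, and my_driver_alloc_ctx as hypothetical names:

    #include "sdkconfig.h"
    #include "esp_heap_caps.h"

    // Hypothetical driver: pin its state to internal RAM only when its ISR is IRAM-resident,
    // otherwise fall back to the default heap.
    #if CONFIG_MY_DRIVER_ISR_IN_IRAM
    #define MY_DRIVER_MALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
    #else
    #define MY_DRIVER_MALLOC_CAPS (MALLOC_CAP_DEFAULT)
    #endif

    static void *my_driver_alloc_ctx(size_t size)
    {
        // zero-initialized, capability-constrained allocation
        return heap_caps_calloc(1, size, MY_DRIVER_MALLOC_CAPS);
    }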

View File

@@ -133,6 +133,12 @@ We have two bits to control the interrupt:
 #include "esp_cache.h"
 #endif
 
+#if CONFIG_SPI_MASTER_IN_IRAM || CONFIG_SPI_MASTER_ISR_IN_IRAM
+#define SPI_MASTER_MALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
+#else
+#define SPI_MASTER_MALLOC_CAPS (MALLOC_CAP_DEFAULT)
+#endif
+
 typedef struct spi_device_t spi_device_t;
 
 /// struct to hold private transaction data (like tx and rx buffer for DMA).
@@ -481,7 +487,7 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa
     }
 
     //Allocate memory for device
-    dev = malloc(sizeof(spi_device_t));
+    dev = heap_caps_malloc(sizeof(spi_device_t), SPI_MASTER_MALLOC_CAPS);
     if (dev == NULL) {
         goto nomem;
     }

View File

@@ -156,7 +156,8 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
     spi_chan_claimed = spicommon_periph_claim(host, "spi slave");
     SPI_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);
 
-    spihost[host] = malloc(sizeof(spi_slave_t));
+    // spi_slave_t contains atomic variable, memory must be allocated from internal memory
+    spihost[host] = heap_caps_malloc(sizeof(spi_slave_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
     if (spihost[host] == NULL) {
         ret = ESP_ERR_NO_MEM;
         goto cleanup;

View File

@@ -33,7 +33,7 @@
  * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
  * the background operations (ISR or cache access).
  *
- * There are N (device/task) + 1 (BG) acquiring processer candidates that may touch the bus.
+ * There are N (device/task) + 1 (BG) acquiring processor candidates that may touch the bus.
  *
  * The core of the lock is a `status` atomic variable, which is always available. No intermediate
  * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
@@ -49,7 +49,7 @@
  * state of devices. Either one of REQ or PENDING being active indicates the device has pending BG
  * requests. Reason of having two bits instead of one is in the appendix below.
  *
- * Acquiring processer means the current processor (task or ISR) allowed to touch the critical
+ * Acquiring processor means the current processor (task or ISR) allowed to touch the critical
  * resources, or the SPI bus.
 *
 * States of the lock:
@@ -168,7 +168,7 @@ typedef struct spi_bus_lock_t spi_bus_lock_t;
  * This flag is weak, will not prevent acquiring of devices. But will help the BG to be re-enabled again after the bus is release.
  */
-// get the bit mask wher bit [high-1, low] are all 1'b1 s.
+// get the bit mask where bit [high-1, low] are all 1'b1 s.
 #define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
 #define LOCK_BIT(mask) ((mask) << LOCK_SHIFT)
@@ -238,7 +238,7 @@ struct spi_bus_lock_dev_t {
  *      acquire_end_core():
  *          uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
  *
- *      Becuase this is the first `spi_hdl_1`, so after this , lock_bits == 0`b0. status == 0
+ *      Because this is the first `spi_hdl_1`, so after this , lock_bits == 0`b0. status == 0
  *
  * 2. spi_hdl_2:
  *      acquire_core:
@@ -254,7 +254,7 @@ struct spi_bus_lock_dev_t {
  *
  * 5. spi_hdl_1:
  *      acquire_end_core:
- *          status is 0, so it cleas the lock->acquiring_dev
+ *          status is 0, so it clears the lock->acquiring_dev
  *
  * 6. spi_hdl_2:
  *      spi_device_polling_end:
@@ -482,7 +482,7 @@ SPI_BUS_LOCK_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock,
 }
 // Clear the PEND bit (not REQ bit!) of a device, return the suggestion whether we can try to quit the ISR.
-// Lost the acquiring processor immediately when the BG bits for active device are inactive, indiciating by the return value.
+// Lost the acquiring processor immediately when the BG bits for active device are inactive, indicating by the return value.
 // Can be called only when ISR is acting as the acquiring processor.
 SPI_BUS_LOCK_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
 {
@@ -585,7 +585,7 @@ SPI_BUS_LOCK_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_h
  ******************************************************************************/
 esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
 {
-    spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
+    spi_bus_lock_t* lock = (spi_bus_lock_t*)heap_caps_calloc(1, sizeof(spi_bus_lock_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
     if (lock == NULL) {
         return ESP_ERR_NO_MEM;
     }
@@ -648,7 +648,7 @@ esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev
     if (dev_lock == NULL) {
         return ESP_ERR_NO_MEM;
     }
-    dev_lock->semphr = xSemaphoreCreateBinary();
+    dev_lock->semphr = xSemaphoreCreateBinaryWithCaps(MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
     if (dev_lock->semphr == NULL) {
         free(dev_lock);
         atomic_store(&lock->dev[id], (intptr_t)NULL);
@@ -676,7 +676,7 @@ void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
     atomic_store(&lock->dev[id], (intptr_t)NULL);
     if (dev_handle->semphr) {
-        vSemaphoreDelete(dev_handle->semphr);
+        vSemaphoreDeleteWithCaps(dev_handle->semphr);
     }
     free(dev_handle);
@@ -819,7 +819,7 @@ SPI_BUS_LOCK_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_han
 }
 SPI_BUS_LOCK_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
                                                          spi_bus_lock_dev_handle_t *out_dev_lock)
 {
     BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
     uint32_t status = lock_status_fetch(lock);
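
The bus-lock changes above also swap the plain FreeRTOS calls for ESP-IDF's capability-aware variants, so the semaphore's own storage ends up in internal RAM as well. A minimal sketch of how the create/delete pair is used, assuming the API from freertos/idf_additions.h (function name and flow are illustrative only):

    #include "freertos/FreeRTOS.h"
    #include "freertos/semphr.h"
    #include "freertos/idf_additions.h"
    #include "esp_heap_caps.h"

    void example_binary_semaphore_in_internal_ram(void)
    {
        // Create the semaphore with its memory allocated from internal RAM, so an
        // IRAM-safe ISR can still reach it while the flash cache is disabled.
        SemaphoreHandle_t sem = xSemaphoreCreateBinaryWithCaps(MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        if (sem == NULL) {
            return; // allocation failed
        }

        xSemaphoreGive(sem);
        xSemaphoreTake(sem, portMAX_DELAY);

        // Objects created with a *WithCaps call must be deleted with the matching *WithCaps call.
        vSemaphoreDeleteWithCaps(sem);
    }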