diff --git a/components/esp_driver_spi/include/driver/spi_slave.h b/components/esp_driver_spi/include/driver/spi_slave.h index 0d5a7cb9f7..5704bc1448 100644 --- a/components/esp_driver_spi/include/driver/spi_slave.h +++ b/components/esp_driver_spi/include/driver/spi_slave.h @@ -62,7 +62,6 @@ typedef struct { */ } spi_slave_interface_config_t; - #define SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO (1<<0) ///< Automatically re-malloc dma buffer if user buffer doesn't meet hardware alignment or dma_capable, this process may loss some memory and performance /** diff --git a/components/esp_driver_spi/src/gpspi/spi_master.c b/components/esp_driver_spi/src/gpspi/spi_master.c index a0692ddfb0..b29f55e9c9 100644 --- a/components/esp_driver_spi/src/gpspi/spi_master.c +++ b/components/esp_driver_spi/src/gpspi/spi_master.c @@ -137,7 +137,7 @@ typedef struct spi_device_t spi_device_t; typedef struct { spi_transaction_t *trans; const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is; - //otherwise sets to the original buffer or NULL if no buffer is assigned. + //otherwise sets to the original buffer or NULL if no buffer is assigned. uint32_t *buffer_to_rcv; // similar to buffer_to_send } spi_trans_priv_t; @@ -305,7 +305,7 @@ static esp_err_t spi_master_deinit_driver(void* arg) SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG); int x; - for (x=0; x<DEV_NUM_MAX; x++) { + for (x = 0; x < DEV_NUM_MAX; x++) { SPI_CHECK(host->device[x] == NULL, "not all CSses freed", ESP_ERR_INVALID_STATE); } @@ -326,8 +326,12 @@ void spi_get_timing(bool gpio_is_used, int input_delay_ns, int eff_clk, int* dum int timing_miso_delay; spi_hal_cal_timing(APB_CLK_FREQ, eff_clk, gpio_is_used, input_delay_ns, &timing_dummy, &timing_miso_delay); - if (dummy_o) *dummy_o = timing_dummy; - if (cycles_remain_o) *cycles_remain_o = timing_miso_delay; + if (dummy_o) { + *dummy_o = timing_dummy; + } + if (cycles_remain_o) { + *cycles_remain_o = timing_miso_delay; + } #else //TODO: IDF-6578 ESP_LOGW(SPI_TAG, "This func temporary not supported for current target!"); @@ -382,7 +386,7 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa //The hardware looks like it would support this, but actually setting cs_ena_pretrans when transferring in full //duplex mode does absolutely nothing on the ESP32. SPI_CHECK(dev_config->cs_ena_pretrans <= 1 || (dev_config->address_bits == 0 && dev_config->command_bits == 0) || - (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG); + (dev_config->flags & SPI_DEVICE_HALFDUPLEX), "In full-duplex mode, only support cs pretrans delay = 1 and without address_bits and command_bits", ESP_ERR_INVALID_ARG); #endif //Check post_cb status when `SPI_DEVICE_NO_RETURN_RESULT` flag is set.
@@ -428,7 +432,9 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa //Allocate memory for device dev = malloc(sizeof(spi_device_t)); - if (dev == NULL) goto nomem; + if (dev == NULL) { + goto nomem; + } memset(dev, 0, sizeof(spi_device_t)); dev->id = freecs; @@ -461,7 +467,7 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa //save a pointer to device in spi_host_t host->device[freecs] = dev; //save a pointer to host in spi_device_t - dev->host= host; + dev->host = host; //initialise the device specific configuration spi_hal_dev_config_t *hal_dev = &(dev->hal_dev); @@ -486,14 +492,18 @@ esp_err_t spi_bus_add_device(spi_host_device_t host_id, const spi_device_interfa hal_dev->positive_cs = dev_config->flags & SPI_DEVICE_POSITIVE_CS ? 1 : 0; *handle = dev; - ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id+1, freecs, freq/1000); + ESP_LOGD(SPI_TAG, "SPI%d: New device added to CS%d, effective clock: %dkHz", host_id + 1, freecs, freq / 1000); return ESP_OK; nomem: if (dev) { - if (dev->trans_queue) vQueueDelete(dev->trans_queue); - if (dev->ret_queue) vQueueDelete(dev->ret_queue); + if (dev->trans_queue) { + vQueueDelete(dev->trans_queue); + } + if (dev->ret_queue) { + vQueueDelete(dev->ret_queue); + } spi_bus_lock_unregister_dev(dev->dev_lock); } free(dev); @@ -502,13 +512,13 @@ nomem: esp_err_t spi_bus_remove_device(spi_device_handle_t handle) { - SPI_CHECK(handle!=NULL, "invalid handle", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle != NULL, "invalid handle", ESP_ERR_INVALID_ARG); //These checks aren't exhaustive; another thread could sneak in a transaction inbetween. These are only here to //catch design errors and aren't meant to be triggered during normal operation. 
- SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE); + SPI_CHECK(uxQueueMessagesWaiting(handle->trans_queue) == 0, "Have unfinished transactions", ESP_ERR_INVALID_STATE); SPI_CHECK(handle->host->cur_cs == DEV_NUM_MAX || handle->host->device[handle->host->cur_cs] != handle, "Have unfinished transactions", ESP_ERR_INVALID_STATE); if (handle->ret_queue) { - SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue)==0, "Have unfinished transactions", ESP_ERR_INVALID_STATE); + SPI_CHECK(uxQueueMessagesWaiting(handle->ret_queue) == 0, "Have unfinished transactions", ESP_ERR_INVALID_STATE); } #if SOC_SPI_SUPPORT_CLK_RC_FAST @@ -519,11 +529,17 @@ esp_err_t spi_bus_remove_device(spi_device_handle_t handle) //return int spics_io_num = handle->cfg.spics_io_num; - if (spics_io_num >= 0) spicommon_cs_free_io(spics_io_num); + if (spics_io_num >= 0) { + spicommon_cs_free_io(spics_io_num); + } //Kill queues - if (handle->trans_queue) vQueueDelete(handle->trans_queue); - if (handle->ret_queue) vQueueDelete(handle->ret_queue); + if (handle->trans_queue) { + vQueueDelete(handle->trans_queue); + } + if (handle->ret_queue) { + vQueueDelete(handle->ret_queue); + } spi_bus_lock_unregister_dev(handle->dev_lock); assert(handle->host->device[handle->id] == handle); @@ -534,7 +550,7 @@ esp_err_t spi_bus_remove_device(spi_device_handle_t handle) esp_err_t spi_device_get_actual_freq(spi_device_handle_t handle, int* freq_khz) { - if ((spi_device_t*)handle == NULL || freq_khz == NULL) { + if ((spi_device_t *)handle == NULL || freq_khz == NULL) { return ESP_ERR_INVALID_ARG; } @@ -568,7 +584,9 @@ static SPI_MASTER_ISR_ATTR void spi_setup_device(spi_device_t *dev) static SPI_MASTER_ISR_ATTR spi_device_t *get_acquiring_dev(spi_host_t *host) { spi_bus_lock_dev_handle_t dev_lock = spi_bus_lock_get_acquiring_dev(host->bus_attr->lock); - if (!dev_lock) return NULL; + if (!dev_lock) { + return NULL; + } return host->device[spi_bus_lock_get_dev_id(dev_lock)]; } @@ -623,7 +641,7 @@ static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_ //Set up OIO/QIO/DIO if needed hal_trans.line_mode.data_lines = (trans->flags & SPI_TRANS_MODE_DIO) ? 2 : - (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1; + (trans->flags & SPI_TRANS_MODE_QIO) ? 4 : 1; #if SOC_SPI_SUPPORT_OCT if (trans->flags & SPI_TRANS_MODE_OCT) { hal_trans.line_mode.data_lines = 8; @@ -652,7 +670,9 @@ static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_ spi_hal_prepare_data(hal, hal_dev, &hal_trans); //Call pre-transmission callback, if any - if (dev->cfg.pre_cb) dev->cfg.pre_cb(trans); + if (dev->cfg.pre_cb) { + dev->cfg.pre_cb(trans); + } //Kick off transfer spi_hal_user_start(hal); } @@ -666,7 +686,9 @@ static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host) spi_hal_fetch_result(&host->hal); //Call post-transaction callback, if any spi_device_t* dev = host->device[host->cur_cs]; - if (dev->cfg.post_cb) dev->cfg.post_cb(cur_trans); + if (dev->cfg.post_cb) { + dev->cfg.post_cb(cur_trans); + } host->cur_cs = DEV_NUM_MAX; } @@ -734,7 +756,6 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) spi_bus_lock_handle_t lock = host->bus_attr->lock; BaseType_t trans_found = pdFALSE; - // There should be remaining requests BUS_LOCK_DEBUG_EXECUTE_CHECK(spi_bus_lock_bg_req_exist(lock)); @@ -781,26 +802,28 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg) // or resume acquiring device task (if quit due to bus acquiring). 
} while (!spi_bus_lock_bg_exit(lock, trans_found, &do_yield)); - if (do_yield) portYIELD_FROM_ISR(); + if (do_yield) { + portYIELD_FROM_ISR(); + } } static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handle, spi_transaction_t *trans_desc) { - SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG); spi_host_t *host = handle->host; const spi_bus_attr_t* bus_attr = host->bus_attr; bool tx_enabled = (trans_desc->flags & SPI_TRANS_USE_TXDATA) || (trans_desc->tx_buffer); bool rx_enabled = (trans_desc->flags & SPI_TRANS_USE_RXDATA) || (trans_desc->rx_buffer); spi_transaction_ext_t *t_ext = (spi_transaction_ext_t *)trans_desc; - bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY)? t_ext->dummy_bits: handle->cfg.dummy_bits) != 0); + bool dummy_enabled = (((trans_desc->flags & SPI_TRANS_VARIABLE_DUMMY) ? t_ext->dummy_bits : handle->cfg.dummy_bits) != 0); bool extra_dummy_enabled = handle->hal_dev.timing_conf.timing_dummy; bool is_half_duplex = ((handle->cfg.flags & SPI_DEVICE_HALFDUPLEX) != 0); //check transmission length - SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA)==0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG); - SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA)==0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG); - SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz*8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG); - SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz*8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG); + SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_RXDATA) == 0 || trans_desc->rxlength <= 32, "SPI_TRANS_USE_RXDATA only available for rxdata transfer <= 32 bits", ESP_ERR_INVALID_ARG); + SPI_CHECK((trans_desc->flags & SPI_TRANS_USE_TXDATA) == 0 || trans_desc->length <= 32, "SPI_TRANS_USE_TXDATA only available for txdata transfer <= 32 bits", ESP_ERR_INVALID_ARG); + SPI_CHECK(trans_desc->length <= bus_attr->max_transfer_sz * 8, "txdata transfer > host maximum", ESP_ERR_INVALID_ARG); + SPI_CHECK(trans_desc->rxlength <= bus_attr->max_transfer_sz * 8, "rxdata transfer > host maximum", ESP_ERR_INVALID_ARG); SPI_CHECK(is_half_duplex || trans_desc->rxlength <= trans_desc->length, "rx length > tx length in full duplex mode", ESP_ERR_INVALID_ARG); //check working mode #if SOC_SPI_SUPPORT_OCT @@ -808,10 +831,10 @@ static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handl SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both Octal mode and 3-wire-mode", ESP_ERR_INVALID_ARG); SPI_CHECK(!((trans_desc->flags & SPI_TRANS_MODE_OCT) && !is_half_duplex), "Incompatible when setting to both Octal mode and half duplex mode", ESP_ERR_INVALID_ARG); #endif - SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when setting to both multi-line mode and 3-wire-mode", ESP_ERR_INVALID_ARG); - SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO|SPI_TRANS_MODE_QIO)) && !is_half_duplex), "Incompatible when setting to both multi-line mode and half duplex mode", ESP_ERR_INVALID_ARG); + SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO | SPI_TRANS_MODE_QIO)) && (handle->cfg.flags & SPI_DEVICE_3WIRE)), "Incompatible when 
setting to both multi-line mode and 3-wire-mode", ESP_ERR_INVALID_ARG); + SPI_CHECK(!((trans_desc->flags & (SPI_TRANS_MODE_DIO | SPI_TRANS_MODE_QIO)) && !is_half_duplex), "Incompatible when setting to both multi-line mode and half duplex mode", ESP_ERR_INVALID_ARG); #ifdef CONFIG_IDF_TARGET_ESP32 - SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG ); + SPI_CHECK(!is_half_duplex || !bus_attr->dma_enabled || !rx_enabled || !tx_enabled, "SPI half duplex mode does not support using DMA with both MOSI and MISO phases.", ESP_ERR_INVALID_ARG); #endif #if !SOC_SPI_HD_BOTH_INOUT_SUPPORTED //On these chips, HW doesn't support using both TX and RX phases when in halfduplex mode @@ -825,8 +848,8 @@ static SPI_MASTER_ISR_ATTR esp_err_t check_trans_valid(spi_device_handle_t handl SPI_CHECK(!is_half_duplex || trans_desc->rxlength != 0 || !rx_enabled, "trans rx_buffer should be NULL and SPI_TRANS_USE_RXDATA should be cleared to skip MISO phase.", ESP_ERR_INVALID_ARG); //In Full duplex mode, default rxlength to be the same as length, if not filled in. // set rxlength to length is ok, even when rx buffer=NULL - if (trans_desc->rxlength==0 && !is_half_duplex) { - trans_desc->rxlength=trans_desc->length; + if (trans_desc->rxlength == 0 && !is_half_duplex) { + trans_desc->rxlength = trans_desc->length; } //Dummy phase is not available when both data out and in are enabled, regardless of FD or HD mode. SPI_CHECK(!tx_enabled || !rx_enabled || !dummy_enabled || !extra_dummy_enabled, "Dummy phase is not available when both data out and in are enabled", ESP_ERR_INVALID_ARG); @@ -846,7 +869,7 @@ static SPI_MASTER_ISR_ATTR void uninstall_priv_desc(spi_trans_priv_t* trans_buf) { spi_transaction_t *trans_desc = trans_buf->trans; if ((void *)trans_buf->buffer_to_send != &trans_desc->tx_data[0] && - trans_buf->buffer_to_send != trans_desc->tx_buffer) { + trans_buf->buffer_to_send != trans_desc->tx_buffer) { free((void *)trans_buf->buffer_to_send); //force free, ignore const } // copy data from temporary DMA-capable buffer back to IRAM buffer and free the temporary one. 
@@ -868,7 +891,7 @@ static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans // rx memory assign uint32_t* rcv_ptr; - if ( trans_desc->flags & SPI_TRANS_USE_RXDATA ) { + if (trans_desc->flags & SPI_TRANS_USE_RXDATA) { rcv_ptr = (uint32_t *)&trans_desc->rx_data[0]; } else { //if not use RXDATA neither rx_buffer, buffer_to_rcv assigned to NULL @@ -877,7 +900,7 @@ static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans // tx memory assign const uint32_t *send_ptr; - if ( trans_desc->flags & SPI_TRANS_USE_TXDATA ) { + if (trans_desc->flags & SPI_TRANS_USE_TXDATA) { send_ptr = (uint32_t *)&trans_desc->tx_data[0]; } else { //if not use TXDATA neither tx_buffer, tx data assigned to NULL @@ -895,17 +918,17 @@ static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans #endif if (send_ptr && bus_attr->dma_enabled) { - if ((!esp_ptr_dma_capable(send_ptr) || tx_unaligned )) { + if ((!esp_ptr_dma_capable(send_ptr) || tx_unaligned)) { ESP_RETURN_ON_FALSE(!(trans_desc->flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but TX buffer addr&len not align to %d, or not dma_capable", alignment); //if txbuf in the desc not DMA-capable, or not bytes aligned to alignment, malloc a new one - ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA" ); + ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA"); tx_byte_len = (tx_byte_len + alignment - 1) & (~(alignment - 1)); // up align alignment uint32_t *temp = heap_caps_aligned_alloc(alignment, tx_byte_len, MALLOC_CAP_DMA); if (temp == NULL) { goto clean_up; } - memcpy( temp, send_ptr, (trans_desc->length + 7) / 8 ); + memcpy(temp, send_ptr, (trans_desc->length + 7) / 8); send_ptr = temp; } #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE @@ -914,10 +937,10 @@ static SPI_MASTER_ISR_ATTR esp_err_t setup_priv_desc(spi_host_t *host, spi_trans #endif } - if (rcv_ptr && bus_attr->dma_enabled && (!esp_ptr_dma_capable(rcv_ptr) || rx_unaligned )) { + if (rcv_ptr && bus_attr->dma_enabled && (!esp_ptr_dma_capable(rcv_ptr) || rx_unaligned)) { ESP_RETURN_ON_FALSE(!(trans_desc->flags & SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL), ESP_ERR_INVALID_ARG, SPI_TAG, "Set flag SPI_TRANS_DMA_BUFFER_ALIGN_MANUAL but RX buffer addr&len not align to %d, or not dma_capable", alignment); //if rxbuf in the desc not DMA-capable, or not aligned to alignment, malloc a new one - ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA" ); + ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA"); rx_byte_len = (rx_byte_len + alignment - 1) & (~(alignment - 1)); // up align alignment rcv_ptr = heap_caps_aligned_alloc(alignment, rx_byte_len, MALLOC_CAP_DMA); if (rcv_ptr == NULL) { @@ -936,11 +959,13 @@ clean_up: esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait) { esp_err_t ret = check_trans_valid(handle, trans_desc); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } spi_host_t *host = handle->host; - SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE ); + SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE); /* Even when using interrupt transfer, the CS can only be kept activated if the bus has been * acquired with `spi_device_acquire_bus()` first. 
*/ @@ -950,7 +975,9 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_trans(spi_device_handle_t handle, spi spi_trans_priv_t trans_buf = { .trans = trans_desc, }; ret = setup_priv_desc(host, &trans_buf); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } #ifdef CONFIG_PM_ENABLE // though clock source is selectable, read/write reg and mem of spi peripherial still use APB @@ -985,14 +1012,14 @@ esp_err_t SPI_MASTER_ATTR spi_device_get_trans_result(spi_device_handle_t handle { BaseType_t r; spi_trans_priv_t trans_buf; - SPI_CHECK(handle!=NULL, "invalid dev handle", ESP_ERR_INVALID_ARG); + SPI_CHECK(handle != NULL, "invalid dev handle", ESP_ERR_INVALID_ARG); bool use_dma = handle->host->bus_attr->dma_enabled; //if SPI_DEVICE_NO_RETURN_RESULT is set, ret_queue will always be empty SPI_CHECK(!(handle->cfg.flags & SPI_DEVICE_NO_RETURN_RESULT), "API not Supported!", ESP_ERR_NOT_SUPPORTED); //use the interrupt, block until return - r=xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait); + r = xQueueReceive(handle->ret_queue, (void*)&trans_buf, ticks_to_wait); if (!r) { // The memory occupied by rx and tx DMA buffer destroyed only when receiving from the queue (transaction finished). // If timeout, wait and retry. @@ -1015,10 +1042,14 @@ esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_tr spi_transaction_t *ret_trans; //ToDo: check if any spi transfers in flight ret = spi_device_queue_trans(handle, trans_desc, portMAX_DELAY); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } ret = spi_device_get_trans_result(handle, &ret_trans, portMAX_DELAY); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } assert(ret_trans == trans_desc); return ESP_OK; @@ -1027,8 +1058,8 @@ esp_err_t SPI_MASTER_ATTR spi_device_transmit(spi_device_handle_t handle, spi_tr esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickType_t wait) { spi_host_t *const host = device->host; - SPI_CHECK(wait==portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG); - SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE ); + SPI_CHECK(wait == portMAX_DELAY, "acquire finite time not supported now.", ESP_ERR_INVALID_ARG); + SPI_CHECK(!spi_bus_device_is_polling(device), "Cannot acquire bus when a polling transaction is in progress.", ESP_ERR_INVALID_STATE); esp_err_t ret = spi_bus_lock_acquire_start(device->dev_lock, wait); if (ret != ESP_OK) { @@ -1062,7 +1093,7 @@ void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev) { spi_host_t *host = dev->host; - if (spi_bus_device_is_polling(dev)){ + if (spi_bus_device_is_polling(dev)) { ESP_EARLY_LOGE(SPI_TAG, "Cannot release bus when a polling transaction is in progress."); assert(0); } @@ -1093,13 +1124,17 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handl esp_err_t ret; SPI_CHECK(ticks_to_wait == portMAX_DELAY, "currently timeout is not available for polling transactions", ESP_ERR_INVALID_ARG); ret = check_trans_valid(handle, trans_desc); - if (ret!=ESP_OK) return ret; - SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE ); + if (ret != ESP_OK) { + return ret; + } + SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot send polling transaction while the previous polling transaction is not terminated.", 
ESP_ERR_INVALID_STATE); spi_host_t *host = handle->host; spi_trans_priv_t priv_polling_trans = { .trans = trans_desc, }; ret = setup_priv_desc(host, &priv_polling_trans); - if (ret!=ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } /* If device_acquiring_lock is set to handle, it means that the user has already * acquired the bus thanks to the function `spi_device_acquire_bus()`. @@ -1182,7 +1217,9 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_transmit(spi_device_handle_t ha { esp_err_t ret; ret = spi_device_polling_start(handle, trans_desc, portMAX_DELAY); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } return spi_device_polling_end(handle, portMAX_DELAY); } diff --git a/components/esp_driver_spi/src/gpspi/spi_slave.c b/components/esp_driver_spi/src/gpspi/spi_slave.c index f71a0443e4..23990f0e17 100644 --- a/components/esp_driver_spi/src/gpspi/spi_slave.c +++ b/components/esp_driver_spi/src/gpspi/spi_slave.c @@ -36,7 +36,6 @@ static const char *SPI_TAG = "spi_slave"; #define SPI_CHECK(a, str, ret_val) ESP_RETURN_ON_FALSE(a, ret_val, SPI_TAG, str) - #ifdef CONFIG_SPI_SLAVE_ISR_IN_IRAM #define SPI_SLAVE_ISR_ATTR IRAM_ATTR #else @@ -98,7 +97,7 @@ static inline bool is_valid_host(spi_host_device_t host) static inline bool SPI_SLAVE_ISR_ATTR bus_is_iomux(spi_slave_t *host) { - return host->flags&SPICOMMON_BUSFLAG_IOMUX_PINS; + return host->flags & SPICOMMON_BUSFLAG_IOMUX_PINS; } static void SPI_SLAVE_ISR_ATTR freeze_cs(spi_slave_t *host) @@ -139,24 +138,24 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b esp_err_t err; SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG); #ifdef CONFIG_IDF_TARGET_ESP32 - SPI_CHECK(dma_chan >= SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG ); + SPI_CHECK(dma_chan >= SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG); #elif CONFIG_IDF_TARGET_ESP32S2 - SPI_CHECK( dma_chan == SPI_DMA_DISABLED || dma_chan == (int)host || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG ); + SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == (int)host || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG); #elif SOC_GDMA_SUPPORTED - SPI_CHECK( dma_chan == SPI_DMA_DISABLED || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG ); + SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG); #endif - SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH|ESP_INTR_FLAG_EDGE|ESP_INTR_FLAG_INTRDISABLED))==0, "intr flag not allowed", ESP_ERR_INVALID_ARG); + SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH | ESP_INTR_FLAG_EDGE | ESP_INTR_FLAG_INTRDISABLED)) == 0, "intr flag not allowed", ESP_ERR_INVALID_ARG); #ifndef CONFIG_SPI_SLAVE_ISR_IN_IRAM - SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM)==0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_SLAVE_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG); + SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM) == 0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_SLAVE_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG); #endif SPI_CHECK(slave_config->spics_io_num < 0 || GPIO_IS_VALID_GPIO(slave_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG); //Check post_trans_cb status when `SPI_SLAVE_NO_RETURN_RESULT` flag is 
set. - if(slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT) { + if (slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT) { SPI_CHECK(slave_config->post_trans_cb != NULL, "use feature flag 'SPI_SLAVE_NO_RETURN_RESULT' but no post_trans_cb function sets", ESP_ERR_INVALID_ARG); } - spi_chan_claimed=spicommon_periph_claim(host, "spi slave"); + spi_chan_claimed = spicommon_periph_claim(host, "spi slave"); SPI_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE); spihost[host] = malloc(sizeof(spi_slave_t)); @@ -181,7 +180,9 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b //See how many dma descriptors we need and allocate them int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN; - if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given + if (dma_desc_ct == 0) { + dma_desc_ct = 1; //default to 4k when max is not given + } spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN; #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE size_t alignment; @@ -203,8 +204,8 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE; } - err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE|bus_config->flags, &spihost[host]->flags); - if (err!=ESP_OK) { + err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &spihost[host]->flags); + if (err != ESP_OK) { ret = err; goto cleanup; } @@ -216,11 +217,13 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b } // The slave DMA suffers from unexpected transactions. Forbid reading if DMA is enabled by disabling the CS line. - if (spihost[host]->dma_enabled) freeze_cs(spihost[host]); + if (spihost[host]->dma_enabled) { + freeze_cs(spihost[host]); + } #ifdef CONFIG_PM_ENABLE err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", - &spihost[host]->pm_lock); + &spihost[host]->pm_lock); if (err != ESP_OK) { ret = err; goto cleanup; @@ -235,7 +238,7 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b ret = ESP_ERR_NO_MEM; goto cleanup; } - if(!(slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT)) { + if (!(slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT)) { spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_trans_priv_t)); if (!spihost[host]->ret_queue) { ret = ESP_ERR_NO_MEM; @@ -282,8 +285,12 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b cleanup: if (spihost[host]) { - if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue); - if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue); + if (spihost[host]->trans_queue) { + vQueueDelete(spihost[host]->trans_queue); + } + if (spihost[host]->ret_queue) { + vQueueDelete(spihost[host]->ret_queue); + } #ifdef CONFIG_PM_ENABLE if (spihost[host]->pm_lock) { esp_pm_lock_release(spihost[host]->pm_lock); @@ -309,8 +316,12 @@ esp_err_t spi_slave_free(spi_host_device_t host) { SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG); - if (spihost[host]->trans_queue) vQueueDelete(spihost[host]->trans_queue); - if (spihost[host]->ret_queue) vQueueDelete(spihost[host]->ret_queue); + if (spihost[host]->trans_queue) { + vQueueDelete(spihost[host]->trans_queue); + } + if (spihost[host]->ret_queue) { + vQueueDelete(spihost[host]->ret_queue); + } if 
(spihost[host]->dma_enabled) { spicommon_dma_chan_free(host); free(spihost[host]->hal.dmadesc_tx); @@ -357,10 +368,10 @@ static esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_setup_priv_trans(spi_host_device_t uint32_t buffer_byte_len = (trans->length + 7) / 8; if (spihost[host]->dma_enabled && trans->tx_buffer) { - if ((!esp_ptr_dma_capable( trans->tx_buffer ) || ((((uint32_t)trans->tx_buffer) | buffer_byte_len) & (alignment - 1)))) { + if ((!esp_ptr_dma_capable(trans->tx_buffer) || ((((uint32_t)trans->tx_buffer) | buffer_byte_len) & (alignment - 1)))) { ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "TX buffer addr&len not align to %d, or not dma_capable", alignment); //if txbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one - ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA" ); + ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA"); buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1)); // up align to "alignment" uint32_t *temp = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA); if (temp == NULL) { @@ -376,11 +387,11 @@ static esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_setup_priv_trans(spi_host_device_t if (spihost[host]->dma_enabled && trans->rx_buffer && (!esp_ptr_dma_capable(trans->rx_buffer) || ((((uint32_t)trans->rx_buffer) | (trans->length + 7) / 8) & (alignment - 1)))) { ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "RX buffer addr&len not align to %d, or not dma_capable", alignment); //if rxbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one - ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA" ); + ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA"); buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1)); // up align to "alignment" priv_trans->rx_buffer = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA); if (priv_trans->rx_buffer == NULL) { - free (priv_trans->tx_buffer); + free(priv_trans->tx_buffer); return ESP_ERR_NO_MEM; } } @@ -393,12 +404,12 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi BaseType_t r; SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG); - SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer==NULL || esp_ptr_dma_capable(trans_desc->tx_buffer), - "txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG); - SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer==NULL || - (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) && - (trans_desc->length%4==0)), - "rxdata not in DMA-capable memory or not WORD aligned", ESP_ERR_INVALID_ARG); + SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer == NULL || esp_ptr_dma_capable(trans_desc->tx_buffer), + "txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG); + SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer == NULL || + (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) && + (trans_desc->length % 4 == 0)), + "rxdata not in DMA-capable memory or not WORD aligned", ESP_ERR_INVALID_ARG); SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG); @@ -406,7 +417,9 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi SPI_CHECK(ESP_OK == 
spi_slave_setup_priv_trans(host, &priv_trans), "slave setup priv_trans failed", ESP_ERR_NO_MEM); r = xQueueSend(spihost[host]->trans_queue, (void *)&priv_trans, ticks_to_wait); - if (!r) return ESP_ERR_TIMEOUT; + if (!r) { + return ESP_ERR_TIMEOUT; + } esp_intr_enable(spihost[host]->intr); return ESP_OK; } @@ -434,7 +447,7 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_queue_reset(spi_host_device_t host) spi_ll_set_int_stat(spihost[host]->hal.hw); spi_slave_trans_priv_t trans; - while( uxQueueMessagesWaiting(spihost[host]->trans_queue)) { + while (uxQueueMessagesWaiting(spihost[host]->trans_queue)) { xQueueReceive(spihost[host]->trans_queue, &trans, 0); spi_slave_uninstall_priv_trans(host, &trans); } @@ -455,17 +468,17 @@ esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_trans_isr(spi_host_device_t host, c uint32_t buffer_byte_len = (trans_desc->length + 7) / 8; ESP_RETURN_ON_FALSE_ISR(\ - (trans_desc->tx_buffer && \ - esp_ptr_dma_capable(trans_desc->tx_buffer) && \ - ((((uint32_t)trans_desc->tx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \ - ESP_ERR_INVALID_ARG, SPI_TAG, "txdata addr & len not align to %d bytes or not dma_capable", alignment\ - ); + (trans_desc->tx_buffer && \ + esp_ptr_dma_capable(trans_desc->tx_buffer) && \ + ((((uint32_t)trans_desc->tx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \ + ESP_ERR_INVALID_ARG, SPI_TAG, "txdata addr & len not align to %d bytes or not dma_capable", alignment\ + ); ESP_RETURN_ON_FALSE_ISR(\ - (trans_desc->rx_buffer && \ - esp_ptr_dma_capable(trans_desc->rx_buffer) && \ - ((((uint32_t)trans_desc->rx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \ - ESP_ERR_INVALID_ARG, SPI_TAG, "rxdata addr & len not align to %d bytes or not dma_capable", alignment\ - ); + (trans_desc->rx_buffer && \ + esp_ptr_dma_capable(trans_desc->rx_buffer) && \ + ((((uint32_t)trans_desc->rx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \ + ESP_ERR_INVALID_ARG, SPI_TAG, "rxdata addr & len not align to %d bytes or not dma_capable", alignment\ + ); } spi_slave_trans_priv_t priv_trans = { @@ -490,7 +503,7 @@ esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_reset_isr(spi_host_device_t host) spi_slave_trans_priv_t trans; BaseType_t do_yield = pdFALSE; - while( pdFALSE == xQueueIsQueueEmptyFromISR(spihost[host]->trans_queue)) { + while (pdFALSE == xQueueIsQueueEmptyFromISR(spihost[host]->trans_queue)) { xQueueReceiveFromISR(spihost[host]->trans_queue, &trans, &do_yield); spi_slave_uninstall_priv_trans(host, &trans); } @@ -512,23 +525,28 @@ esp_err_t SPI_SLAVE_ATTR spi_slave_get_trans_result(spi_host_device_t host, spi_ spi_slave_trans_priv_t priv_trans; r = xQueueReceive(spihost[host]->ret_queue, (void *)&priv_trans, ticks_to_wait); - if (!r) return ESP_ERR_TIMEOUT; + if (!r) { + return ESP_ERR_TIMEOUT; + } spi_slave_uninstall_priv_trans(host, &priv_trans); *trans_desc = priv_trans.trans; return ESP_OK; } - esp_err_t SPI_SLAVE_ATTR spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait) { esp_err_t ret; spi_slave_transaction_t *ret_trans; //ToDo: check if any spi transfers in flight ret = spi_slave_queue_trans(host, trans_desc, ticks_to_wait); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } ret = spi_slave_get_trans_result(host, &ret_trans, ticks_to_wait); - if (ret != ESP_OK) return ret; + if (ret != ESP_OK) { + return ret; + } assert(ret_trans == trans_desc); return ESP_OK; } @@ -556,7 +574,9 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg) bool use_dma = host->dma_enabled; if 
(host->cur_trans.trans) { // When DMA is enabled, the slave rx dma suffers from unexpected transactions. Forbid reading until transaction ready. - if (use_dma) freeze_cs(host); + if (use_dma) { + freeze_cs(host); + } spi_slave_hal_store_result(hal); host->cur_trans.trans->trans_len = spi_slave_hal_get_rcv_bitlen(hal); @@ -579,9 +599,11 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg) assert(ret == ESP_OK); } #endif - if (host->cfg.post_trans_cb) host->cfg.post_trans_cb(host->cur_trans.trans); + if (host->cfg.post_trans_cb) { + host->cfg.post_trans_cb(host->cur_trans.trans); + } - if(!(host->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT)) { + if (!(host->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT)) { xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield); } host->cur_trans.trans = NULL; @@ -595,7 +617,9 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg) if (spicommon_dmaworkaround_reset_in_progress()) { //We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr. esp_intr_disable(host->intr); - if (do_yield) portYIELD_FROM_ISR(); + if (do_yield) { + portYIELD_FROM_ISR(); + } return; } } @@ -637,7 +661,11 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg) //Kick off transfer spi_slave_hal_user_start(hal); - if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(priv_trans.trans); + if (host->cfg.post_setup_cb) { + host->cfg.post_setup_cb(priv_trans.trans); + } + } + if (do_yield) { + portYIELD_FROM_ISR(); } - if (do_yield) portYIELD_FROM_ISR(); } diff --git a/components/esp_driver_spi/test_apps/spi/master/main/test_spi_master.c b/components/esp_driver_spi/test_apps/spi/master/main/test_spi_master.c index 9a881bb912..ef14ee4894 100644 --- a/components/esp_driver_spi/test_apps/spi/master/main/test_spi_master.c +++ b/components/esp_driver_spi/test_apps/spi/master/main/test_spi_master.c @@ -1591,7 +1591,6 @@ void test_add_device_slave(void) TEST_CASE_MULTIPLE_DEVICES("SPI_Master:Test multiple devices", "[spi_ms]", test_add_device_master, test_add_device_slave); - #if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE) #define TEST_ISR_CNT 100 diff --git a/components/esp_driver_spi/test_apps/spi/slave/main/test_spi_slave.c b/components/esp_driver_spi/test_apps/spi/slave/main/test_spi_slave.c index 9e0ac96e4d..6f1d471b48 100644 --- a/components/esp_driver_spi/test_apps/spi/slave/main/test_spi_slave.c +++ b/components/esp_driver_spi/test_apps/spi/slave/main/test_spi_slave.c @@ -652,7 +652,7 @@ static IRAM_ATTR void spi_queue_reset_in_isr(void) uint8_t *slave_isr_send = heap_caps_aligned_alloc(64, TEST_BUFFER_SZ, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); uint8_t *slave_isr_recv = heap_caps_aligned_alloc(64, TEST_BUFFER_SZ, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - uint8_t *dummy_data = heap_caps_aligned_alloc(64, 64*2, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + uint8_t *dummy_data = heap_caps_aligned_alloc(64, 64 * 2, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); uint8_t *slave_isr_exp = heap_caps_malloc(TEST_BUFFER_SZ, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL); get_tx_buffer(1001, slave_isr_exp, slave_isr_send, TEST_BUFFER_SZ); get_tx_buffer(1001, dummy_data, dummy_data + 64, 64);