feat(mbedtls/sha): Support SHA-DMA operations by satisfying L1 cache alignment requirements

- Use block mode as the default SHA operation mode instead of DMA
Author: harshal.patil
Date: 2024-03-26 11:29:28 +05:30
parent d737625215
commit 211a2a5477
2 changed files with 22 additions and 11 deletions
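
Note: making block mode the default implies a size-based dispatch in the SHA driver. A minimal sketch of such a policy is below; the helper name and threshold are hypothetical and not taken from this commit.

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical cutoff: below it, DMA descriptor setup plus cache
     * writeback costs more than hashing the block directly on the CPU. */
    #define SHA_DMA_THRESHOLD_BYTES 256

    static bool sha_should_use_dma(size_t ilen)
    {
        /* Block mode is the default; DMA only pays off for larger inputs. */
        return ilen >= SHA_DMA_THRESHOLD_BYTES;
    }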


@@ -17,27 +17,27 @@ extern "C"
 #endif
-#if (SOC_AES_SUPPORT_DMA) && (SOC_SHA_SUPPORT_DMA)
+#if (SOC_AES_SUPPORT_DMA) || (SOC_SHA_SUPPORT_DMA)
-#if (SOC_AES_GDMA) && (SOC_SHA_GDMA)
+#if (SOC_AES_GDMA) || (SOC_SHA_GDMA)
-#if (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SHA0_BUS == SOC_GDMA_BUS_AHB)
+#if (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AHB) || (SOC_GDMA_TRIG_PERIPH_SHA0_BUS == SOC_GDMA_BUS_AHB)
 #define DMA_DESC_MEM_ALIGN_SIZE 4
 typedef dma_descriptor_align4_t crypto_dma_desc_t;
-#elif (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SHA0_BUS == SOC_GDMA_BUS_AXI)
+#elif (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AXI) || (SOC_GDMA_TRIG_PERIPH_SHA0_BUS == SOC_GDMA_BUS_AXI)
 #define DMA_DESC_MEM_ALIGN_SIZE 8
 typedef dma_descriptor_align8_t crypto_dma_desc_t;
 #else
 #error "As we support a shared crypto GDMA layer for the AES and the SHA peripheral, both the peripherals must use the same GDMA bus"
-#endif /* (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AHB) */
+#endif /* (SOC_GDMA_TRIG_PERIPH_AES0_BUS == SOC_GDMA_BUS_AHB) || (SOC_GDMA_TRIG_PERIPH_SHA0_BUS == SOC_GDMA_BUS_AHB) */
-#elif (SOC_AES_CRYPTO_DMA) && (SOC_SHA_CRYPTO_DMA)
+#elif (SOC_AES_CRYPTO_DMA) || (SOC_SHA_CRYPTO_DMA)
 #define DMA_DESC_MEM_ALIGN_SIZE 4
 typedef dma_descriptor_align4_t crypto_dma_desc_t;
 #endif /* (SOC_AES_GDMA) && (SOC_SHA_GDMA) */
-#endif /* (SOC_AES_SUPPORT_DMA) && (SOC_SHA_SUPPORT_DMA) */
+#endif /* (SOC_AES_SUPPORT_DMA) || (SOC_SHA_SUPPORT_DMA) */
 #ifdef __cplusplus
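
With the conditions above relaxed from && to ||, DMA_DESC_MEM_ALIGN_SIZE and crypto_dma_desc_t are defined whenever either peripheral supports DMA. A sketch of how a caller might consume them, assuming the standard heap_caps_aligned_calloc allocator (the helper itself is illustrative, not part of this commit):

    #include "esp_crypto_dma.h"
    #include "esp_heap_caps.h"

    /* Allocate an n-entry descriptor chain in DMA-capable memory, aligned as
     * the selected GDMA bus requires (4 bytes on AHB, 8 bytes on AXI). */
    static crypto_dma_desc_t *crypto_dma_desc_chain_alloc(size_t n)
    {
        return heap_caps_aligned_calloc(DMA_DESC_MEM_ALIGN_SIZE, n,
                                        sizeof(crypto_dma_desc_t),
                                        MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
    }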


@@ -29,12 +29,15 @@
 #include <stdio.h>
 #include <sys/lock.h>
+#include "esp_dma_utils.h"
+#include "esp_private/esp_cache_private.h"
 #include "esp_log.h"
 #include "esp_memory_utils.h"
 #include "esp_crypto_lock.h"
 #include "esp_attr.h"
 #include "esp_crypto_dma.h"
 #include "esp_cache.h"
+#include "hal/dma_types.h"
 #include "soc/ext_mem_defs.h"
 #include "soc/periph_defs.h"
@@ -155,10 +158,6 @@ static void esp_sha_block_mode(esp_sha_type sha_type, const uint8_t *input, uint
     }
 }
-/* These are static due to:
- * * Must be in DMA capable memory, so stack is not a safe place to put them
- * * To avoid having to malloc/free them for every DMA operation
- */
 static DRAM_ATTR crypto_dma_desc_t s_dma_descr_input;
 static DRAM_ATTR crypto_dma_desc_t s_dma_descr_buf;
@@ -198,6 +197,18 @@ static esp_err_t esp_sha_dma_process(esp_sha_type sha_type, const void *input, u
         s_dma_descr_buf.next = (&s_dma_descr_input);
     }
+#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
+    if (ilen) {
+        ESP_ERROR_CHECK(esp_cache_msync(&s_dma_descr_input, sizeof(crypto_dma_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED));
+        ESP_ERROR_CHECK(esp_cache_msync(s_dma_descr_input.buffer, s_dma_descr_input.dw0.length, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED));
+    }
+    if (buf_len) {
+        ESP_ERROR_CHECK(esp_cache_msync(&s_dma_descr_buf, sizeof(crypto_dma_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED));
+        ESP_ERROR_CHECK(esp_cache_msync(s_dma_descr_buf.buffer, s_dma_descr_buf.dw0.length, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED));
+    }
+#endif /* SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE */
     if (esp_sha_dma_start(dma_descr_head) != ESP_OK) {
         ESP_LOGE(TAG, "esp_sha_dma_start failed, no DMA channel available");
         return -1;
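
The hunk above is the heart of the fix: on targets where internal memory is reached through the L1 cache (SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE), descriptors and buffers the CPU has written must be flushed cache-to-memory before the DMA engine reads them, otherwise it may observe stale data. A condensed sketch of the same pattern (the helper name is illustrative, not from this commit):

    #include "esp_cache.h"
    #include "esp_check.h"
    #include "esp_crypto_dma.h"

    /* Write back one descriptor and the buffer it points at, so the SHA DMA
     * engine reads the CPU's latest data instead of stale memory contents. */
    static esp_err_t sha_dma_writeback(const crypto_dma_desc_t *desc)
    {
        /* C2M = cache-to-memory (writeback); UNALIGNED allows spans that do
         * not start or end on a cache-line boundary. */
        const int flags = ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED;
        ESP_RETURN_ON_ERROR(esp_cache_msync((void *)desc, sizeof(*desc), flags),
                            "sha", "descriptor writeback failed");
        return esp_cache_msync(desc->buffer, desc->dw0.length, flags);
    }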