mirror of https://github.com/espressif/esp-idf.git
fix(cache): no longer use freeze in esp_cache_msync

Writeback and invalidation don't require the cache to be frozen first; a short critical section around each HAL call is sufficient.
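For context, here is a minimal sketch of how a caller typically reaches this code path. The buffer size, the 64-byte alignment, and the function name are illustrative assumptions, not part of this commit; heap_caps_aligned_alloc with MALLOC_CAP_DMA is one common way to obtain a DMA-capable, cache-line-aligned buffer.

    #include <stdint.h>
    #include <string.h>
    #include "esp_cache.h"
    #include "esp_err.h"
    #include "esp_heap_caps.h"

    void dma_buffer_sync_sketch(void)
    {
        /* Illustrative: 64-byte alignment assumed to match the data cache line size. */
        size_t buf_size = 1024;
        uint8_t *buf = heap_caps_aligned_alloc(64, buf_size, MALLOC_CAP_DMA);
        if (buf == NULL) {
            return;
        }
        memset(buf, 0xAA, buf_size);

        /* Write dirty lines back to memory, then invalidate them, so a DMA
         * peripheral sees the data and the CPU re-reads fresh lines afterwards.
         * After this commit, esp_cache_msync no longer freezes the cache to do this. */
        ESP_ERROR_CHECK(esp_cache_msync(buf, buf_size, ESP_CACHE_MSYNC_FLAG_INVALIDATE));

        /* ... start the DMA transfer on `buf` here ... */

        heap_caps_free(buf);
    }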
@@ -15,35 +15,10 @@
 #include "esp_cache.h"
 #include "esp_private/critical_section.h"
 
 static const char *TAG = "cache";
 
 #if SOC_CACHE_WRITEBACK_SUPPORTED
 DEFINE_CRIT_SECTION_LOCK_STATIC(s_spinlock);
 
-void s_cache_freeze(void)
-{
-#if SOC_CACHE_FREEZE_SUPPORTED
-    cache_hal_freeze(CACHE_TYPE_DATA | CACHE_TYPE_INSTRUCTION);
-#endif
-
-    /**
-     * For chips that support writeback but not freeze (currently only the S2):
-     * they are single core, so the critical section is enough to prevent
-     * preemption from a non-IRAM ISR.
-     */
-}
-
-void s_cache_unfreeze(void)
-{
-#if SOC_CACHE_FREEZE_SUPPORTED
-    cache_hal_unfreeze(CACHE_TYPE_DATA | CACHE_TYPE_INSTRUCTION);
-#endif
-
-    /**
-     * Similarly, for chips that support writeback but not freeze (currently
-     * only the S2), nothing more is needed here.
-     */
-}
 #endif //#if SOC_CACHE_WRITEBACK_SUPPORTED
@@ -54,9 +29,7 @@ esp_err_t esp_cache_msync(void *addr, size_t size, int flags)
 
 #if SOC_CACHE_WRITEBACK_SUPPORTED
     if ((flags & ESP_CACHE_MSYNC_FLAG_UNALIGNED) == 0) {
-        esp_os_enter_critical_safe(&s_spinlock);
         uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA);
-        esp_os_exit_critical_safe(&s_spinlock);
 
         ESP_RETURN_ON_FALSE_ISR(((uint32_t)addr % data_cache_line_size) == 0, ESP_ERR_INVALID_ARG, TAG, "start address isn't aligned with the data cache line size (%d)B", data_cache_line_size);
         ESP_RETURN_ON_FALSE_ISR((size % data_cache_line_size) == 0, ESP_ERR_INVALID_ARG, TAG, "size isn't aligned with the data cache line size (%d)B", data_cache_line_size);
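For example, with a 32-byte data cache line, syncing 64 bytes starting at an address that is a multiple of 32 passes both checks, while a 50-byte size fails with ESP_ERR_INVALID_ARG unless the caller passes ESP_CACHE_MSYNC_FLAG_UNALIGNED, which skips the checks entirely.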
@@ -66,15 +39,14 @@
     uint32_t vaddr = (uint32_t)addr;
 
     esp_os_enter_critical_safe(&s_spinlock);
-    s_cache_freeze();
-
     cache_hal_writeback_addr(vaddr, size);
-    if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) {
-        cache_hal_invalidate_addr(vaddr, size);
-    }
-
-    s_cache_unfreeze();
     esp_os_exit_critical_safe(&s_spinlock);
+
+    if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) {
+        esp_os_enter_critical_safe(&s_spinlock);
+        cache_hal_invalidate_addr(vaddr, size);
+        esp_os_exit_critical_safe(&s_spinlock);
+    }
 #endif
 
     return ESP_OK;
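Design note (an inference from the diff, not stated in the commit message): writeback-by-address and invalidate-by-address each only need protection for their own duration, so splitting them into two short critical sections keeps interrupts masked for less time than the previous freeze plus one long critical section, at the cost of allowing an interrupt to run between the writeback and the invalidation.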