flash mmap: abstract R/W MMU table instead of reg access

jiangguangming
2022-04-18 15:04:10 +08:00
parent 96965d5d64
commit 9c6afee12f
9 changed files with 276 additions and 44 deletions
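In short: spi_flash_mmap.c used to read and write the flash MMU table through target-specific registers (DPORT_PRO/APP_FLASH_MMU_TABLE on ESP32, a raw pointer into DR_REG_MMU_TABLE elsewhere). This commit hides those accesses behind inline mmu_ll_* helpers in each target's hal/mmu_ll.h. A minimal sketch of the resulting call surface, using the declarations added in the diffs below (the remap_page() wrapper itself is hypothetical, not part of the commit):

#include "hal/mmu_ll.h"

// Hypothetical helper, for illustration only: point one core-0 MMU entry
// at a flash physical page.
static inline void remap_page(uint32_t entry_id, uint32_t phys_page)
{
    // Read the raw entry and its valid bit without knowing whether the
    // target uses DPORT registers or a plain memory-mapped table.
    uint32_t raw = mmu_ll_read_entry(MMU_TABLE_CORE0, entry_id);
    if (!mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, entry_id) && raw != phys_page) {
        mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, entry_id);
    }
    // Last argument is the mmu_target_t; the mmap code below passes 0.
    mmu_ll_write_entry(MMU_TABLE_CORE0, entry_id, phys_page, 0);
}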

View File

@@ -69,6 +69,62 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
 (ADDRESS_IN_DROM0_CACHE(vaddr_start) && ADDRESS_IN_DROM0_CACHE(vaddr_end));
 }

+/**
+ * Write to the MMU table to map the virtual memory and the physical memory
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ * @param mmu_val Value to be set into an MMU entry, for physical address
+ * @param target MMU target physical memory
+ */
+__attribute__((always_inline))
+static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
+{
+    (void)target;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    DPORT_INTERRUPT_DISABLE();
+    switch (mmu_id) {
+        case MMU_TABLE_CORE0:
+            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id], mmu_val);
+            break;
+        case MMU_TABLE_CORE1:
+            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[entry_id], mmu_val);
+            break;
+        default:
+            HAL_ASSERT(false);
+    }
+    DPORT_INTERRUPT_RESTORE();
+}
+
+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    uint32_t mmu_value;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    DPORT_INTERRUPT_DISABLE();
+    switch (mmu_id) {
+        case MMU_TABLE_CORE0:
+            mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
+            break;
+        case MMU_TABLE_CORE1:
+            mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[entry_id]);
+            break;
+        default:
+            HAL_ASSERT(false);
+    }
+    DPORT_INTERRUPT_RESTORE();
+
+    return mmu_value;
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -82,14 +138,14 @@ static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
     DPORT_INTERRUPT_DISABLE();
     switch (mmu_id) {
-        case 0:
-            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id], DPORT_FLASH_MMU_TABLE_INVALID_VAL);
+        case MMU_TABLE_CORE0:
+            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id], MMU_INVALID);
             break;
-        case 1:
-            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[entry_id], DPORT_FLASH_MMU_TABLE_INVALID_VAL);
+        case MMU_TABLE_CORE1:
+            DPORT_WRITE_PERI_REG((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[entry_id], MMU_INVALID);
             break;
         default:
-            HAL_ASSERT(false && "invalid mmu_id");
+            HAL_ASSERT(false);
     }
     DPORT_INTERRUPT_RESTORE();
 }
@@ -107,6 +163,25 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+
+    DPORT_INTERRUPT_DISABLE();
+    uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
+    DPORT_INTERRUPT_RESTORE();
+
+    return (mmu_value & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif
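A note on the ESP32 variant above: the MMU table sits behind DPORT registers, which need the protected sequence-read idiom, and the LL helpers now carry that idiom internally. The caller-side effect (illustrative, mirroring the spi_flash_mmap.c hunks further down):

// before: every call site managed DPORT protection itself
DPORT_INTERRUPT_DISABLE();
uint32_t val = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
DPORT_INTERRUPT_RESTORE();

// after: the disable/read/restore sequence lives inside the helper
uint32_t val = mmu_ll_read_entry(MMU_TABLE_CORE0, i);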

View File

@@ -147,6 +147,22 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
     *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | MMU_ACCESS_FLASH | MMU_VALID;
 }

+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -175,6 +191,22 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -114,6 +114,22 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
     *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | MMU_ACCESS_FLASH | MMU_VALID;
 }

+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -142,6 +158,22 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -114,6 +114,22 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
     *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | MMU_ACCESS_FLASH | MMU_VALID;
 }

+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -142,6 +158,22 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -138,6 +138,22 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
     *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | target_code | MMU_VALID;
 }

+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -166,6 +182,22 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -114,6 +114,22 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
     *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | target_code | MMU_VALID;
 }

+/**
+ * Read the raw value from the MMU table
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return The raw value of the MMU entry
+ */
+__attribute__((always_inline))
+static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+}
+
 /**
  * Set MMU table entry as invalid
  *
@@ -142,6 +158,22 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

+/**
+ * Check whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ *
+ * @return true if the MMU entry is invalid, false otherwise
+ */
+__attribute__((always_inline))
+static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+
+    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+}
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -28,6 +28,14 @@ typedef enum {
     MMU_TARGET_PSRAM0,
 } mmu_target_t;

+/**
+ * MMU table ID
+ */
+typedef enum {
+    MMU_TABLE_CORE0,
+    MMU_TABLE_CORE1,
+} mmu_table_id_t;
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -37,6 +37,8 @@ extern "C" {
 #define ADDRESS_IN_DRAM1_CACHE(vaddr)    ADDRESS_IN_BUS(DRAM1_CACHE, vaddr)
 #define ADDRESS_IN_DROM0_CACHE(vaddr)    ADDRESS_IN_BUS(DROM0_CACHE, vaddr)

+#define MMU_INVALID                      BIT(8)
+
 //MMU entry num
 #define MMU_ENTRY_NUM                    256

View File

@@ -12,16 +12,13 @@
 #include <freertos/FreeRTOS.h>
 #include <freertos/task.h>
 #include <freertos/semphr.h>
-#include "soc/soc.h"
-#include "soc/soc_memory_layout.h"
-#include "soc/dport_access.h"
 #include "sdkconfig.h"
 #include "esp_attr.h"
 #include "esp_spi_flash.h"
 #include "esp_flash_encrypt.h"
-#include "esp_rom_spiflash.h"
 #include "esp_log.h"
 #include "cache_utils.h"
+#include "hal/mmu_ll.h"

 #if CONFIG_IDF_TARGET_ESP32
 #include "soc/dport_reg.h"
@@ -87,28 +84,26 @@ static void IRAM_ATTR spi_flash_mmap_init(void)
     if (s_mmap_page_refcnt[SOC_MMU_DROM0_PAGES_START] != 0) {
         return; /* mmap data already initialised */
     }
-    DPORT_INTERRUPT_DISABLE();
     for (int i = 0; i < SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION; ++i) {
-        uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i]);
+        uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
 #if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
-        uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);
+        uint32_t entry_app = mmu_ll_read_entry(MMU_TABLE_CORE1, i);
         if (entry_pro != entry_app) {
             // clean up entries used by boot loader
-            entry_pro = SOC_MMU_INVALID_ENTRY_VAL;
-            SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i] = SOC_MMU_INVALID_ENTRY_VAL;
+            mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
         }
 #endif
-        if ((entry_pro & SOC_MMU_INVALID_ENTRY_VAL) == 0 && (i == SOC_MMU_DROM0_PAGES_START || i == SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
+        bool entry_pro_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, i);
+        if (!entry_pro_invalid && (i == SOC_MMU_DROM0_PAGES_START || i == SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
             s_mmap_page_refcnt[i] = 1;
         } else {
-            SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i] = SOC_MMU_INVALID_ENTRY_VAL;
+            mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
 #if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
-            DPORT_APP_FLASH_MMU_TABLE[i] = SOC_MMU_INVALID_ENTRY_VAL;
+            mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, i);
 #endif
         }
     }
-    DPORT_INTERRUPT_RESTORE();
 }

 static void IRAM_ATTR get_mmu_region(spi_flash_mmap_memory_t memory, int* out_begin, int* out_size,uint32_t* region_addr)
@@ -133,7 +128,7 @@ esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_
     if (src_addr & INVALID_PHY_PAGE) {
         return ESP_ERR_INVALID_ARG;
     }
-    if (src_addr + size > g_rom_flashchip.chip_size) {
+    if ((src_addr + size) > spi_flash_get_chip_size()) {
         return ESP_ERR_INVALID_ARG;
     }
     // region which should be mapped
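For context, this bounds check guards the public mapping path. A hedged usage sketch of the spi_flash_mmap API (offset and size are arbitrary example values):

// Map 64 KB of flash at offset 0x10000 into the data-cache region.
spi_flash_mmap_handle_t handle;
const void *ptr;
esp_err_t err = spi_flash_mmap(0x10000, 0x10000, SPI_FLASH_MMAP_DATA, &ptr, &handle);
if (err == ESP_OK) {
    // ... read mapped flash through ptr ...
    spi_flash_munmap(handle);
}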
@@ -166,7 +161,7 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
         return ESP_ERR_INVALID_ARG;
     }
     for (int i = 0; i < page_count; i++) {
-        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
+        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= spi_flash_get_chip_size()) {
             return ESP_ERR_INVALID_ARG;
         }
     }
@@ -197,15 +192,13 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
     for (start = region_begin; start < end; ++start) {
         int pageno = 0;
         int pos;
-        DPORT_INTERRUPT_DISABLE();
         for (pos = start; pos < start + page_count; ++pos, ++pageno) {
-            int table_val = (int) DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[pos]);
+            int table_val = (int) mmu_ll_read_entry(MMU_TABLE_CORE0, pos);
             uint8_t refcnt = s_mmap_page_refcnt[pos];
             if (refcnt != 0 && table_val != SOC_MMU_PAGE_IN_FLASH(pages[pageno])) {
                 break;
             }
         }
-        DPORT_INTERRUPT_RESTORE();
         // whole mapping range matched, bail out
         if (pos - start == page_count) {
             break;
@@ -217,12 +210,11 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
     } else {
         // set up mapping using pages
         uint32_t pageno = 0;
-        DPORT_INTERRUPT_DISABLE();
         for (int i = start; i != start + page_count; ++i, ++pageno) {
             // sanity check: we won't reconfigure entries with non-zero reference count
-            uint32_t entry_pro = DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i]);
+            uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
 #if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
-            uint32_t entry_app = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_APP_FLASH_MMU_TABLE[i]);
+            uint32_t entry_app = mmu_ll_read_entry(MMU_TABLE_CORE1, i);
 #endif
             assert(s_mmap_page_refcnt[i] == 0 ||
                     (entry_pro == SOC_MMU_PAGE_IN_FLASH(pages[pageno])
@@ -236,9 +228,9 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
                     || entry_app != SOC_MMU_PAGE_IN_FLASH(pages[pageno])
 #endif
                 ) {
-                SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i] = SOC_MMU_PAGE_IN_FLASH(pages[pageno]);
+                mmu_ll_write_entry(MMU_TABLE_CORE0, i, pages[pageno], 0);
 #if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
-                DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
+                mmu_ll_write_entry(MMU_TABLE_CORE1, i, pages[pageno], 0);
 #endif
 #if !CONFIG_IDF_TARGET_ESP32
@@ -249,7 +241,6 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
             }
             ++s_mmap_page_refcnt[i];
         }
-        DPORT_INTERRUPT_RESTORE();
         LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
         new_entry->page = start;
         new_entry->count = page_count;
@@ -299,9 +290,9 @@ void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
     for (int i = it->page; i < it->page + it->count; ++i) {
         assert(s_mmap_page_refcnt[i] > 0);
         if (--s_mmap_page_refcnt[i] == 0) {
-            SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i] = SOC_MMU_INVALID_ENTRY_VAL;
+            mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
 #if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
-            DPORT_APP_FLASH_MMU_TABLE[i] = SOC_MMU_INVALID_ENTRY_VAL;
+            mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, i);
 #endif
         }
     }
@@ -327,7 +318,7 @@ static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int i
 {
     uint32_t value;
     spi_flash_disable_interrupts_caches_and_other_cpu();
-    value = DPORT_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[index]);
+    value = mmu_ll_read_entry(MMU_TABLE_CORE0, index);
     spi_flash_enable_interrupts_caches_and_other_cpu();
     return value;
 }
@@ -357,13 +348,12 @@ uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
     int region_size; // number of pages to check
     uint32_t region_addr; // base address of memory region
     get_mmu_region(memory,&region_begin,&region_size,&region_addr);
-    DPORT_INTERRUPT_DISABLE();
     for (int i = region_begin; i < region_begin + region_size; ++i) {
-        if (s_mmap_page_refcnt[i] == 0 && DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i]) == SOC_MMU_INVALID_ENTRY_VAL) {
+        bool entry_is_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, i);
+        if (s_mmap_page_refcnt[i] == 0 && entry_is_invalid) {
             count++;
         }
     }
-    DPORT_INTERRUPT_RESTORE();
     spi_flash_enable_interrupts_caches_and_other_cpu();
     return count;
 }
@@ -400,7 +390,8 @@ size_t spi_flash_cache2phys(const void *cached)
         return SPI_FLASH_CACHE2PHYS_FAIL;
     }
     uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
-    if (phys_page == SOC_MMU_INVALID_ENTRY_VAL) {
+    bool entry_is_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, cache_page);
+    if (entry_is_invalid) {
         /* page is not mapped */
         return SPI_FLASH_CACHE2PHYS_FAIL;
     }
@@ -426,9 +417,8 @@ const void *IRAM_ATTR spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memo
         page_delta = SOC_MMU_IROM0_PAGES_START;
     }
     spi_flash_disable_interrupts_caches_and_other_cpu();
-    DPORT_INTERRUPT_DISABLE();
     for (int i = start; i < end; i++) {
-        uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i]);
+        uint32_t mmu_value = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
 #if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
         if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
             if (mmu_value & MMU_ACCESS_SPIRAM) {
@@ -448,12 +438,10 @@ const void *IRAM_ATTR spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memo
         if (mmu_value == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
             i -= page_delta;
             intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
-            DPORT_INTERRUPT_RESTORE();
             spi_flash_enable_interrupts_caches_and_other_cpu();
             return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
         }
     }
-    DPORT_INTERRUPT_RESTORE();
     spi_flash_enable_interrupts_caches_and_other_cpu();
     return NULL;
 }
@@ -472,10 +460,10 @@ static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_page, const void **o
     start[1] = SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE;
     end[1] = SOC_MMU_IROM0_PAGES_END;

-    DPORT_INTERRUPT_DISABLE();
     for (int j = 0; j < 2; j++) {
         for (int i = start[j]; i < end[j]; i++) {
-            if (DPORT_SEQUENCE_REG_READ((uint32_t)&SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE[i]) == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
+            uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
+            if (entry_pro == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
 #if !CONFIG_IDF_TARGET_ESP32
                 if (j == 0) { /* SPI_FLASH_MMAP_DATA */
                     *out_ptr = (const void *)(SOC_MMU_VADDR0_START_ADDR + SPI_FLASH_MMU_PAGE_SIZE * (i - start[0]));
@@ -483,12 +471,10 @@ static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_page, const void **o
                     *out_ptr = (const void *)(SOC_MMU_VADDR1_FIRST_USABLE_ADDR + SPI_FLASH_MMU_PAGE_SIZE * (i - start[1]));
                 }
 #endif
-                DPORT_INTERRUPT_RESTORE();
                 return true;
             }
         }
     }
-    DPORT_INTERRUPT_RESTORE();
     return false;
 }
@@ -502,6 +488,7 @@ IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length)
     length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
     for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
         uint32_t page = addr / SPI_FLASH_MMU_PAGE_SIZE;
+        // TODO: IDF-4969
         if (page >= 256) {
             return false; /* invalid address */
         }