forked from espressif/esp-idf
esp32c6: mmu & cache related g0 components changes
@@ -54,9 +54,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
     cache_bus_mask_t mask = 0;

     uint32_t vaddr_end = vaddr_start + len - 1;
-    if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
+    if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
         mask |= CACHE_BUS_IBUS0;
-    } else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
+    } else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
         mask |= CACHE_BUS_DBUS0;
     } else {
         HAL_ASSERT(0); //Out of region
@@ -81,12 +81,12 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma
     HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0);

     uint32_t ibus_mask = 0;
-    ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_ICACHE_SHUT_IBUS : 0;
-    REG_CLR_BIT(EXTMEM_ICACHE_CTRL1_REG, ibus_mask);
+    ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_DCACHE_SHUT_DBUS0 : 0;
+    REG_CLR_BIT(EXTMEM_ICACHE_CTRL_REG, ibus_mask);

     uint32_t dbus_mask = 0;
-    dbus_mask |= (mask & CACHE_BUS_DBUS0) ? EXTMEM_ICACHE_SHUT_DBUS : 0;
-    REG_CLR_BIT(EXTMEM_ICACHE_CTRL1_REG, dbus_mask);
+    dbus_mask |= (mask & CACHE_BUS_DBUS0) ? EXTMEM_DCACHE_SHUT_DBUS1 : 0;
+    REG_CLR_BIT(EXTMEM_ICACHE_CTRL_REG, dbus_mask);
 }

 /**
@@ -103,12 +103,12 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m
     HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0);

     uint32_t ibus_mask = 0;
-    ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_ICACHE_SHUT_IBUS : 0;
-    REG_SET_BIT(EXTMEM_ICACHE_CTRL1_REG, ibus_mask);
+    ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_DCACHE_SHUT_DBUS0 : 0;
+    REG_SET_BIT(EXTMEM_ICACHE_CTRL_REG, ibus_mask);

     uint32_t dbus_mask = 0;
-    dbus_mask |= (mask & CACHE_BUS_DBUS0) ? EXTMEM_ICACHE_SHUT_DBUS : 0;
-    REG_SET_BIT(EXTMEM_ICACHE_CTRL1_REG, dbus_mask);
+    dbus_mask |= (mask & CACHE_BUS_DBUS0) ? EXTMEM_DCACHE_SHUT_DBUS1 : 0;
+    REG_SET_BIT(EXTMEM_ICACHE_CTRL_REG, dbus_mask);
 }

 /*------------------------------------------------------------------------------
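For reference, both helpers above follow the same convention: a set "shut" bit masks a cache bus off, so enabling a bus clears its shut bits (REG_CLR_BIT) and disabling sets them (REG_SET_BIT). A minimal standalone sketch of that convention, with hypothetical register and bit names rather than the real EXTMEM layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the EXTMEM_*_SHUT_* bits used above. */
    #define SHUT_IBUS0 (1u << 0)
    #define SHUT_DBUS0 (1u << 1)

    static uint32_t cache_ctrl = SHUT_IBUS0 | SHUT_DBUS0;     /* both buses masked off */

    static void bus_enable(uint32_t shut_bits)  { cache_ctrl &= ~shut_bits; }  /* like REG_CLR_BIT */
    static void bus_disable(uint32_t shut_bits) { cache_ctrl |=  shut_bits; }  /* like REG_SET_BIT */

    int main(void)
    {
        bus_enable(SHUT_IBUS0);
        printf("ctrl = 0x%08x\n", (unsigned)cache_ctrl);   /* 0x00000002: only DBUS0 still shut */
        bus_disable(SHUT_IBUS0);
        printf("ctrl = 0x%08x\n", (unsigned)cache_ctrl);   /* 0x00000003 */
        return 0;
    }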
@@ -122,7 +122,8 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m
  */
 static inline void cache_ll_l1_enable_access_error_intr(uint32_t cache_id, uint32_t mask)
 {
-    SET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_ENA_REG, mask);
+    // TODO: IDF-5656
+    // SET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_ENA_REG, mask);
 }

 /**
@@ -133,7 +134,8 @@ static inline void cache_ll_l1_enable_access_error_intr(uint32_t cache_id, uint3
  */
 static inline void cache_ll_l1_clear_access_error_intr(uint32_t cache_id, uint32_t mask)
 {
-    SET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_CLR_REG, mask);
+    // TODO: IDF-5656
+    // SET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_CLR_REG, mask);
 }

 /**
@@ -146,7 +148,9 @@ static inline void cache_ll_l1_clear_access_error_intr(uint32_t cache_id, uint32
  */
 static inline uint32_t cache_ll_l1_get_access_error_intr_status(uint32_t cache_id, uint32_t mask)
 {
-    return GET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_ST_REG, mask);
+    // TODO: IDF-5656
+    // return GET_PERI_REG_MASK(EXTMEM_CORE0_ACS_CACHE_INT_ST_REG, mask);
+    return 0;
 }

 /**
@@ -157,7 +161,8 @@ static inline uint32_t cache_ll_l1_get_access_error_intr_status(uint32_t cache_i
  */
 static inline void cache_ll_l1_enable_illegal_error_intr(uint32_t cache_id, uint32_t mask)
 {
-    SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_ENA_REG, mask);
+    // TODO: IDF-5656
+    // SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_ENA_REG, mask);
 }

 /**
@@ -168,7 +173,8 @@ static inline void cache_ll_l1_enable_illegal_error_intr(uint32_t cache_id, uint
  */
 static inline void cache_ll_l1_clear_illegal_error_intr(uint32_t cache_id, uint32_t mask)
 {
-    SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_CLR_REG, mask);
+    // TODO: IDF-5656
+    // SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_CLR_REG, mask);
 }

 /**
@@ -181,7 +187,9 @@ static inline void cache_ll_l1_clear_illegal_error_intr(uint32_t cache_id, uint3
  */
 static inline uint32_t cache_ll_l1_get_illegal_error_intr_status(uint32_t cache_id, uint32_t mask)
 {
-    return GET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_ST_REG, mask);
+    // TODO: IDF-5656
+    // return GET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_ST_REG, mask);
+    return 0;
 }

 #ifdef __cplusplus

@@ -8,16 +8,32 @@

 #pragma once

 #include "soc/extmem_reg.h"
+#include "soc/spi_mem_reg.h"
 #include "soc/ext_mem_defs.h"
 #include "hal/assert.h"
 #include "hal/mmu_types.h"
+#include "hal/efuse_ll.h"

-
 #ifdef __cplusplus
 extern "C" {
 #endif

+/**
+ * @brief The real MMU page size get from Kconfig.
+ *
+ * @note Only used in this file
+ */
+#define MMU_LL_PAGE_SIZE (CONFIG_MMU_PAGE_SIZE)
+
+__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
+{
+    unsigned cnt = efuse_ll_get_flash_crypt_cnt();
+    // 3 bits wide, any odd number - 1 or 3 - bits set means encryption is on
+    cnt = ((cnt >> 2) ^ (cnt >> 1) ^ cnt) & 0x1;
+    return (cnt == 1);
+}
+
 /**
  * Get MMU page size
  *
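The encryption check added here folds the 3-bit flash_crypt_cnt efuse value down to its parity: an odd number of programmed bits (1 or 3, as the comment says) means flash encryption is on. A standalone sketch of just that fold, with the efuse read stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same parity fold as mmu_ll_cache_encryption_enabled(), minus the efuse access. */
    static bool crypt_cnt_odd(unsigned cnt)
    {
        return (((cnt >> 2) ^ (cnt >> 1) ^ cnt) & 0x1) == 1;
    }

    int main(void)
    {
        for (unsigned cnt = 0; cnt < 8; cnt++) {
            /* 0b001, 0b010, 0b100 and 0b111 report "on"; the rest report "off". */
            printf("cnt=%u -> %s\n", cnt, crypt_cnt_odd(cnt) ? "on" : "off");
        }
        return 0;
    }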
@@ -28,22 +44,27 @@ extern "C" {
 __attribute__((always_inline))
 static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
 {
-    //On esp32c3, MMU Page size is always 64KB
-    (void)mmu_id;
-    return MMU_PAGE_64KB;
+    uint32_t page_size_code = REG_GET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE);
+    return (page_size_code == 0) ? MMU_PAGE_64KB : \
+           (page_size_code == 1) ? MMU_PAGE_32KB : \
+           (page_size_code == 2) ? MMU_PAGE_16KB : \
+           MMU_PAGE_8KB;
 }

 /**
  * Set MMU page size
  *
  * @param size MMU page size
  *
- * @note On esp32c3, only supports `MMU_PAGE_64KB`
  */
 __attribute__((always_inline))
 static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
 {
-    HAL_ASSERT(size == MMU_PAGE_64KB);
+    uint8_t reg_val = (size == MMU_PAGE_64KB) ? 0 : \
+                      (size == MMU_PAGE_32KB) ? 1 : \
+                      (size == MMU_PAGE_16KB) ? 2 : \
+                      (size == MMU_PAGE_8KB) ? 3 : 0;
+    REG_SET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE, reg_val);
 }

 /**
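The encoding consulted by the two helpers above is a 2-bit field: 0 selects 64 KB pages, 1 selects 32 KB, 2 selects 16 KB and 3 selects 8 KB. A small round-trip check of that mapping, without touching the SPI_MEM register:

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of the code<->size mapping in mmu_ll_get_page_size()/mmu_ll_set_page_size(). */
    static uint32_t code_to_size(uint8_t code) { return 0x10000u >> code; }  /* 64K, 32K, 16K, 8K */
    static uint8_t size_to_code(uint32_t size)
    {
        return (size == 0x10000) ? 0 : (size == 0x8000) ? 1 : (size == 0x4000) ? 2 : 3;
    }

    int main(void)
    {
        for (uint8_t code = 0; code < 4; code++) {
            assert(size_to_code(code_to_size(code)) == code);
        }
        return 0;
    }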
@@ -61,7 +82,7 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
 {
     (void)mmu_id;
     uint32_t vaddr_end = vaddr_start + len;
-    return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
+    return (ADDRESS_IN_IRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_IRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_DRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE));
 }

 /**
@@ -77,7 +98,22 @@ __attribute__((always_inline))
 static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
 {
     (void)mmu_id;
-    return ((vaddr & MMU_VADDR_MASK) >> 16);
+    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
+    uint32_t shift_code = 0;
+    switch (page_size) {
+        case MMU_PAGE_64KB:
+            shift_code = 16;
+            break;
+        case MMU_PAGE_32KB:
+            shift_code = 15;
+            break;
+        case MMU_PAGE_16KB:
+            shift_code = 14;
+            break;
+        default:
+            HAL_ASSERT(shift_code);
+    }
+    return ((vaddr & MMU_VADDR_MASK(page_size)) >> shift_code);
 }

 /**
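Worked example of the lookup above, assuming 64 KB pages (shift_code 16) and the MMU_VADDR_MASK(page_size)/MMU_ENTRY_NUM values introduced later in this diff:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_ENTRY_NUM             128
    #define MMU_VADDR_MASK(page_size) ((page_size) * MMU_ENTRY_NUM - 1)   /* 0x7FFFFF for 64 KB */

    /* Same arithmetic as mmu_ll_get_entry_id() with a fixed 64 KB page size. */
    static uint32_t entry_id_64k(uint32_t vaddr)
    {
        return (vaddr & MMU_VADDR_MASK(0x10000)) >> 16;
    }

    int main(void)
    {
        printf("%u\n", (unsigned)entry_id_64k(0x42000000));  /* 0: first page of IRAM0_CACHE   */
        printf("%u\n", (unsigned)entry_id_64k(0x42010000));  /* 1: one 64 KB page further on   */
        printf("%u\n", (unsigned)entry_id_64k(0x427F0000));  /* 127: last of the 128 IBUS pages */
        return 0;
    }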
@@ -93,7 +129,22 @@ __attribute__((always_inline))
 static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
 {
     (void)mmu_id;
-    return paddr >> 16;
+    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
+    uint32_t shift_code = 0;
+    switch (page_size) {
+        case MMU_PAGE_64KB:
+            shift_code = 16;
+            break;
+        case MMU_PAGE_32KB:
+            shift_code = 15;
+            break;
+        case MMU_PAGE_16KB:
+            shift_code = 14;
+            break;
+        default:
+            HAL_ASSERT(shift_code);
+    }
+    return paddr >> shift_code;
 }

 /**
@@ -104,14 +155,18 @@ static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
  * @param mmu_val Value to be set into an MMU entry, for physical address
  * @param target MMU target physical memory.
  */
-__attribute__((always_inline))
-static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
+__attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
 {
     (void)mmu_id;
-    HAL_ASSERT(target == MMU_TARGET_FLASH0);
-    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
-
-    *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | MMU_ACCESS_FLASH | MMU_VALID;
+    (void)target;
+    uint32_t mmu_raw_value;
+    if (mmu_ll_cache_encryption_enabled()) {
+        mmu_val |= MMU_SENSITIVE;
+    }
+    /* Note: for ESP32-C6, invert the invalid bit for compatibility with upper-layer software */
+    mmu_raw_value = mmu_val ^ MMU_INVALID_MASK;
+    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
+    REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), mmu_raw_value);
 }

 /**
@@ -121,28 +176,49 @@ static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32
  * @param entry_id MMU entry ID
  * @param mmu_val Value to be read from MMU table
  */
-__attribute__((always_inline))
-static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
+__attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
 {
     (void)mmu_id;
-    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
-
-    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
+    uint32_t mmu_raw_value;
+    uint32_t ret;
+    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
+    mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
+    if (mmu_ll_cache_encryption_enabled()) {
+        mmu_raw_value &= ~MMU_SENSITIVE;
+    }
+    /* Note: for ESP32-C6, invert the invalid bit for compatibility with upper-layer software */
+    ret = mmu_raw_value ^ MMU_INVALID_MASK;
+    return ret;
 }

 /**
  * Set MMU table entry as invalid
  *
  * @param mmu_id MMU ID
- * @param entry_id MMU entry ID
+ * @param entry_id MMU entry
  */
-__attribute__((always_inline))
-static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
+__attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
 {
     (void)mmu_id;
-    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
+    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
+    REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
+}

-    *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = MMU_INVALID;
+/**
+ * Get whether an MMU table entry is invalid
+ *
+ * @param mmu_id MMU ID
+ * @param entry_id MMU entry ID
+ * @return true if the MMU entry is invalid, false if valid
+ */
+__attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+{
+    (void)mmu_id;
+    uint32_t mmu_raw_value;
+    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
+    mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
+    /* Note: for ESP32-C6, the invalid-bit of the MMU: 0 for invalid, 1 for valid */
+    return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
+}

 /**
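The new entry accessors store the mapping value XOR-ed with MMU_INVALID_MASK, which (per the MMU_MSPI_* definitions later in this diff) is the hardware valid bit BIT(9): writing sets the bit for an ordinary mapping value, reading strips it again, and set_entry_invalid writes MMU_INVALID (0), leaving the bit clear. A minimal round-trip sketch with the SPI_MEM item registers replaced by a plain array:

    #include <assert.h>
    #include <stdint.h>

    #define MMU_INVALID_MASK (1u << 9)   /* == MMU_MSPI_VALID in the definitions below */

    static uint32_t mmu_table[256];      /* hypothetical stand-in for the MMU item registers */

    static void write_entry(uint32_t id, uint32_t val) { mmu_table[id] = val ^ MMU_INVALID_MASK; }
    static uint32_t read_entry(uint32_t id)            { return mmu_table[id] ^ MMU_INVALID_MASK; }

    int main(void)
    {
        uint32_t mmu_val = 0x12;                      /* some paddr page number, bit 9 clear */
        write_entry(3, mmu_val);
        assert(mmu_table[3] & MMU_INVALID_MASK);      /* raw value carries the HW valid bit */
        assert(read_entry(3) == mmu_val);             /* XOR-ing again restores the value   */
        return 0;
    }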
@@ -158,22 +234,6 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
     }
 }

-/**
- * Get MMU table entry is invalid
- *
- * @param mmu_id MMU ID
- * @param entry_id MMU entry ID
- * return ture for MMU entry is invalid, false for valid
- */
-__attribute__((always_inline))
-static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
-{
-    (void)mmu_id;
-    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
-
-    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
-}
-
 #ifdef __cplusplus
 }
 #endif

@@ -7,35 +7,36 @@
 #define _CACHE_MEMORY_H_

 #include "esp_bit_defs.h"
+#include "sdkconfig.h"

 #ifdef __cplusplus
 extern "C" {
 #endif

 /*IRAM0 is connected with Cache IBUS0*/
-#define IRAM0_ADDRESS_LOW                   0x40000000
-#define IRAM0_ADDRESS_HIGH                  0x44000000
 #define IRAM0_CACHE_ADDRESS_LOW             0x42000000
-#define IRAM0_CACHE_ADDRESS_HIGH            0x42800000
+#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 128))    // MMU has 256 pages, first 128 for instruction
+#define IRAM0_ADDRESS_LOW                   0x40000000
+#define IRAM0_ADDRESS_HIGH(page_size)       IRAM0_CACHE_ADDRESS_HIGH(page_size)

 /*DRAM0 is connected with Cache DBUS0*/
-#define DRAM0_ADDRESS_LOW                   0x3C000000
-#define DRAM0_ADDRESS_HIGH                  0x40000000
-#define DRAM0_CACHE_ADDRESS_LOW             0x3C000000
-#define DRAM0_CACHE_ADDRESS_HIGH            0x3C800000
-#define DRAM0_CACHE_OPERATION_HIGH          DRAM0_CACHE_ADDRESS_HIGH
-#define ESP_CACHE_TEMP_ADDR                 0x3C000000
+#define DRAM0_ADDRESS_LOW                   0x42000000
+#define DRAM0_ADDRESS_HIGH                  0x43000000
+#define DRAM0_CACHE_ADDRESS_LOW             IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)    // ESP32C6-TODO after fixed, also need to remove the sdkconfig.h inclusion
+#define DRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_HIGH(page_size) + ((page_size) * 128))    // MMU has 256 pages, second 128 for data
+#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size)
+#define ESP_CACHE_TEMP_ADDR                 0x42000000

-#define BUS_SIZE(bus_name)                  (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
-#define ADDRESS_IN_BUS(bus_name, vaddr)     ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
+#define BUS_SIZE(bus_name, page_size)              (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW)
+#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size))

-#define ADDRESS_IN_IRAM0(vaddr)             ADDRESS_IN_BUS(IRAM0, vaddr)
-#define ADDRESS_IN_IRAM0_CACHE(vaddr)       ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
-#define ADDRESS_IN_DRAM0(vaddr)             ADDRESS_IN_BUS(DRAM0, vaddr)
-#define ADDRESS_IN_DRAM0_CACHE(vaddr)       ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
+#define ADDRESS_IN_IRAM0(vaddr, page_size)          ADDRESS_IN_BUS(IRAM0, vaddr, page_size)
+#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size)    ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size)
+#define ADDRESS_IN_DRAM0(vaddr, page_size)          ADDRESS_IN_BUS(DRAM0, vaddr, page_size)
+#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size)    ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size)

-#define BUS_IRAM0_CACHE_SIZE                BUS_SIZE(IRAM0_CACHE)
-#define BUS_DRAM0_CACHE_SIZE                BUS_SIZE(DRAM0_CACHE)
+#define BUS_IRAM0_CACHE_SIZE(page_size)     BUS_SIZE(IRAM0_CACHE, page_size)
+#define BUS_DRAM0_CACHE_SIZE(page_size)     BUS_SIZE(DRAM0_CACHE, page_size)

 #define CACHE_IBUS                          0
 #define CACHE_IBUS_MMU_START                0
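Assuming the default 64 KB CONFIG_MMU_PAGE_SIZE, these parameterized macros reproduce concrete boundaries: IRAM0_CACHE_ADDRESS_HIGH = 0x42000000 + 128 * 0x10000 = 0x42800000, which is also DRAM0_CACHE_ADDRESS_LOW, and DRAM0_CACHE_ADDRESS_HIGH = 0x43000000, matching the fixed DRAM0_ADDRESS_HIGH above. A small check of that arithmetic (the sketch parameterizes DRAM0_CACHE_ADDRESS_LOW instead of reading sdkconfig):

    #include <assert.h>
    #include <stdint.h>

    #define IRAM0_CACHE_ADDRESS_LOW              0x42000000u
    #define IRAM0_CACHE_ADDRESS_HIGH(page_size)  (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 128))
    #define DRAM0_CACHE_ADDRESS_LOW(page_size)   IRAM0_CACHE_ADDRESS_HIGH(page_size)
    #define DRAM0_CACHE_ADDRESS_HIGH(page_size)  (IRAM0_CACHE_ADDRESS_HIGH(page_size) + ((page_size) * 128))

    int main(void)
    {
        uint32_t page = 0x10000;   /* 64 KB, the assumed CONFIG_MMU_PAGE_SIZE */
        assert(IRAM0_CACHE_ADDRESS_HIGH(page) == 0x42800000u);   /* first 128 pages: instruction */
        assert(DRAM0_CACHE_ADDRESS_LOW(page)  == 0x42800000u);   /* data window starts there     */
        assert(DRAM0_CACHE_ADDRESS_HIGH(page) == 0x43000000u);   /* == DRAM0_ADDRESS_HIGH        */
        return 0;
    }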
@@ -54,7 +55,7 @@ extern "C" {
 #define CACHE_DROM_MMU_END                  Cache_Get_DROM_MMU_End()
 #define CACHE_DROM_MMU_SIZE                 (CACHE_DROM_MMU_END - CACHE_DROM_MMU_START)

-#define CACHE_DROM_MMU_MAX_END              0x200
+#define CACHE_DROM_MMU_MAX_END              0x400

 #define ICACHE_MMU_SIZE                     0x200
 #define DCACHE_MMU_SIZE                     0x200
@@ -62,10 +63,21 @@ extern "C" {
 #define MMU_BUS_START(i)                    0
 #define MMU_BUS_SIZE(i)                     0x200

-#define MMU_INVALID                         BIT(8)
-#define MMU_VALID                           0
-#define MMU_TYPE                            0
-#define MMU_ACCESS_FLASH                    0
+#define MMU_MSPI_ACCESS_FLASH               0
+#define MMU_MSPI_VALID                      BIT(9)
+#define MMU_MSPI_INVALID                    0
+#define MMU_MSPI_SENSITIVE                  BIT(10)
+
+#define MMU_ACCESS_FLASH                    MMU_MSPI_ACCESS_FLASH
+#define MMU_ACCESS_SPIRAM                   MMU_MSPI_ACCESS_SPIRAM
+#define MMU_VALID                           MMU_MSPI_VALID
+#define MMU_SENSITIVE                       MMU_MSPI_SENSITIVE
+
+// ESP32C6-TODO
+#define MMU_INVALID_MASK                    MMU_MSPI_VALID
+#define MMU_INVALID                         MMU_MSPI_INVALID
+
+

 #define CACHE_MAX_SYNC_NUM                  0x400000
 #define CACHE_MAX_LOCK_NUM                  0x8000
@@ -76,29 +88,25 @@ extern "C" {
 /**
  * MMU entry valid bit mask for mapping value. For an entry:
  * valid bit + value bits
- * valid bit is BIT(8), so value bits are 0xff
+ * valid bit is BIT(9), so value bits are 0x1ff
  */
-#define MMU_VALID_VAL_MASK                  0xff
+#define MMU_VALID_VAL_MASK                  0x1ff
 /**
  * Max MMU available paddr page num.
  * `MMU_MAX_PADDR_PAGE_NUM * CONFIG_MMU_PAGE_SIZE` means the max paddr address supported by the MMU. e.g.:
  * 256 * 64KB, means MMU can support 16MB paddr at most
  */
 #define MMU_MAX_PADDR_PAGE_NUM              256
-//MMU entry num
-#define MMU_ENTRY_NUM                       256
-
-/**
- * This is the mask used for mapping. e.g.:
- * 0x4200_0000 & MMU_VADDR_MASK
- */
-#define MMU_VADDR_MASK                      0x7FFFFF
+//MMU entry num
+#define MMU_ENTRY_NUM                       128
+#define MMU_VADDR_MASK(page_size)           ((page_size) * MMU_ENTRY_NUM - 1)
+
 #define CACHE_ICACHE_LOW_SHIFT              0
 #define CACHE_ICACHE_HIGH_SHIFT             2
 #define CACHE_DCACHE_LOW_SHIFT              4
 #define CACHE_DCACHE_HIGH_SHIFT             6

-#define CACHE_MEMORY_IBANK0_ADDR            0x4037c000
+#define CACHE_MEMORY_IBANK0_ADDR            0x40800000

 #ifdef __cplusplus
 }

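With MMU_ENTRY_NUM at 128, the new MMU_VADDR_MASK(page_size) collapses to the old hard-coded constant for 64 KB pages and scales down for the smaller page sizes:

    #include <assert.h>

    #define MMU_ENTRY_NUM              128
    #define MMU_VADDR_MASK(page_size)  ((page_size) * MMU_ENTRY_NUM - 1)

    int main(void)
    {
        assert(MMU_VADDR_MASK(0x10000) == 0x7FFFFF);  /* 64 KB pages: same as the old constant */
        assert(MMU_VADDR_MASK(0x8000)  == 0x3FFFFF);  /* 32 KB pages */
        assert(MMU_VADDR_MASK(0x4000)  == 0x1FFFFF);  /* 16 KB pages */
        return 0;
    }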
@@ -32,10 +32,10 @@ extern "C" {
 #define EXTMEM_CACHE_WRAP_AROUND_CTRL_REG          (DR_REG_EXTMEM_BASE + 0x20)
 /* EXTMEM_DCACHE_WRAP : R/W ;bitpos:[4] ;default: 1'h0 ; */
 /*description: Set this bit as 1 to enable L1-DCache wrap around mode..*/
-#define EXTMEM_DCACHE_WRAP                         (BIT(4))
-#define EXTMEM_DCACHE_WRAP_M                       (BIT(4))
-#define EXTMEM_DCACHE_WRAP_V                       0x1
-#define EXTMEM_DCACHE_WRAP_S                       4
+#define EXTMEM_CACHE_FLASH_WRAP_AROUND             (BIT(4))
+#define EXTMEM_CACHE_FLASH_WRAP_AROUND_M           (BIT(4))
+#define EXTMEM_CACHE_FLASH_WRAP_AROUND_V           0x1
+#define EXTMEM_CACHE_FLASH_WRAP_AROUND_S           4

 #define EXTMEM_CACHE_TAG_MEM_POWER_CTRL_REG        (DR_REG_EXTMEM_BASE + 0x24)
 /* EXTMEM_DCACHE_TAG_MEM_FORCE_PU : R/W ;bitpos:[18] ;default: 1'h1 ; */

@@ -15,16 +15,18 @@ extern "C" {

 /* Defined for flash mmap */
 #define SOC_MMU_REGIONS_COUNT                   1
-#define SOC_MMU_PAGES_PER_REGION                128
+#define SOC_MMU_PAGES_PER_REGION                256
 #define SOC_MMU_IROM0_PAGES_START               (CACHE_IROM_MMU_START / sizeof(uint32_t))
 #define SOC_MMU_IROM0_PAGES_END                 (CACHE_IROM_MMU_END / sizeof(uint32_t))
 #define SOC_MMU_DROM0_PAGES_START               (CACHE_DROM_MMU_START / sizeof(uint32_t))
 #define SOC_MMU_DROM0_PAGES_END                 (CACHE_DROM_MMU_END / sizeof(uint32_t))
-#define SOC_MMU_ADDR_MASK                       MMU_VALID_VAL_MASK
+#define SOC_MMU_INVALID_ENTRY_VAL               MMU_TABLE_INVALID_VAL
+#define SOC_MMU_ADDR_MASK                       (MMU_VALID - 1)
 #define SOC_MMU_PAGE_IN_FLASH(page)             (page) //Always in Flash
 #define SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE       FLASH_MMU_TABLE
 #define SOC_MMU_VADDR1_START_ADDR               IRAM0_CACHE_ADDRESS_LOW
 #define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE     SOC_MMU_IROM0_PAGES_START
-#define SOC_MMU_VADDR0_START_ADDR               (SOC_DROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))
+#define SOC_MMU_VADDR0_START_ADDR               (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))
 #define SOC_MMU_VADDR1_FIRST_USABLE_ADDR        SOC_IROM_LOW

 #ifdef __cplusplus

@@ -216,25 +216,6 @@
 #define SOC_ROM_STACK_START         0x4087c770


-// ESP32C6-TODO @wanglei Need check
-/* Defined for flash mmap */
-#define REGIONS_COUNT               2
-#define PAGES_PER_REGION            256
-#define IROM0_PAGES_START           (CACHE_IROM_MMU_START / sizeof(uint32_t))
-#define IROM0_PAGES_END             (CACHE_IROM_MMU_END / sizeof(uint32_t))
-#define DROM0_PAGES_START           (CACHE_DROM_MMU_START / sizeof(uint32_t))
-#define DROM0_PAGES_END             (CACHE_DROM_MMU_END / sizeof(uint32_t))
-#define INVALID_ENTRY_VAL           MMU_TABLE_INVALID_VAL
-#define VALID_ENTRY_VAL             MMU_TABLE_VALID_VAL
-#define MMU_ADDR_MASK               MMU_ADDRESS_MASK
-#define PAGE_IN_FLASH(page)         ((page) | MMU_MSPI_VALID) // ESP32C6-TODO
-#define VADDR1_START_ADDR           IRAM0_CACHE_ADDRESS_LOW
-#define PRO_IRAM0_FIRST_USABLE_PAGE IROM0_PAGES_START
-#define VADDR0_START_ADDR           DRAM0_CACHE_ADDRESS_LOW
-#define VADDR1_FIRST_USABLE_ADDR    SOC_IROM_LOW
-
-
-
 //On RISC-V CPUs, the interrupt sources are all external interrupts, whose type, source and priority are configured by SW.
 //There is no HW NMI conception. SW should controlled the masked levels through INT_THRESH_REG.
