/*
 * Copyright (c) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <zephyr/sys/util.h> /* for BIT() */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H

/**
 * @defgroup xtensa_mmu_apis Xtensa Memory Management Unit (MMU) APIs
 * @ingroup xtensa_apis
 * @{
 */

/**
 * @name Memory region permission and caching mode.
 * @{
 */

/** Memory region is executable. */
#define XTENSA_MMU_PERM_X BIT(0)

/** Memory region is writable. */
#define XTENSA_MMU_PERM_W BIT(1)

/** Memory region is both executable and writable. */
#define XTENSA_MMU_PERM_WX (XTENSA_MMU_PERM_W | XTENSA_MMU_PERM_X)

/** Memory region has write-back cache. */
#define XTENSA_MMU_CACHED_WB BIT(2)

/** Memory region has write-through cache. */
#define XTENSA_MMU_CACHED_WT BIT(3)

/**
 * @}
 */

/**
 * @name Memory domain and partitions
 * @{
 */

typedef uint32_t k_mem_partition_attr_t;

#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & XTENSA_MMU_PERM_X) != 0)
#define K_MEM_PARTITION_IS_WRITABLE(attr)   (((attr) & XTENSA_MMU_PERM_W) != 0)
#define K_MEM_PARTITION_IS_USER(attr)       (((attr) & XTENSA_MMU_MAP_USER) != 0)

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW \
	((k_mem_partition_attr_t) {XTENSA_MMU_PERM_W | XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RW_U_NA \
	((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_RO_U_RO \
	((k_mem_partition_attr_t) {XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RO_U_NA \
	((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_NA_U_NA \
	((k_mem_partition_attr_t) {0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX \
	((k_mem_partition_attr_t) {XTENSA_MMU_PERM_X})

/**
 * @}
 */

/**
 * @brief Software-only bit indicating that a memory region can be accessed by user thread(s).
 *
 * This bit tells the mapping code which ring to use in the PTE entries.
 */
#define XTENSA_MMU_MAP_USER BIT(4)

/**
 * @brief Software-only bit indicating that a memory region is shared by all threads.
 *
 * This bit tells the mapping code whether the memory region should be
 * shared between all threads. It is not used by the hardware; it only
 * guides the implementation.
 *
 * The PTE mapping this memory will use an ASID that is set in the
 * ring 4 spot in RASID.
 */
#define XTENSA_MMU_MAP_SHARED BIT(30)

/**
 * Struct used to map a memory region.
 */
struct xtensa_mmu_range {
	/** Name of the memory region. */
	const char *name;

	/** Start address of the memory region. */
	const uint32_t start;

	/** End address of the memory region. */
	const uint32_t end;

	/** Attributes for the memory region. */
	const uint32_t attrs;
};

/**
 * @brief Additional memory regions required by the SoC.
 *
 * These memory regions will be set up by the MMU initialization code at boot.
 */
extern const struct xtensa_mmu_range xtensa_soc_mmu_ranges[];

/** Number of additional SoC memory regions. */
extern int xtensa_soc_mmu_ranges_num;
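
/*
 * Example (illustrative sketch only, not part of the upstream header):
 * the two symbols declared above are expected to be provided by the SoC
 * layer from one of its own source files. The region name, addresses
 * and attributes below are made up for the example.
 *
 *	const struct xtensa_mmu_range xtensa_soc_mmu_ranges[] = {
 *		{
 *			.name = "soc_periph",
 *			.start = 0x60000000,
 *			.end = 0x60400000,
 *			.attrs = XTENSA_MMU_PERM_W,
 *		},
 *	};
 *
 *	int xtensa_soc_mmu_ranges_num = ARRAY_SIZE(xtensa_soc_mmu_ranges);
 */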

/**
 * @brief Initialize the hardware MMU.
 *
 * This initializes the MMU hardware and sets up the memory regions at boot.
 */
void xtensa_mmu_init(void);

/**
 * @brief Re-initialize the hardware MMU.
 *
 * This configures the MMU hardware when the CPU has lost context and
 * restarted.
 *
 * It assumes that the page table has already been created and is
 * accessible in memory.
 */
void xtensa_mmu_reinit(void);

/**
 * @brief Tell other processors to flush TLBs.
 *
 * This sends an IPI to the other processors telling them to invalidate
 * their cached copies of the page tables and to flush their TLBs. This
 * is needed when one processor is updating page tables that may affect
 * threads running on other processors.
 *
 * @note This needs to be implemented in the SoC layer.
 */
void xtensa_mmu_tlb_ipi(void);

/**
 * @brief Invalidate cached page tables and flush TLBs.
 *
 * This invalidates the cached page tables and flushes the TLBs, as they
 * may have been modified by other processors.
 */
void xtensa_mmu_tlb_shootdown(void);

/**
 * @}
 */

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H */
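
/*
 * Usage note (illustrative sketch only, not part of the upstream header):
 * the K_MEM_PARTITION_* attributes defined above are consumed by Zephyr's
 * generic memory domain API, e.g. via K_MEM_PARTITION_DEFINE(). The buffer
 * and partition names below are made up for the example; the buffer is
 * sized and aligned to the MMU page size as the partition API requires.
 *
 *	#include <zephyr/kernel.h>
 *
 *	static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE)
 *		app_buf[CONFIG_MMU_PAGE_SIZE];
 *
 *	K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
 *			       K_MEM_PARTITION_P_RW_U_RW);
 */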