/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef __CORE_MMU_ARCH_H
#define __CORE_MMU_ARCH_H

#ifndef __ASSEMBLER__
#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <platform_config.h>

/*
 * Platforms can define TRUSTED_{S,D}RAM_* or TZ{S,D}RAM_*. We're helping
 * here with the transition to TRUSTED_{S,D}RAM_* by defining these if
 * missing based on the legacy defines.
 */
#ifdef TZSRAM_BASE
#ifdef TRUSTED_SRAM_BASE
#error TRUSTED_SRAM_BASE is already defined
#endif
#define TRUSTED_SRAM_BASE	TZSRAM_BASE
#define TRUSTED_SRAM_SIZE	TZSRAM_SIZE
#endif

#ifdef TZDRAM_BASE
#ifdef TRUSTED_DRAM_BASE
#error TRUSTED_DRAM_BASE is already defined
#endif
#define TRUSTED_DRAM_BASE	TZDRAM_BASE
#define TRUSTED_DRAM_SIZE	TZDRAM_SIZE
#endif
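
/*
 * For example (hypothetical values), a platform_config.h that still uses
 * the legacy names:
 *
 *   #define TZSRAM_BASE	0x02000000
 *   #define TZSRAM_SIZE	0x00040000
 *
 * is mapped by the shim above to TRUSTED_SRAM_BASE/TRUSTED_SRAM_SIZE.
 */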

#define SMALL_PAGE_SHIFT	U(12)

#ifdef CFG_WITH_LPAE
#define CORE_MMU_PGDIR_SHIFT	U(21)
#define CORE_MMU_PGDIR_LEVEL	U(3)
#else
#define CORE_MMU_PGDIR_SHIFT	U(20)
#define CORE_MMU_PGDIR_LEVEL	U(2)
#endif
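
/*
 * One page directory entry thus covers BIT(CORE_MMU_PGDIR_SHIFT) bytes:
 * a 2 MiB block with LPAE (shift 21), a 1 MiB section without (shift 20).
 */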

#define CORE_MMU_USER_CODE_SHIFT	SMALL_PAGE_SHIFT

#define CORE_MMU_USER_PARAM_SHIFT	SMALL_PAGE_SHIFT

/*
 * Level of base table (i.e. first level of page table),
 * depending on address space
 */
#if !defined(CFG_WITH_LPAE) || (CFG_LPAE_ADDR_SPACE_BITS < 40)
#define CORE_MMU_BASE_TABLE_SHIFT	U(30)
#define CORE_MMU_BASE_TABLE_LEVEL	U(1)
#elif (CFG_LPAE_ADDR_SPACE_BITS <= 48)
#define CORE_MMU_BASE_TABLE_SHIFT	U(39)
#define CORE_MMU_BASE_TABLE_LEVEL	U(0)
#else /* (CFG_LPAE_ADDR_SPACE_BITS > 48) */
#error "CFG_WITH_LPAE with CFG_LPAE_ADDR_SPACE_BITS > 48 isn't supported!"
#endif
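
/*
 * For example, with CFG_LPAE_ADDR_SPACE_BITS = 32 the base table is a
 * level 1 table of BIT(32 - 30) = 4 entries; with 48 bits it is a
 * level 0 table of BIT(48 - 39) = 512 entries.
 */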

#ifdef CFG_WITH_LPAE
/*
 * CORE_MMU_BASE_TABLE_OFFSET is used when switching to/from reduced kernel
 * mapping. The actual value depends on internals in core_mmu_lpae.c which
 * we'd rather not expose here. There's a compile-time assertion to check
 * that these magic numbers are correct.
 */
#define CORE_MMU_BASE_TABLE_OFFSET \
	(CFG_TEE_CORE_NB_CORE * \
	 BIT(CFG_LPAE_ADDR_SPACE_BITS - CORE_MMU_BASE_TABLE_SHIFT) * \
	 U(8))
#endif
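
/*
 * A worked example (hypothetical configuration): with
 * CFG_TEE_CORE_NB_CORE = 2 and CFG_LPAE_ADDR_SPACE_BITS = 32, each
 * per-core base table holds BIT(32 - 30) = 4 eight-byte entries, so
 * CORE_MMU_BASE_TABLE_OFFSET = 2 * 4 * 8 = 64 bytes.
 */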

/*
 * TEE_RAM_VA_START: The start virtual address of the TEE RAM
 * TEE_TEXT_VA_START: The start virtual address of the OP-TEE text
 */

/*
 * Identity mapping constraint: the virtual base address is the physical
 * start address. If the platform did not set some of these macros, they
 * get default values.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE		CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR		TEE_RAM_START
#endif

#define TEE_RAM_VA_START	TEE_RAM_START
#define TEE_TEXT_VA_START	(TEE_RAM_VA_START + \
				 (TEE_LOAD_ADDR - TEE_RAM_START))
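
/*
 * Example (hypothetical addresses): with TEE_RAM_START = 0x0e000000 and
 * TEE_LOAD_ADDR = 0x0e100000, TEE_RAM_VA_START is 0x0e000000 (identity
 * mapped) and TEE_TEXT_VA_START is 0x0e000000 + 0x00100000 = 0x0e100000.
 */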

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT		(sizeof(long) * U(2))
#endif
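
/*
 * The default evaluates to 8 bytes on 32-bit builds and 16 bytes on
 * 64-bit builds, matching the AAPCS and AAPCS64 stack alignment rules.
 */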

#ifndef __ASSEMBLER__

/*
 * Assembly code in enable_mmu() depends on the layout of this struct.
 */
struct core_mmu_config {
#if defined(ARM64)
	uint64_t tcr_el1;
	uint64_t mair_el1;
	uint64_t ttbr0_el1_base;
	uint64_t ttbr0_core_offset;
	uint64_t load_offset;
#elif defined(CFG_WITH_LPAE)
	uint32_t ttbcr;
	uint32_t mair0;
	uint32_t ttbr0_base;
	uint32_t ttbr0_core_offset;
	uint32_t load_offset;
#else
	uint32_t prrr;
	uint32_t nmrr;
	uint32_t dacr;
	uint32_t ttbcr;
	uint32_t ttbr;
	uint32_t load_offset;
#endif
};
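
/*
 * For instance, on ARM64 the layout above places tcr_el1 at offset 0,
 * mair_el1 at 8, ttbr0_el1_base at 16, ttbr0_core_offset at 24 and
 * load_offset at 32; the offsets used by the assembly code must match.
 */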

#ifdef CFG_WITH_LPAE
/*
 * struct core_mmu_user_map - current user mapping register state
 * @user_map: physical address of user map translation table
 * @asid: ASID for the user map
 *
 * Note that this struct should be treated as an opaque struct since
 * the content depends on the descriptor table format.
 */
struct core_mmu_user_map {
	uint64_t user_map;
	uint32_t asid;
};
#else
/*
 * struct core_mmu_user_map - current user mapping register state
 * @ttbr0: content of ttbr0
 * @ctxid: content of contextidr
 *
 * Note that this struct should be treated as an opaque struct since
 * the content depends on the descriptor table format.
 */
struct core_mmu_user_map {
	uint32_t ttbr0;
	uint32_t ctxid;
};
#endif

#ifdef CFG_WITH_LPAE
bool core_mmu_user_va_range_is_defined(void);
#else
static inline bool __noprof core_mmu_user_va_range_is_defined(void)
{
	return true;
}
#endif

/* Cache maintenance operation type */
enum cache_op {
	DCACHE_CLEAN,
	DCACHE_AREA_CLEAN,
	DCACHE_INVALIDATE,
	DCACHE_AREA_INVALIDATE,
	ICACHE_INVALIDATE,
	ICACHE_AREA_INVALIDATE,
	DCACHE_CLEAN_INV,
	DCACHE_AREA_CLEAN_INV,
};

/* L1/L2 cache maintenance */
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len);
#ifdef CFG_PL310
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len);
#else
static inline TEE_Result cache_op_outer(enum cache_op op __unused,
					paddr_t pa __unused,
					size_t len __unused)
{
	/* Nothing to do for L2 cache maintenance when there is no PL310 */
	return TEE_SUCCESS;
}
#endif
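
/*
 * A typical sequence (sketch, with hypothetical va/pa/len values): clean
 * the inner caches by virtual address first, then the outer cache by
 * physical address:
 *
 *   cache_op_inner(DCACHE_AREA_CLEAN, va, len);
 *   cache_op_outer(DCACHE_AREA_CLEAN, pa, len);
 */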

/* Do section mapping, not supported with LPAE */
void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb);

static inline bool core_mmu_check_max_pa(paddr_t pa __maybe_unused)
{
#if defined(ARM64)
	return pa <= (BIT64(CFG_CORE_ARM64_PA_BITS) - 1);
#elif defined(CFG_CORE_LARGE_PHYS_ADDR)
	return pa <= (BIT64(40) - 1);
#else
	COMPILE_TIME_ASSERT(sizeof(paddr_t) == sizeof(uint32_t));
	return true;
#endif
}
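
/*
 * For example, with CFG_CORE_ARM64_PA_BITS = 40 any physical address up
 * to BIT64(40) - 1 = 0xffffffffff is accepted.
 */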

/*
 * Special barrier to make sure all the changes to translation tables are
 * visible before returning.
 */
static inline void core_mmu_table_write_barrier(void)
{
	dsb_ishst();
}
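
/*
 * Typical use (sketch): write the updated translation table entries, call
 * core_mmu_table_write_barrier(), and only then perform TLB maintenance,
 * so that a table walk cannot observe a stale entry.
 */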

static inline bool core_mmu_entry_have_security_bit(uint32_t attr)
{
	return !(attr & TEE_MATTR_TABLE) || !IS_ENABLED(CFG_WITH_LPAE);
}
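
/*
 * In other words: with the short-descriptor format every entry carries a
 * security bit, while with LPAE only non-table entries do.
 */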

static inline unsigned int core_mmu_get_va_width(void)
{
	if (IS_ENABLED(ARM64)) {
		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS >= 32);
		COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS <= 48);
		return CFG_LPAE_ADDR_SPACE_BITS;
	}
	return 32;
}
#endif /*__ASSEMBLER__*/

#endif /* __CORE_MMU_ARCH_H */