1 // SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
2 /*
3  * Copyright (c) 2015-2016, 2022 Linaro Limited
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * Copyright (c) 2014, 2022, ARM Limited and Contributors. All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are met:
34  *
35  * Redistributions of source code must retain the above copyright notice, this
36  * list of conditions and the following disclaimer.
37  *
38  * Redistributions in binary form must reproduce the above copyright notice,
39  * this list of conditions and the following disclaimer in the documentation
40  * and/or other materials provided with the distribution.
41  *
42  * Neither the name of ARM nor the names of its contributors may be used
43  * to endorse or promote products derived from this software without specific
44  * prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56  * POSSIBILITY OF SUCH DAMAGE.
57  */
58 #include <platform_config.h>
59 
60 #include <arm.h>
61 #include <assert.h>
62 #include <compiler.h>
63 #include <config.h>
64 #include <inttypes.h>
65 #include <keep.h>
66 #include <kernel/cache_helpers.h>
67 #include <kernel/linker.h>
68 #include <kernel/misc.h>
69 #include <kernel/panic.h>
70 #include <kernel/thread.h>
71 #include <kernel/tlb_helpers.h>
72 #include <memtag.h>
73 #include <mm/core_memprot.h>
74 #include <mm/pgt_cache.h>
75 #include <string.h>
76 #include <trace.h>
77 #include <types_ext.h>
78 #include <util.h>
79 
80 #ifndef DEBUG_XLAT_TABLE
81 #define DEBUG_XLAT_TABLE 0
82 #endif
83 
84 #if DEBUG_XLAT_TABLE
85 #define debug_print(...) DMSG_RAW(__VA_ARGS__)
86 #else
87 #define debug_print(...) ((void)0)
88 #endif
89 
90 
91 /*
92  * Miscellaneous MMU related constants
93  */
94 
95 #define INVALID_DESC		0x0
96 #define BLOCK_DESC		0x1
97 #define L3_BLOCK_DESC		0x3
98 #define TABLE_DESC		0x3
99 #define DESC_ENTRY_TYPE_MASK	0x3
100 
101 #define XN			(1ull << 2)
102 #define PXN			(1ull << 1)
103 #define CONT_HINT		(1ull << 0)
104 
105 #define UPPER_ATTRS(x)		(((x) & 0x7) << 52)
106 #define GP                      BIT64(50)   /* Guarded Page, AArch64 FEAT_BTI */
107 #define NON_GLOBAL		(1ull << 9)
108 #define ACCESS_FLAG		(1ull << 8)
109 #define NSH			(0x0 << 6)
110 #define OSH			(0x2 << 6)
111 #define ISH			(0x3 << 6)
112 
113 #define AP_RO			(0x1 << 5)
114 #define AP_RW			(0x0 << 5)
115 #define AP_UNPRIV		(0x1 << 4)
116 
117 #define NS				(0x1 << 3)
118 #define LOWER_ATTRS_SHIFT		2
119 #define LOWER_ATTRS(x)			(((x) & 0xfff) << LOWER_ATTRS_SHIFT)
120 
121 #define ATTR_DEVICE_nGnRE_INDEX		0x0
122 #define ATTR_IWBWA_OWBWA_NTR_INDEX	0x1
123 #define ATTR_DEVICE_nGnRnE_INDEX	0x2
124 #define ATTR_TAGGED_NORMAL_MEM_INDEX	0x3
125 #define ATTR_INDEX_MASK			0x7
126 
127 #define ATTR_DEVICE_nGnRnE		(0x0)
128 #define ATTR_DEVICE_nGnRE		(0x4)
129 #define ATTR_IWBWA_OWBWA_NTR		(0xff)
130 /* Same as ATTR_IWBWA_OWBWA_NTR but with memory tagging.  */
131 #define ATTR_TAGGED_NORMAL_MEM		(0xf0)
132 
133 #define MAIR_ATTR_SET(attr, index)	(((uint64_t)attr) << ((index) << 3))
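/*
 * Worked example (illustrative): with the attribute encodings above,
 * MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX)
 * evaluates to 0xff << (1 * 8) = 0xff00, i.e. attribute 0xff placed in
 * MAIR field Attr1.
 */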
134 
135 #define OUTPUT_ADDRESS_MASK	(0x0000FFFFFFFFF000ULL)
136 
137 /* (internal) physical address size bits in EL3/EL1 */
138 #define TCR_PS_BITS_4GB		(0x0)
139 #define TCR_PS_BITS_64GB	(0x1)
140 #define TCR_PS_BITS_1TB		(0x2)
141 #define TCR_PS_BITS_4TB		(0x3)
142 #define TCR_PS_BITS_16TB	(0x4)
143 #define TCR_PS_BITS_256TB	(0x5)
144 
145 #define UNSET_DESC		((uint64_t)-1)
146 
147 #define FOUR_KB_SHIFT		12
148 #define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
149 #define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
150 #define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
151 #define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
152 
153 #define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
154 #define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
155 
156 #define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
157 #define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
158 
159 #define XLAT_TABLE_LEVEL_MAX	U(3)
160 
161 /* Values for number of entries in each MMU translation table */
162 #define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
163 #define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
164 #define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
165 
166 /* Values to convert a memory address to an index into a translation table */
167 #define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
168 #define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + \
169 				 XLAT_TABLE_ENTRIES_SHIFT)
170 #define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + \
171 				 XLAT_TABLE_ENTRIES_SHIFT)
172 #define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + \
173 				 XLAT_TABLE_ENTRIES_SHIFT)
174 #define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
175 				 ((XLAT_TABLE_LEVEL_MAX - (level)) * \
176 				 XLAT_TABLE_ENTRIES_SHIFT))
177 
178 #define XLAT_BLOCK_SIZE(level)	(UL(1) << XLAT_ADDR_SHIFT(level))
179 
180 /* Base table */
181 #define BASE_XLAT_ADDRESS_SHIFT	XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL)
182 #define BASE_XLAT_BLOCK_SIZE	XLAT_BLOCK_SIZE(CORE_MMU_BASE_TABLE_LEVEL)
183 
184 #define NUM_BASE_LEVEL_ENTRIES	\
185 	BIT(CFG_LPAE_ADDR_SPACE_BITS - BASE_XLAT_ADDRESS_SHIFT)
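/*
 * Worked example (illustrative, 4KB granule as defined above):
 * XLAT_ADDR_SHIFT() yields 12, 21, 30 and 39 for levels 3, 2, 1 and 0,
 * giving block sizes of 4KB, 2MB, 1GB and 512GB respectively. With, for
 * instance, CFG_LPAE_ADDR_SPACE_BITS = 32 and a level 1 base table,
 * NUM_BASE_LEVEL_ENTRIES = BIT(32 - 30) = 4.
 */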
186 
187 /*
188  * MMU base translation table, one for each core
189  *
190  * With CFG_CORE_UNMAP_CORE_AT_EL0, each core has one table to be used
191  * while in kernel mode and one to be used while in user mode.
192  */
193 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
194 #define NUM_BASE_TABLES	2
195 #else
196 #define NUM_BASE_TABLES	1
197 #endif
198 
199 #ifndef MAX_XLAT_TABLES
200 #ifdef CFG_VIRTUALIZATION
201 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 3
202 #else
203 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 0
204 #endif
205 #ifdef CFG_CORE_ASLR
206 #	define XLAT_TABLE_ASLR_EXTRA 3
207 #else
208 #	define XLAT_TABLE_ASLR_EXTRA 0
209 #endif
210 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
211 #	define XLAT_TABLE_TEE_EXTRA 8
212 #	define XLAT_TABLE_USER_EXTRA (NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE)
213 #else
214 #	define XLAT_TABLE_TEE_EXTRA 5
215 #	define XLAT_TABLE_USER_EXTRA 0
216 #endif
217 #define MAX_XLAT_TABLES		(XLAT_TABLE_TEE_EXTRA + \
218 				 XLAT_TABLE_VIRTUALIZATION_EXTRA + \
219 				 XLAT_TABLE_ASLR_EXTRA + \
220 				 XLAT_TABLE_USER_EXTRA)
221 #endif /*!MAX_XLAT_TABLES*/
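/*
 * Worked example (illustrative): with a level 1 base table, no
 * virtualization and CFG_CORE_ASLR enabled, the default above becomes
 * MAX_XLAT_TABLES = 5 + 0 + 3 + 0 = 8 translation tables.
 */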
222 
223 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
224 #if (MAX_XLAT_TABLES <= UINT8_MAX)
225 typedef uint8_t l1_idx_t;
226 #elif (MAX_XLAT_TABLES <= UINT16_MAX)
227 typedef uint16_t l1_idx_t;
228 #else
229 #error MAX_XLAT_TABLES is suspiciously large, please check
230 #endif
231 #endif
232 
233 typedef uint64_t base_xlat_tbls_t[CFG_TEE_CORE_NB_CORE][NUM_BASE_LEVEL_ENTRIES];
234 typedef uint64_t xlat_tbl_t[XLAT_TABLE_ENTRIES];
235 
236 static base_xlat_tbls_t base_xlation_table[NUM_BASE_TABLES]
237 	__aligned(NUM_BASE_LEVEL_ENTRIES * XLAT_ENTRY_SIZE)
238 	__section(".nozi.mmu.base_table");
239 
240 static xlat_tbl_t xlat_tables[MAX_XLAT_TABLES]
241 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
242 
243 #define XLAT_TABLES_SIZE	(sizeof(xlat_tbl_t) * MAX_XLAT_TABLES)
244 
245 /* MMU L2 table for TAs, one for each thread */
246 static xlat_tbl_t xlat_tables_ul1[CFG_NUM_THREADS]
247 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
248 
249 /*
250  * Index, inside a level 1 page table, of the entry used for the TA mappings.
251  *
252  * The TA mapping is expected to start at level 2.
253  *
254  * If the base level is 1, then this is the index of the level 1 entry
255  * that points directly to the TA mapping table.
256  *
257  * If the base level is 0, then entry 0 of the base table is always used,
258  * and we fall back to a "base level == 1" style scenario below it.
259  */
260 static int user_va_idx __nex_data = -1;
261 
262 struct mmu_partition {
263 	base_xlat_tbls_t *base_tables;
264 	xlat_tbl_t *xlat_tables;
265 	xlat_tbl_t *l2_ta_tables;
266 	unsigned int xlat_tables_used;
267 	unsigned int asid;
268 
269 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
270 	/*
271 	 * Per base table and per core, index of the L1 table from
272 	 * 'xlat_tables' that points to the user mappings.
273 	 */
274 	l1_idx_t user_l1_table_idx[NUM_BASE_TABLES][CFG_TEE_CORE_NB_CORE];
275 #endif
276 };
277 
278 static struct mmu_partition default_partition __nex_data = {
279 	.base_tables = base_xlation_table,
280 	.xlat_tables = xlat_tables,
281 	.l2_ta_tables = xlat_tables_ul1,
282 	.xlat_tables_used = 0,
283 	.asid = 0
284 };
285 
286 #ifdef CFG_VIRTUALIZATION
287 static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE] __nex_bss;
288 #endif
289 
290 static struct mmu_partition *get_prtn(void)
291 {
292 #ifdef CFG_VIRTUALIZATION
293 	struct mmu_partition *ret;
294 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
295 
296 	ret = current_prtn[get_core_pos()];
297 
298 	thread_unmask_exceptions(exceptions);
299 	return ret;
300 #else
301 	return &default_partition;
302 #endif
303 }
304 
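/*
 * Convert an LPAE descriptor at the given translation level into the
 * corresponding TEE_MATTR_* flags. Returns 0 for invalid entries and
 * TEE_MATTR_TABLE for table descriptors above the last level.
 */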
305 static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
306 {
307 	uint32_t a;
308 
309 	if (!(desc & 1))
310 		return 0;
311 
312 	if (level == XLAT_TABLE_LEVEL_MAX) {
313 		if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
314 			return 0;
315 	} else {
316 		if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
317 			return TEE_MATTR_TABLE;
318 	}
319 
320 	a = TEE_MATTR_VALID_BLOCK;
321 
322 	if (desc & LOWER_ATTRS(ACCESS_FLAG))
323 		a |= TEE_MATTR_PRX | TEE_MATTR_URX;
324 
325 	if (!(desc & LOWER_ATTRS(AP_RO)))
326 		a |= TEE_MATTR_PW | TEE_MATTR_UW;
327 
328 	if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
329 		a &= ~TEE_MATTR_URWX;
330 
331 	if (desc & UPPER_ATTRS(XN))
332 		a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
333 
334 	if (desc & UPPER_ATTRS(PXN))
335 		a &= ~TEE_MATTR_PX;
336 
337 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRnE_INDEX ==
338 			    TEE_MATTR_MEM_TYPE_STRONGLY_O);
339 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRE_INDEX == TEE_MATTR_MEM_TYPE_DEV);
340 	COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
341 			    TEE_MATTR_MEM_TYPE_CACHED);
342 	COMPILE_TIME_ASSERT(ATTR_TAGGED_NORMAL_MEM_INDEX ==
343 			    TEE_MATTR_MEM_TYPE_TAGGED);
344 
345 	a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
346 	     TEE_MATTR_MEM_TYPE_SHIFT;
347 
348 	if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
349 		a |= TEE_MATTR_GLOBAL;
350 
351 	if (!(desc & LOWER_ATTRS(NS)))
352 		a |= TEE_MATTR_SECURE;
353 
354 	if (desc & GP)
355 		a |= TEE_MATTR_GUARDED;
356 
357 	return a;
358 }
359 
360 static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
361 {
362 	uint64_t desc;
363 	uint32_t a = attr;
364 
365 	if (a & TEE_MATTR_TABLE)
366 		return TABLE_DESC;
367 
368 	if (!(a & TEE_MATTR_VALID_BLOCK))
369 		return 0;
370 
371 	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
372 		a |= TEE_MATTR_PR;
373 	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
374 		a |= TEE_MATTR_UR;
375 	if (a & TEE_MATTR_UR)
376 		a |= TEE_MATTR_PR;
377 	if (a & TEE_MATTR_UW)
378 		a |= TEE_MATTR_PW;
379 
380 	if (IS_ENABLED(CFG_CORE_BTI) && (a & TEE_MATTR_PX))
381 		a |= TEE_MATTR_GUARDED;
382 
383 	if (level == XLAT_TABLE_LEVEL_MAX)
384 		desc = L3_BLOCK_DESC;
385 	else
386 		desc = BLOCK_DESC;
387 
388 	if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
389 		desc |= UPPER_ATTRS(XN);
390 	if (!(a & TEE_MATTR_PX))
391 		desc |= UPPER_ATTRS(PXN);
392 
393 	if (a & TEE_MATTR_UR)
394 		desc |= LOWER_ATTRS(AP_UNPRIV);
395 
396 	if (!(a & TEE_MATTR_PW))
397 		desc |= LOWER_ATTRS(AP_RO);
398 
399 	if (feat_bti_is_implemented() && (a & TEE_MATTR_GUARDED))
400 		desc |= GP;
401 
402 	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
403 	switch ((a >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
404 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
405 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRnE_INDEX | OSH);
406 		break;
407 	case TEE_MATTR_MEM_TYPE_DEV:
408 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRE_INDEX | OSH);
409 		break;
410 	case TEE_MATTR_MEM_TYPE_CACHED:
411 		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
412 		break;
413 	case TEE_MATTR_MEM_TYPE_TAGGED:
414 		desc |= LOWER_ATTRS(ATTR_TAGGED_NORMAL_MEM_INDEX | ISH);
415 		break;
416 	default:
417 		/*
418 		 * "Can't happen": the attribute is supposed to have been
419 		 * checked with core_mmu_mattr_is_ok() before.
420 		 */
421 		panic();
422 	}
423 
424 	if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
425 		desc |= LOWER_ATTRS(ACCESS_FLAG);
426 
427 	if (!(a & TEE_MATTR_GLOBAL))
428 		desc |= LOWER_ATTRS(NON_GLOBAL);
429 
430 	desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
431 
432 	return desc;
433 }
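/*
 * Worked example (illustrative, assuming CFG_CORE_BTI is disabled):
 * mapping normal cached, secure, global, privileged read/write,
 * non-executable memory at level 3, that is
 * mattr_to_desc(3, TEE_MATTR_VALID_BLOCK | TEE_MATTR_PR | TEE_MATTR_PW |
 *		    TEE_MATTR_GLOBAL | TEE_MATTR_SECURE |
 *		    TEE_MATTR_MEM_TYPE_CACHED << TEE_MATTR_MEM_TYPE_SHIFT),
 * yields L3_BLOCK_DESC | UPPER_ATTRS(XN | PXN) |
 * LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH | ACCESS_FLAG), i.e.
 * 0x0060000000000707, before the output address is OR'ed in by
 * core_mmu_set_entry_primitive().
 */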
434 
435 #ifdef CFG_VIRTUALIZATION
436 size_t core_mmu_get_total_pages_size(void)
437 {
438 	return ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE) +
439 		sizeof(xlat_tables) + sizeof(xlat_tables_ul1);
440 }
441 
442 struct mmu_partition *core_alloc_mmu_prtn(void *tables)
443 {
444 	struct mmu_partition *prtn;
445 	uint8_t *tbl = tables;
446 	unsigned int asid = asid_alloc();
447 
448 	assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
449 
450 	if (!asid)
451 		return NULL;
452 
453 	prtn = nex_malloc(sizeof(*prtn));
454 	if (!prtn) {
455 		asid_free(asid);
456 		return NULL;
457 	}
458 
459 	prtn->base_tables = (void *)tbl;
460 	COMPILE_TIME_ASSERT(sizeof(base_xlation_table) <= SMALL_PAGE_SIZE);
461 	memset(prtn->base_tables, 0, SMALL_PAGE_SIZE);
462 	tbl += ROUNDUP(sizeof(base_xlation_table), SMALL_PAGE_SIZE);
463 
464 	prtn->xlat_tables = (void *)tbl;
465 	memset(prtn->xlat_tables, 0, XLAT_TABLES_SIZE);
466 	tbl += XLAT_TABLES_SIZE;
467 	assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
468 
469 	prtn->l2_ta_tables = (void *)tbl;
470 	prtn->xlat_tables_used = 0;
471 	prtn->asid = asid;
472 
473 	return prtn;
474 }
475 
476 void core_free_mmu_prtn(struct mmu_partition *prtn)
477 {
478 	asid_free(prtn->asid);
479 	nex_free(prtn);
480 }
481 
482 void core_mmu_set_prtn(struct mmu_partition *prtn)
483 {
484 	uint64_t ttbr;
485 	/*
486 	 * We are changing the mappings of the current CPU,
487 	 * so make sure that we cannot be rescheduled.
488 	 */
489 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
490 
491 	current_prtn[get_core_pos()] = prtn;
492 
493 	ttbr = virt_to_phys(prtn->base_tables[0][get_core_pos()]);
494 
495 	write_ttbr0_el1(ttbr | ((paddr_t)prtn->asid << TTBR_ASID_SHIFT));
496 	isb();
497 	tlbi_all();
498 }
499 
500 void core_mmu_set_default_prtn(void)
501 {
502 	core_mmu_set_prtn(&default_partition);
503 }
504 
505 void core_mmu_set_default_prtn_tbl(void)
506 {
507 	size_t n = 0;
508 
509 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
510 		current_prtn[n] = &default_partition;
511 }
512 #endif
513 
514 static uint64_t *core_mmu_xlat_table_alloc(struct mmu_partition *prtn)
515 {
516 	uint64_t *new_table = NULL;
517 
518 	if (prtn->xlat_tables_used >= MAX_XLAT_TABLES) {
519 		EMSG("%u xlat tables exhausted", MAX_XLAT_TABLES);
520 
521 		return NULL;
522 	}
523 
524 	new_table = prtn->xlat_tables[prtn->xlat_tables_used++];
525 
526 	DMSG("xlat tables used %u / %u",
527 	     prtn->xlat_tables_used, MAX_XLAT_TABLES);
528 
529 	return new_table;
530 }
531 
532 /*
533  * Given an entry that points to a table, return the virtual address
534  * of the table it points to, or NULL otherwise.
535  */
536 static void *core_mmu_xlat_table_entry_pa2va(struct mmu_partition *prtn,
537 					     unsigned int level,
538 					     uint64_t entry)
539 {
540 	paddr_t pa = 0;
541 	void *va = NULL;
542 
543 	if ((entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
544 	    level >= XLAT_TABLE_LEVEL_MAX)
545 		return NULL;
546 
547 	pa = entry & OUTPUT_ADDRESS_MASK;
548 
549 	if (!IS_ENABLED(CFG_VIRTUALIZATION) || prtn == &default_partition)
550 		va = phys_to_virt(pa, MEM_AREA_TEE_RAM_RW_DATA,
551 				  XLAT_TABLE_SIZE);
552 	else
553 		va = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
554 				  XLAT_TABLE_SIZE);
555 
556 	return va;
557 }
558 
559 /*
560  * For an entry that points to a table: allocate a new table and copy the
561  * pointed-to table into it. This is done for the requested entry only,
562  * without descending into the entries of the pointed-to table.
563  *
564  * Success is returned for non-table entries, as there is nothing to do.
565  */
566 __maybe_unused
567 static bool core_mmu_entry_copy(struct core_mmu_table_info *tbl_info,
568 				unsigned int idx)
569 {
570 	uint64_t *orig_table = NULL;
571 	uint64_t *new_table = NULL;
572 	uint64_t *entry = NULL;
573 	struct mmu_partition *prtn = NULL;
574 
575 #ifdef CFG_VIRTUALIZATION
576 	prtn = tbl_info->prtn;
577 #else
578 	prtn = &default_partition;
579 #endif
580 	assert(prtn);
581 
582 	if (idx >= tbl_info->num_entries)
583 		return false;
584 
585 	entry = (uint64_t *)tbl_info->table + idx;
586 
587 	/* Nothing to do for non-table entries */
588 	if ((*entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
589 	    tbl_info->level >= XLAT_TABLE_LEVEL_MAX)
590 		return true;
591 
592 	new_table = core_mmu_xlat_table_alloc(prtn);
593 	if (!new_table)
594 		return false;
595 
596 	orig_table = core_mmu_xlat_table_entry_pa2va(prtn, tbl_info->level,
597 						     *entry);
598 	if (!orig_table)
599 		return false;
600 
601 	/* Copy original table content to new table */
602 	memcpy(new_table, orig_table, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
603 
604 	/* Point to the new table */
605 	*entry = virt_to_phys(new_table) | (*entry & ~OUTPUT_ADDRESS_MASK);
606 
607 	return true;
608 }
609 
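/*
 * Initialize the core (TEE) mappings of a partition: check that all
 * regions are page aligned, clear the base tables, map every region
 * except dynamic vaspace ones, and finally copy the primary core's base
 * table to the other cores.
 */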
610 static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
611 				   struct tee_mmap_region *mm)
612 {
613 	size_t n;
614 
615 	assert(prtn && mm);
616 
617 	for (n = 0; !core_mmap_is_end_of_table(mm + n); n++) {
618 		debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
619 			    mm[n].va, mm[n].pa, mm[n].size, mm[n].attr);
620 
621 		if (!IS_PAGE_ALIGNED(mm[n].pa) || !IS_PAGE_ALIGNED(mm[n].size))
622 			panic("unaligned region");
623 	}
624 
625 	/* Clear table before use */
626 	memset(prtn->base_tables, 0, sizeof(base_xlation_table));
627 
628 	for (n = 0; !core_mmap_is_end_of_table(mm + n); n++)
629 		if (!core_mmu_is_dynamic_vaspace(mm + n))
630 			core_mmu_map_region(prtn, mm + n);
631 
632 	/*
633 	 * The primary mapping table is ready at index get_core_pos(),
634 	 * which may not be zero. Use that index as the copy source.
635 	 */
636 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
637 		if (n == get_core_pos())
638 			continue;
639 
640 		memcpy(prtn->base_tables[0][n],
641 		       prtn->base_tables[0][get_core_pos()],
642 		       XLAT_ENTRY_SIZE * NUM_BASE_LEVEL_ENTRIES);
643 	}
644 }
645 
646 /*
647  * In order to support 32-bit TAs we will have to find
648  * a user VA base in the region [1GB, 4GB[.
649  * Due to an OP-TEE design limitation, the TA's page table must be an entry
650  * inside a level 1 page table.
651  *
652  * Available options are only these:
653  * - base level 0 entry 0 - [0GB, 512GB[
654  *   - level 1 entry 0 - [0GB, 1GB[
655  *   - level 1 entry 1 - [1GB, 2GB[           <----
656  *   - level 1 entry 2 - [2GB, 3GB[           <----
657  *   - level 1 entry 3 - [3GB, 4GB[           <----
658  *   - level 1 entry 4 - [4GB, 5GB[
659  *   - ...
660  * - ...
661  *
662  * - base level 1 entry 0 - [0GB, 1GB[
663  * - base level 1 entry 1 - [1GB, 2GB[        <----
664  * - base level 1 entry 2 - [2GB, 3GB[        <----
665  * - base level 1 entry 3 - [3GB, 4GB[        <----
666  * - base level 1 entry 4 - [4GB, 5GB[
667  * - ...
668  */
669 static void set_user_va_idx(struct mmu_partition *prtn)
670 {
671 	uint64_t *tbl = NULL;
672 	unsigned int n = 0;
673 
674 	assert(prtn);
675 
676 	tbl = prtn->base_tables[0][get_core_pos()];
677 
678 	/*
679 	 * If base level is 0, then we must use its entry 0.
680 	 */
681 	if (CORE_MMU_BASE_TABLE_LEVEL == 0) {
682 		/*
683 		 * If base level 0 entry 0 is not used, then we can use
684 		 * level 1 entry 1 inside it (the level 1 table will be
685 		 * allocated later).
686 		 */
687 		if ((tbl[0] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
688 			user_va_idx = 1;
689 
690 			return;
691 		}
692 
693 		assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
694 
695 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
696 		assert(tbl);
697 	}
698 
699 	/*
700 	 * Search level 1 table (i.e. 1GB mapping per entry) for
701 	 * an empty entry in the range [1GB, 4GB[.
702 	 */
703 	for (n = 1; n < 4; n++) {
704 		if ((tbl[n] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
705 			user_va_idx = n;
706 			break;
707 		}
708 	}
709 
710 	assert(user_va_idx != -1);
711 }
712 
713 /*
714  * Set up an entry inside a core level 1 page table for the TA memory mapping
715  *
716  * If base table level is 1 - user_va_idx is already the index,
717  *                            so nothing to do.
718  * If base table level is 0 - we might need to allocate entry 0 of the base
719  *                            table, as the TA's page table is an entry
720  *                            inside a level 1 page table.
721  */
722 static void core_init_mmu_prtn_ta_core(struct mmu_partition *prtn
723 				       __maybe_unused,
724 				       unsigned int base_idx __maybe_unused,
725 				       unsigned int core __maybe_unused)
726 {
727 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
728 	struct core_mmu_table_info tbl_info = { };
729 	uint64_t *tbl = NULL;
730 	uintptr_t idx = 0;
731 
732 	assert(user_va_idx != -1);
733 	COMPILE_TIME_ASSERT(MAX_XLAT_TABLES <
734 			    (1 << (8 * sizeof(prtn->user_l1_table_idx[0][0]))));
735 
736 	tbl = prtn->base_tables[base_idx][core];
737 
738 	/*
739 	 * If the base level is 0, then user_va_idx refers to the
740 	 * level 1 page table pointed to by base level 0 entry 0.
741 	 */
742 	core_mmu_set_info_table(&tbl_info, 0, 0, tbl);
743 #ifdef CFG_VIRTUALIZATION
744 	tbl_info.prtn = prtn;
745 #endif
746 
747 	/*
748 	 * If this isn't the core that created the initial table
749 	 * mappings, then the level 1 table must be copied, as it
750 	 * will hold the pointer to the user mapping table, which
751 	 * differs per core.
752 	 */
753 	if (core != get_core_pos()) {
754 		if (!core_mmu_entry_copy(&tbl_info, 0))
755 			panic();
756 	}
757 
758 	if (!core_mmu_entry_to_finer_grained(&tbl_info, 0, true))
759 		panic();
760 
761 	/*
762 	 * The base level table should now be ready with a table descriptor
763 	 */
764 	assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
765 
766 	tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
767 	assert(tbl);
768 
769 	idx = ((uintptr_t)&tbl[user_va_idx] - (uintptr_t)prtn->xlat_tables) /
770 	      sizeof(xlat_tbl_t);
771 	assert(idx < prtn->xlat_tables_used);
772 
773 	prtn->user_l1_table_idx[base_idx][core] = idx;
774 #endif
775 }
776 
777 static void core_init_mmu_prtn_ta(struct mmu_partition *prtn)
778 {
779 	unsigned int base_idx = 0;
780 	unsigned int core = 0;
781 
782 	assert(user_va_idx != -1);
783 
784 	for (base_idx = 0; base_idx < NUM_BASE_TABLES; base_idx++)
785 		for (core = 0; core < CFG_TEE_CORE_NB_CORE; core++)
786 			core_init_mmu_prtn_ta_core(prtn, base_idx, core);
787 }
788 
789 void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm)
790 {
791 	core_init_mmu_prtn_tee(prtn, mm);
792 	core_init_mmu_prtn_ta(prtn);
793 }
794 
795 void core_init_mmu(struct tee_mmap_region *mm)
796 {
797 	uint64_t max_va = 0;
798 	size_t n;
799 
800 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_SHIFT ==
801 			    XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL));
802 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
803 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_OFFSET ==
804 			   sizeof(base_xlation_table) / 2);
805 #endif
806 	COMPILE_TIME_ASSERT(XLAT_TABLES_SIZE == sizeof(xlat_tables));
807 
808 	/* Initialize default pagetables */
809 	core_init_mmu_prtn_tee(&default_partition, mm);
810 
811 	for (n = 0; !core_mmap_is_end_of_table(mm + n); n++) {
812 		vaddr_t va_end = mm[n].va + mm[n].size - 1;
813 
814 		if (va_end > max_va)
815 			max_va = va_end;
816 	}
817 
818 	set_user_va_idx(&default_partition);
819 
820 	core_init_mmu_prtn_ta(&default_partition);
821 
822 	COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS > L1_XLAT_ADDRESS_SHIFT);
823 	assert(max_va < BIT64(CFG_LPAE_ADDR_SPACE_BITS));
824 }
825 
826 #ifdef CFG_WITH_PAGER
827 /* Prefer to consume only 1 base xlat table for the whole mapping */
828 bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
829 {
830 	size_t base_level_size = BASE_XLAT_BLOCK_SIZE;
831 	paddr_t base_level_mask = base_level_size - 1;
832 
833 	return (paddr & base_level_mask) > (base_level_size / 2);
834 }
835 #endif
836 
837 #ifdef ARM32
838 void core_init_mmu_regs(struct core_mmu_config *cfg)
839 {
840 	uint32_t ttbcr = 0;
841 	uint32_t mair = 0;
842 
843 	cfg->ttbr0_base = virt_to_phys(base_xlation_table[0][0]);
844 	cfg->ttbr0_core_offset = sizeof(base_xlation_table[0][0]);
845 
846 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
847 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
848 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
849 	/*
850 	 * Tagged memory isn't supported in 32-bit mode, so map tagged
851 	 * memory as normal memory instead.
852 	 */
853 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
854 			      ATTR_TAGGED_NORMAL_MEM_INDEX);
855 	cfg->mair0 = mair;
856 
857 	ttbcr = TTBCR_EAE;
858 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
859 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
860 	ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
861 	ttbcr |= TTBCR_EPD1;	/* Disable the use of TTBR1 */
862 
863 	/* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
864 	cfg->ttbcr = ttbcr;
865 }
866 #endif /*ARM32*/
867 
868 #ifdef ARM64
869 static unsigned int get_physical_addr_size_bits(void)
870 {
871 	/*
872 	 * Intermediate Physical Address Size.
873 	 * 0b000      32 bits, 4GB.
874 	 * 0b001      36 bits, 64GB.
875 	 * 0b010      40 bits, 1TB.
876 	 * 0b011      42 bits, 4TB.
877 	 * 0b100      44 bits, 16TB.
878 	 * 0b101      48 bits, 256TB.
879 	 * 0b110      52 bits, 4PB (not supported)
880 	 */
881 
882 	COMPILE_TIME_ASSERT(CFG_CORE_ARM64_PA_BITS >= 32);
883 
884 	if (CFG_CORE_ARM64_PA_BITS <= 32)
885 		return TCR_PS_BITS_4GB;
886 
887 	if (CFG_CORE_ARM64_PA_BITS <= 36)
888 		return TCR_PS_BITS_64GB;
889 
890 	if (CFG_CORE_ARM64_PA_BITS <= 40)
891 		return TCR_PS_BITS_1TB;
892 
893 	if (CFG_CORE_ARM64_PA_BITS <= 42)
894 		return TCR_PS_BITS_4TB;
895 
896 	if (CFG_CORE_ARM64_PA_BITS <= 44)
897 		return TCR_PS_BITS_16TB;
898 
899 	/* Physical address can't exceed 48 bits */
900 	COMPILE_TIME_ASSERT(CFG_CORE_ARM64_PA_BITS <= 48);
901 	/* CFG_CORE_ARM64_PA_BITS <= 48 */
902 	return TCR_PS_BITS_256TB;
903 }
904 
905 void core_init_mmu_regs(struct core_mmu_config *cfg)
906 {
907 	uint64_t ips = get_physical_addr_size_bits();
908 	uint64_t mair = 0;
909 	uint64_t tcr = 0;
910 
911 	cfg->ttbr0_el1_base = virt_to_phys(base_xlation_table[0][0]);
912 	cfg->ttbr0_core_offset = sizeof(base_xlation_table[0][0]);
913 
914 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
915 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
916 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
917 	/*
918 	 * If MEMTAG isn't enabled, map tagged memory as normal memory
919 	 * instead.
920 	 */
921 	if (memtag_is_enabled())
922 		mair |= MAIR_ATTR_SET(ATTR_TAGGED_NORMAL_MEM,
923 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
924 	else
925 		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
926 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
927 	cfg->mair_el1 = mair;
928 
929 	tcr = TCR_RES1;
930 	tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
931 	tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
932 	tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
933 	tcr |= ips << TCR_EL1_IPS_SHIFT;
934 	tcr |= 64 - CFG_LPAE_ADDR_SPACE_BITS;
935 
936 	/* Disable the use of TTBR1 */
937 	tcr |= TCR_EPD1;
938 
939 	/*
940 	 * TCR.A1 = 0 => ASID is stored in TTBR0
941 	 * TCR.AS = 0 => Same ASID size as in AArch32/ARMv7
942 	 */
943 	cfg->tcr_el1 = tcr;
944 }
945 #endif /*ARM64*/
946 
947 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
948 		unsigned level, vaddr_t va_base, void *table)
949 {
950 	tbl_info->level = level;
951 	tbl_info->table = table;
952 	tbl_info->va_base = va_base;
953 	tbl_info->shift = XLAT_ADDR_SHIFT(level);
954 
955 #if (CORE_MMU_BASE_TABLE_LEVEL > 0)
956 	assert(level >= CORE_MMU_BASE_TABLE_LEVEL);
957 #endif
958 	assert(level <= XLAT_TABLE_LEVEL_MAX);
959 
960 	if (level == CORE_MMU_BASE_TABLE_LEVEL)
961 		tbl_info->num_entries = NUM_BASE_LEVEL_ENTRIES;
962 	else
963 		tbl_info->num_entries = XLAT_TABLE_ENTRIES;
964 }
965 
966 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
967 {
968 	vaddr_t va_range_base;
969 	void *tbl = get_prtn()->l2_ta_tables[thread_get_id()];
970 
971 	core_mmu_get_user_va_range(&va_range_base, NULL);
972 	core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
973 }
974 
975 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
976 			      struct core_mmu_user_map *map)
977 {
978 	struct core_mmu_table_info dir_info;
979 
980 	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
981 
982 	core_mmu_get_user_pgdir(&dir_info);
983 	memset(dir_info.table, 0, PGT_SIZE);
984 	core_mmu_populate_user_map(&dir_info, uctx);
985 	map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
986 	map->asid = uctx->vm_info.asid;
987 }
988 
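/*
 * Walk the translation tables of 'prtn' (or of the current partition when
 * 'prtn' is NULL) for 'va' until 'max_level', the last level or a
 * non-table entry is reached, and fill 'tbl_info' with the table holding
 * the final entry. Returns false if the walk cannot be completed, for
 * instance when 'va' is out of range or when the result would be the
 * per-CPU base table while foreign interrupts are unmasked.
 */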
989 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
990 			 unsigned max_level,
991 			 struct core_mmu_table_info *tbl_info)
992 {
993 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
994 	unsigned int num_entries = NUM_BASE_LEVEL_ENTRIES;
995 	unsigned int level = CORE_MMU_BASE_TABLE_LEVEL;
996 	vaddr_t va_base = 0;
997 	bool ret = false;
998 	uint64_t *tbl;
999 
1000 	if (!prtn)
1001 		prtn = get_prtn();
1002 	tbl = prtn->base_tables[0][get_core_pos()];
1003 
1004 	while (true) {
1005 		unsigned int level_size_shift = XLAT_ADDR_SHIFT(level);
1006 		unsigned int n = (va - va_base) >> level_size_shift;
1007 
1008 		if (n >= num_entries)
1009 			goto out;
1010 
1011 		if (level == max_level || level == XLAT_TABLE_LEVEL_MAX ||
1012 		    (tbl[n] & TABLE_DESC) != TABLE_DESC) {
1013 			/*
1014 			 * We've either reached max_level, a block
1015 			 * mapping entry or an "invalid" mapping entry.
1016 			 */
1017 
1018 			/*
1019 			 * The base level is the CPU-specific translation
1020 			 * table. It doesn't make sense to return anything
1021 			 * based on it unless foreign interrupts are
1022 			 * already masked.
1023 			 */
1024 			if (level == CORE_MMU_BASE_TABLE_LEVEL &&
1025 			    !(exceptions & THREAD_EXCP_FOREIGN_INTR))
1026 				goto out;
1027 
1028 			tbl_info->table = tbl;
1029 			tbl_info->va_base = va_base;
1030 			tbl_info->level = level;
1031 			tbl_info->shift = level_size_shift;
1032 			tbl_info->num_entries = num_entries;
1033 #ifdef CFG_VIRTUALIZATION
1034 			tbl_info->prtn = prtn;
1035 #endif
1036 			ret = true;
1037 			goto out;
1038 		}
1039 
1040 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, level, tbl[n]);
1041 
1042 		if (!tbl)
1043 			goto out;
1044 
1045 		va_base += (vaddr_t)n << level_size_shift;
1046 		level++;
1047 		num_entries = XLAT_TABLE_ENTRIES;
1048 	}
1049 out:
1050 	thread_unmask_exceptions(exceptions);
1051 	return ret;
1052 }
1053 
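/*
 * Replace the block or invalid entry at 'idx' with a table descriptor
 * pointing to a newly allocated next-level table. A former block mapping
 * is re-created in the new table as next-level blocks covering the same
 * range, so the effective mapping is unchanged but can now be modified
 * with a finer granularity. Entries that already point to a table are
 * left untouched.
 */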
1054 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
1055 				     unsigned int idx, bool secure __unused)
1056 {
1057 	uint64_t *new_table;
1058 	uint64_t *entry;
1059 	int i;
1060 	paddr_t pa;
1061 	uint64_t attr;
1062 	paddr_t block_size_on_next_lvl = XLAT_BLOCK_SIZE(tbl_info->level + 1);
1063 	struct mmu_partition *prtn;
1064 
1065 #ifdef CFG_VIRTUALIZATION
1066 	prtn = tbl_info->prtn;
1067 #else
1068 	prtn = &default_partition;
1069 #endif
1070 	assert(prtn);
1071 
1072 	if (tbl_info->level >= XLAT_TABLE_LEVEL_MAX ||
1073 	    idx >= tbl_info->num_entries)
1074 		return false;
1075 
1076 	entry = (uint64_t *)tbl_info->table + idx;
1077 
1078 	if ((*entry & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
1079 		return true;
1080 
1081 	new_table = core_mmu_xlat_table_alloc(prtn);
1082 	if (!new_table)
1083 		return false;
1084 
1085 	if (*entry) {
1086 		pa = *entry & OUTPUT_ADDRESS_MASK;
1087 		attr = *entry & ~(OUTPUT_ADDRESS_MASK | DESC_ENTRY_TYPE_MASK);
1088 		for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
1089 			new_table[i] = pa | attr | BLOCK_DESC;
1090 			pa += block_size_on_next_lvl;
1091 		}
1092 	} else {
1093 		memset(new_table, 0, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
1094 	}
1095 
1096 	*entry = virt_to_phys(new_table) | TABLE_DESC;
1097 
1098 	return true;
1099 }
1100 
1101 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
1102 				  paddr_t pa, uint32_t attr)
1103 {
1104 	uint64_t *tbl = table;
1105 	uint64_t desc = mattr_to_desc(level, attr);
1106 
1107 	tbl[idx] = desc | pa;
1108 }
1109 
1110 void core_mmu_get_entry_primitive(const void *table, size_t level,
1111 				  size_t idx, paddr_t *pa, uint32_t *attr)
1112 {
1113 	const uint64_t *tbl = table;
1114 
1115 	if (pa)
1116 		*pa = tbl[idx] & GENMASK_64(47, 12);
1117 
1118 	if (attr)
1119 		*attr = desc_to_mattr(level, tbl[idx]);
1120 }
1121 
1122 bool core_mmu_user_va_range_is_defined(void)
1123 {
1124 	return user_va_idx != -1;
1125 }
1126 
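/*
 * Report the virtual address range reserved for user (TA) mappings.
 * Worked example (illustrative): with user_va_idx == 1 the range starts
 * at 1 << L1_XLAT_ADDRESS_SHIFT = 0x40000000 and covers one level 1
 * entry, i.e. 1GB.
 */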
1127 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
1128 {
1129 	assert(user_va_idx != -1);
1130 
1131 	if (base)
1132 		*base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
1133 	if (size)
1134 		*size = BIT64(L1_XLAT_ADDRESS_SHIFT);
1135 }
1136 
1137 static uint64_t *core_mmu_get_user_mapping_entry(struct mmu_partition *prtn,
1138 						 unsigned int base_idx)
1139 {
1140 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1141 	uint8_t idx = 0;
1142 	uint64_t *tbl = NULL;
1143 #endif
1144 
1145 	assert(user_va_idx != -1);
1146 
1147 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1148 	idx = prtn->user_l1_table_idx[base_idx][get_core_pos()];
1149 	tbl = prtn->xlat_tables[idx];
1150 
1151 	return &tbl[user_va_idx];
1152 #else
1153 	return &prtn->base_tables[base_idx][get_core_pos()][user_va_idx];
1154 #endif
1155 }
1156 
1157 bool core_mmu_user_mapping_is_active(void)
1158 {
1159 	bool ret = false;
1160 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1161 	uint64_t *entry = NULL;
1162 
1163 	entry = core_mmu_get_user_mapping_entry(get_prtn(), 0);
1164 	ret = (*entry != 0);
1165 
1166 	thread_unmask_exceptions(exceptions);
1167 
1168 	return ret;
1169 }
1170 
1171 #ifdef ARM32
1172 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1173 {
1174 	struct mmu_partition *prtn = get_prtn();
1175 	uint64_t *entry = NULL;
1176 
1177 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1178 
1179 	map->user_map = *entry;
1180 	if (map->user_map) {
1181 		map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
1182 			    TTBR_ASID_MASK;
1183 	} else {
1184 		map->asid = 0;
1185 	}
1186 }
1187 
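/*
 * Install the user mapping for the current core, or remove it when 'map'
 * is NULL or carries no user_map. The ASID field in TTBR0 is cleared
 * before the mapping entries are changed and the new ASID is only written
 * once the updated entries are guaranteed to be visible; the TLB and
 * icache are invalidated afterwards.
 */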
1188 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1189 {
1190 	uint64_t ttbr = 0;
1191 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1192 	struct mmu_partition *prtn = get_prtn();
1193 	uint64_t *entries[NUM_BASE_TABLES] = { };
1194 	unsigned int i = 0;
1195 
1196 	ttbr = read_ttbr0_64bit();
1197 	/* Clear ASID */
1198 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1199 	write_ttbr0_64bit(ttbr);
1200 	isb();
1201 
1202 	for (i = 0; i < NUM_BASE_TABLES; i++)
1203 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1204 
1205 	/* Set the new map */
1206 	if (map && map->user_map) {
1207 		for (i = 0; i < NUM_BASE_TABLES; i++)
1208 			*entries[i] = map->user_map;
1209 
1210 		dsb();	/* Make sure the write above is visible */
1211 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1212 		write_ttbr0_64bit(ttbr);
1213 		isb();
1214 	} else {
1215 		for (i = 0; i < NUM_BASE_TABLES; i++)
1216 			*entries[i] = INVALID_DESC;
1217 
1218 		dsb();	/* Make sure the write above is visible */
1219 	}
1220 
1221 	tlbi_all();
1222 	icache_inv_all();
1223 
1224 	thread_unmask_exceptions(exceptions);
1225 }
1226 
1227 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1228 {
1229 	assert(fault_descr & FSR_LPAE);
1230 
1231 	switch (fault_descr & FSR_STATUS_MASK) {
1232 	case 0x21: /* b100001 Alignment fault */
1233 		return CORE_MMU_FAULT_ALIGNMENT;
1234 	case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
1235 		return CORE_MMU_FAULT_ASYNC_EXTERNAL;
1236 	case 0x22: /* b100010 Debug event */
1237 		return CORE_MMU_FAULT_DEBUG_EVENT;
1238 	default:
1239 		break;
1240 	}
1241 
1242 	switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
1243 	case 0x1: /* b0001LL Translation fault */
1244 		return CORE_MMU_FAULT_TRANSLATION;
1245 	case 0x2: /* b0010LL Access flag fault */
1246 	case 0x3: /* b0011LL Permission fault */
1247 		if (fault_descr & FSR_WNR)
1248 			return CORE_MMU_FAULT_WRITE_PERMISSION;
1249 		else
1250 			return CORE_MMU_FAULT_READ_PERMISSION;
1251 	default:
1252 		return CORE_MMU_FAULT_OTHER;
1253 	}
1254 }
1255 #endif /*ARM32*/
1256 
1257 #ifdef ARM64
1258 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1259 {
1260 	struct mmu_partition *prtn = get_prtn();
1261 	uint64_t *entry = NULL;
1262 
1263 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1264 
1265 	map->user_map = *entry;
1266 	if (map->user_map) {
1267 		map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
1268 			    TTBR_ASID_MASK;
1269 	} else {
1270 		map->asid = 0;
1271 	}
1272 }
1273 
1274 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1275 {
1276 	uint64_t ttbr = 0;
1277 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1278 	struct mmu_partition *prtn = get_prtn();
1279 	uint64_t *entries[NUM_BASE_TABLES] = { };
1280 	unsigned int i = 0;
1281 
1282 	ttbr = read_ttbr0_el1();
1283 	/* Clear ASID */
1284 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1285 	write_ttbr0_el1(ttbr);
1286 	isb();
1287 
1288 	for (i = 0; i < NUM_BASE_TABLES; i++)
1289 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1290 
1291 	/* Set the new map */
1292 	if (map && map->user_map) {
1293 		for (i = 0; i < NUM_BASE_TABLES; i++)
1294 			*entries[i] = map->user_map;
1295 
1296 		dsb();	/* Make sure the write above is visible */
1297 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1298 		write_ttbr0_el1(ttbr);
1299 		isb();
1300 	} else {
1301 		for (i = 0; i < NUM_BASE_TABLES; i++)
1302 			*entries[i] = INVALID_DESC;
1303 
1304 		dsb();	/* Make sure the write above is visible */
1305 	}
1306 
1307 	tlbi_all();
1308 	icache_inv_all();
1309 
1310 	thread_unmask_exceptions(exceptions);
1311 }
1312 
1313 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1314 {
1315 	switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
1316 	case ESR_EC_SP_ALIGN:
1317 	case ESR_EC_PC_ALIGN:
1318 		return CORE_MMU_FAULT_ALIGNMENT;
1319 	case ESR_EC_IABT_EL0:
1320 	case ESR_EC_DABT_EL0:
1321 	case ESR_EC_IABT_EL1:
1322 	case ESR_EC_DABT_EL1:
1323 		switch (fault_descr & ESR_FSC_MASK) {
1324 		case ESR_FSC_SIZE_L0:
1325 		case ESR_FSC_SIZE_L1:
1326 		case ESR_FSC_SIZE_L2:
1327 		case ESR_FSC_SIZE_L3:
1328 		case ESR_FSC_TRANS_L0:
1329 		case ESR_FSC_TRANS_L1:
1330 		case ESR_FSC_TRANS_L2:
1331 		case ESR_FSC_TRANS_L3:
1332 			return CORE_MMU_FAULT_TRANSLATION;
1333 		case ESR_FSC_ACCF_L1:
1334 		case ESR_FSC_ACCF_L2:
1335 		case ESR_FSC_ACCF_L3:
1336 		case ESR_FSC_PERMF_L1:
1337 		case ESR_FSC_PERMF_L2:
1338 		case ESR_FSC_PERMF_L3:
1339 			if (fault_descr & ESR_ABT_WNR)
1340 				return CORE_MMU_FAULT_WRITE_PERMISSION;
1341 			else
1342 				return CORE_MMU_FAULT_READ_PERMISSION;
1343 		case ESR_FSC_ALIGN:
1344 			return CORE_MMU_FAULT_ALIGNMENT;
1345 		case ESR_FSC_TAG_CHECK:
1346 			return CORE_MMU_FAULT_TAG_CHECK;
1347 		default:
1348 			return CORE_MMU_FAULT_OTHER;
1349 		}
1350 	default:
1351 		return CORE_MMU_FAULT_OTHER;
1352 	}
1353 }
1354 #endif /*ARM64*/
1355