// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/cache_helpers.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/tlb_helpers.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/pgt_cache.h>
#include <platform_config.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_WITH_LPAE
#error This file is not to be used with LPAE
#endif

#ifdef CFG_VIRTUALIZATION
#error Currently V7 MMU code does not support virtualization
#endif

#ifndef DEBUG_XLAT_TABLE
#define DEBUG_XLAT_TABLE 0
#endif

#if DEBUG_XLAT_TABLE
#define debug_print(...) DMSG_RAW(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
41
42 /*
43 * MMU related values
44 */
45
46 /* Sharable */
47 #define TEE_MMU_TTB_S (1 << 1)
48
49 /* Not Outer Sharable */
50 #define TEE_MMU_TTB_NOS (1 << 5)
51
52 /* Normal memory, Inner Non-cacheable */
53 #define TEE_MMU_TTB_IRGN_NC 0
54
55 /* Normal memory, Inner Write-Back Write-Allocate Cacheable */
56 #define TEE_MMU_TTB_IRGN_WBWA (1 << 6)
57
58 /* Normal memory, Inner Write-Through Cacheable */
59 #define TEE_MMU_TTB_IRGN_WT 1
60
61 /* Normal memory, Inner Write-Back no Write-Allocate Cacheable */
62 #define TEE_MMU_TTB_IRGN_WB (1 | (1 << 6))
63
64 /* Normal memory, Outer Write-Back Write-Allocate Cacheable */
65 #define TEE_MMU_TTB_RNG_WBWA (1 << 3)
66
67 /* Normal memory, Outer Write-Back no Write-Allocate Cacheable */
68 #define TEE_MMU_TTB_RNG_WB (3 << 3)
69
70 #ifndef CFG_NO_SMP
71 #define TEE_MMU_DEFAULT_ATTRS \
72 (TEE_MMU_TTB_S | TEE_MMU_TTB_NOS | \
73 TEE_MMU_TTB_IRGN_WBWA | TEE_MMU_TTB_RNG_WBWA)
74 #else
75 #define TEE_MMU_DEFAULT_ATTRS (TEE_MMU_TTB_IRGN_WB | TEE_MMU_TTB_RNG_WB)
76 #endif
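/*
 * The defines above are the TTBR0/TTBR1 low-order attribute bits that are
 * ORed into the translation table base address: S/NOS select the
 * shareability of the table walk, IRGN/RNG its inner/outer cacheability.
 * For example, in the SMP case the bits evaluate to 0x6A
 * (S | NOS | IRGN_WBWA | RNG_WBWA).
 */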


#define INVALID_DESC 0x0

#define SECTION_SHIFT 20
#define SECTION_MASK 0x000fffff
#define SECTION_SIZE 0x00100000

/* armv7 memory mapping attributes: section mapping */
#define SECTION_SECURE (0 << 19)
#define SECTION_NOTSECURE (1 << 19)
#define SECTION_SHARED (1 << 16)
#define SECTION_NOTGLOBAL (1 << 17)
#define SECTION_ACCESS_FLAG (1 << 10)
#define SECTION_UNPRIV (1 << 11)
#define SECTION_RO (1 << 15)
#define SECTION_TEXCB(texcb) ((((texcb) >> 2) << 12) | \
			      ((((texcb) >> 1) & 0x1) << 3) | \
			      (((texcb) & 0x1) << 2))
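/*
 * SECTION_TEXCB() places a 3-bit memory-type index (the concatenated
 * TEX[0]:C:B bits used with TEX remap) into a section descriptor:
 * index bit 2 goes to TEX[0] (descriptor bit 12), bit 1 to C (bit 3) and
 * bit 0 to B (bit 2). E.g. ATTR_NORMAL_CACHED_INDEX (0x1) sets only the
 * B bit, i.e. descriptor bit 2.
 */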
#define SECTION_DEVICE SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL SECTION_TEXCB(ATTR_DEVICE_INDEX)
#define SECTION_NORMAL_CACHED SECTION_TEXCB(ATTR_NORMAL_CACHED_INDEX)
#define SECTION_STRONG_O SECTION_TEXCB(ATTR_STRONG_O_INDEX)
#define SECTION_TAGGED_CACHED SECTION_TEXCB(ATTR_TAGGED_CACHED_INDEX)

#define SECTION_XN (1 << 4)
#define SECTION_PXN (1 << 0)
#define SECTION_SECTION (2 << 0)

#define SECTION_PT_NOTSECURE (1 << 3)
#define SECTION_PT_PT (1 << 0)

#define SECTION_PT_ATTR_MASK ~((1 << 10) - 1)

#define SMALL_PAGE_SMALL_PAGE (1 << 1)
#define SMALL_PAGE_SHARED (1 << 10)
#define SMALL_PAGE_NOTGLOBAL (1 << 11)
#define SMALL_PAGE_TEXCB(texcb) ((((texcb) >> 2) << 6) | \
				 ((((texcb) >> 1) & 0x1) << 3) | \
				 (((texcb) & 0x1) << 2))
#define SMALL_PAGE_DEVICE SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL SMALL_PAGE_TEXCB(ATTR_DEVICE_INDEX)
#define SMALL_PAGE_NORMAL_CACHED \
		SMALL_PAGE_TEXCB(ATTR_NORMAL_CACHED_INDEX)
#define SMALL_PAGE_STRONG_O SMALL_PAGE_TEXCB(ATTR_STRONG_O_INDEX)
#define SMALL_PAGE_TAGGED_CACHED \
		SMALL_PAGE_TEXCB(ATTR_TAGGED_CACHED_INDEX)
#define SMALL_PAGE_ACCESS_FLAG (1 << 4)
#define SMALL_PAGE_UNPRIV (1 << 5)
#define SMALL_PAGE_RO (1 << 9)
#define SMALL_PAGE_XN (1 << 0)


/* The TEX, C and B bits concatenated */
#define ATTR_DEVICE_INDEX 0x0
#define ATTR_NORMAL_CACHED_INDEX 0x1
#define ATTR_STRONG_O_INDEX 0x2
/* Compat with TEE_MATTR_MEM_TYPE_TAGGED */
#define ATTR_TAGGED_CACHED_INDEX 0x3

#define PRRR_IDX(idx, tr, nos) (((tr) << (2 * (idx))) | \
				((uint32_t)(nos) << ((idx) + 24)))
#define NMRR_IDX(idx, ir, or) (((ir) << (2 * (idx))) | \
			       ((uint32_t)(or) << (2 * (idx) + 16)))
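/*
 * PRRR holds a 2-bit memory-type field per remap index at bits
 * [2*idx+1:2*idx] (0 = Strongly-ordered, 1 = Device, 2 = Normal) and a
 * per-index "not outer shareable" bit at bit (24 + idx). NMRR holds the
 * inner cacheability at bits [2*idx+1:2*idx] and the outer cacheability
 * at bits [2*idx+17:2*idx+16] (0 = NC, 1 = WBWA, 2 = WT, 3 = WB).
 * For example, ATTR_NORMAL_CACHED_PRRR in the SMP case is
 * PRRR_IDX(1, 2, 1) = (2 << 2) | (1 << 25).
 */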
#define PRRR_DS0 (1 << 16)
#define PRRR_DS1 (1 << 17)
#define PRRR_NS0 (1 << 18)
#define PRRR_NS1 (1 << 19)

#define ATTR_DEVICE_PRRR PRRR_IDX(ATTR_DEVICE_INDEX, 1, 0)
#define ATTR_DEVICE_NMRR NMRR_IDX(ATTR_DEVICE_INDEX, 0, 0)

#define ATTR_STRONGLY_O_PRRR PRRR_IDX(ATTR_STRONG_O_INDEX, 0, 0)
#define ATTR_STRONGLY_O_NMRR NMRR_IDX(ATTR_STRONG_O_INDEX, 0, 0)

#ifndef CFG_NO_SMP
#define ATTR_NORMAL_CACHED_PRRR PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 1)
#define ATTR_NORMAL_CACHED_NMRR NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 1, 1)
#define ATTR_TAGGED_CACHED_PRRR PRRR_IDX(ATTR_TAGGED_CACHED_INDEX, 2, 1)
#define ATTR_TAGGED_CACHED_NMRR NMRR_IDX(ATTR_TAGGED_CACHED_INDEX, 1, 1)
#else
#define ATTR_NORMAL_CACHED_PRRR PRRR_IDX(ATTR_NORMAL_CACHED_INDEX, 2, 0)
#define ATTR_NORMAL_CACHED_NMRR NMRR_IDX(ATTR_NORMAL_CACHED_INDEX, 3, 3)
#define ATTR_TAGGED_CACHED_PRRR PRRR_IDX(ATTR_TAGGED_CACHED_INDEX, 2, 0)
#define ATTR_TAGGED_CACHED_NMRR NMRR_IDX(ATTR_TAGGED_CACHED_INDEX, 3, 3)
#endif

#define NUM_L1_ENTRIES 4096
#define NUM_L2_ENTRIES 256

#define L1_TBL_SIZE (NUM_L1_ENTRIES * 4)
#define L2_TBL_SIZE (NUM_L2_ENTRIES * 4)
#define L1_ALIGNMENT L1_TBL_SIZE
#define L2_ALIGNMENT L2_TBL_SIZE

/* Defined to the smallest possible secondary L1 MMU table */
#define TTBCR_N_VALUE 7

/* Number of sections in ttbr0 when user mapping activated */
#define NUM_UL1_ENTRIES (1 << (12 - TTBCR_N_VALUE))
#define UL1_ALIGNMENT (NUM_UL1_ENTRIES * 4)
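/*
 * With TTBCR.N == 7 the first 2^(32 - 7) bytes (32 MiB) of the virtual
 * address space are translated via TTBR0 and everything above that via
 * TTBR1. The TTBR0 table then needs 2^(12 - 7) == 32 section entries,
 * i.e. 128 bytes, and must be aligned to its own size.
 */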
/* TTB attributes */

/* TTB0 of TTBR0 (depends on TTBCR_N_VALUE) */
#define TTB_UL1_MASK (~(UL1_ALIGNMENT - 1))
/* TTB1 of TTBR1 */
#define TTB_L1_MASK (~(L1_ALIGNMENT - 1))

#ifndef MAX_XLAT_TABLES
#ifdef CFG_CORE_ASLR
# define XLAT_TABLE_ASLR_EXTRA 2
#else
# define XLAT_TABLE_ASLR_EXTRA 0
#endif
#define MAX_XLAT_TABLES (4 + XLAT_TABLE_ASLR_EXTRA)
#endif /*!MAX_XLAT_TABLES*/

enum desc_type {
	DESC_TYPE_PAGE_TABLE,
	DESC_TYPE_SECTION,
	DESC_TYPE_SUPER_SECTION,
	DESC_TYPE_LARGE_PAGE,
	DESC_TYPE_SMALL_PAGE,
	DESC_TYPE_INVALID,
};
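/*
 * Descriptor types are identified by the low bits of each entry in the
 * short-descriptor format: at level 1, 0b01 is a page table pointer and
 * 0b10 a section (a supersection when bit 18 is also set); at level 2,
 * 0b01 is a large page and 0b1x a small page. 0b00 is invalid at both
 * levels, see get_desc_type() below.
 */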

typedef uint32_t l1_xlat_tbl_t[NUM_L1_ENTRIES];
typedef uint32_t l2_xlat_tbl_t[NUM_L2_ENTRIES];
typedef uint32_t ul1_xlat_tbl_t[NUM_UL1_ENTRIES];

static l1_xlat_tbl_t main_mmu_l1_ttb
		__aligned(L1_ALIGNMENT) __section(".nozi.mmu.l1");

/* L2 MMU tables */
static l2_xlat_tbl_t main_mmu_l2_ttb[MAX_XLAT_TABLES]
		__aligned(L2_ALIGNMENT) __section(".nozi.mmu.l2");

/* MMU L1 table for TAs, one for each thread */
static ul1_xlat_tbl_t main_mmu_ul1_ttb[CFG_NUM_THREADS]
		__aligned(UL1_ALIGNMENT) __section(".nozi.mmu.ul1");

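/*
 * A set of translation tables: the 16 KiB core L1 table, the pool of L2
 * tables handed out by core_mmu_alloc_l2(), one small L1 table per
 * thread for user (TA) mappings, and a counter of how many L2 tables
 * are in use.
 */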
struct mmu_partition {
	l1_xlat_tbl_t *l1_table;
	l2_xlat_tbl_t *l2_tables;
	ul1_xlat_tbl_t *ul1_tables;
	uint32_t tables_used;
};

static struct mmu_partition default_partition = {
	.l1_table = &main_mmu_l1_ttb,
	.l2_tables = main_mmu_l2_ttb,
	.ul1_tables = main_mmu_ul1_ttb,
	.tables_used = 0,
};

#ifdef CFG_VIRTUALIZATION
static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE];

void core_mmu_set_default_prtn_tbl(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		current_prtn[n] = &default_partition;
}
#endif

static struct mmu_partition *get_prtn(void)
{
#ifdef CFG_VIRTUALIZATION
	return current_prtn[get_core_pos()];
#else
	return &default_partition;
#endif
}

static vaddr_t core_mmu_get_main_ttb_va(struct mmu_partition *prtn)
{
	return (vaddr_t)prtn->l1_table;
}

static paddr_t core_mmu_get_main_ttb_pa(struct mmu_partition *prtn)
{
	paddr_t pa = virt_to_phys((void *)core_mmu_get_main_ttb_va(prtn));

	if (pa & ~TTB_L1_MASK)
		panic("invalid core l1 table");
	return pa;
}

static vaddr_t core_mmu_get_ul1_ttb_va(struct mmu_partition *prtn)
{
	return (vaddr_t)prtn->ul1_tables[thread_get_id()];
}

static paddr_t core_mmu_get_ul1_ttb_pa(struct mmu_partition *prtn)
{
	paddr_t pa = virt_to_phys((void *)core_mmu_get_ul1_ttb_va(prtn));

	if (pa & ~TTB_UL1_MASK)
		panic("invalid user l1 table");
	return pa;
}

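/*
 * Hand out enough L2 tables from the static pool to map 'size' bytes of
 * small pages. Tables are allocated sequentially, zero-initialized and
 * never freed; NULL is returned if the pool would be exhausted.
 */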
static void *core_mmu_alloc_l2(struct mmu_partition *prtn, size_t size)
{
	uint32_t to_alloc = ROUNDUP(size, NUM_L2_ENTRIES * SMALL_PAGE_SIZE) /
			    (NUM_L2_ENTRIES * SMALL_PAGE_SIZE);

	DMSG("L2 table used: %d/%d", prtn->tables_used + to_alloc,
	     MAX_XLAT_TABLES);
	if (prtn->tables_used + to_alloc > MAX_XLAT_TABLES)
		return NULL;

	memset(prtn->l2_tables[prtn->tables_used], 0,
	       sizeof(l2_xlat_tbl_t) * to_alloc);
	prtn->tables_used += to_alloc;
	return prtn->l2_tables[prtn->tables_used - to_alloc];
}

static enum desc_type get_desc_type(unsigned level, uint32_t desc)
{
	assert(level >= 1 && level <= 2);

	if (level == 1) {
		if ((desc & 0x3) == 0x1)
			return DESC_TYPE_PAGE_TABLE;

		if ((desc & 0x2) == 0x2) {
			if (desc & (1 << 18))
				return DESC_TYPE_SUPER_SECTION;
			return DESC_TYPE_SECTION;
		}
	} else {
		if ((desc & 0x3) == 0x1)
			return DESC_TYPE_LARGE_PAGE;

		if ((desc & 0x2) == 0x2)
			return DESC_TYPE_SMALL_PAGE;
	}

	return DESC_TYPE_INVALID;
}

static uint32_t texcb_to_mattr(uint32_t texcb)
{
	COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_MEM_TYPE_DEV);
	COMPILE_TIME_ASSERT(ATTR_NORMAL_CACHED_INDEX ==
			    TEE_MATTR_MEM_TYPE_CACHED);
	COMPILE_TIME_ASSERT(ATTR_STRONG_O_INDEX ==
			    TEE_MATTR_MEM_TYPE_STRONGLY_O);
	COMPILE_TIME_ASSERT(ATTR_TAGGED_CACHED_INDEX ==
			    TEE_MATTR_MEM_TYPE_TAGGED);

	return texcb << TEE_MATTR_MEM_TYPE_SHIFT;
}

static uint32_t mattr_to_texcb(uint32_t attr)
{
	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
	return (attr >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK;
}


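/*
 * Convert a hardware descriptor back to TEE_MATTR_* flags. The access
 * permission model assumed here is the simplified one (SCTLR.AFE == 1):
 * AP[0] acts as the access flag, AP[1] selects unprivileged access and
 * AP[2] write protection.
 */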
static uint32_t desc_to_mattr(unsigned level, uint32_t desc)
{
	uint32_t a;

	switch (get_desc_type(level, desc)) {
	case DESC_TYPE_PAGE_TABLE:
		a = TEE_MATTR_TABLE;
		if (!(desc & SECTION_PT_NOTSECURE))
			a |= TEE_MATTR_SECURE;
		break;
	case DESC_TYPE_SECTION:
		a = TEE_MATTR_VALID_BLOCK;
		if (desc & SECTION_ACCESS_FLAG)
			a |= TEE_MATTR_PRX | TEE_MATTR_URX;

		if (!(desc & SECTION_RO))
			a |= TEE_MATTR_PW | TEE_MATTR_UW;

		if (desc & SECTION_XN)
			a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

		if (desc & SECTION_PXN)
			a &= ~TEE_MATTR_PX;

		a |= texcb_to_mattr(((desc >> 12) & 0x7) | ((desc >> 2) & 0x3));

		if (!(desc & SECTION_NOTGLOBAL))
			a |= TEE_MATTR_GLOBAL;

		if (!(desc & SECTION_NOTSECURE))
			a |= TEE_MATTR_SECURE;

		break;
	case DESC_TYPE_SMALL_PAGE:
		a = TEE_MATTR_VALID_BLOCK;
		if (desc & SMALL_PAGE_ACCESS_FLAG)
			a |= TEE_MATTR_PRX | TEE_MATTR_URX;

		if (!(desc & SMALL_PAGE_RO))
			a |= TEE_MATTR_PW | TEE_MATTR_UW;

		if (desc & SMALL_PAGE_XN)
			a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);

		a |= texcb_to_mattr(((desc >> 6) & 0x7) | ((desc >> 2) & 0x3));

		if (!(desc & SMALL_PAGE_NOTGLOBAL))
			a |= TEE_MATTR_GLOBAL;
		break;
	default:
		return 0;
	}

	return a;
}

static uint32_t mattr_to_desc(unsigned level, uint32_t attr)
{
	uint32_t desc;
	uint32_t a = attr;
	unsigned texcb;

	if (level == 1 && (a & TEE_MATTR_TABLE)) {
		desc = SECTION_PT_PT;
		if (!(a & TEE_MATTR_SECURE))
			desc |= SECTION_PT_NOTSECURE;
		return desc;
	}

	if (!(a & TEE_MATTR_VALID_BLOCK))
		return INVALID_DESC;

	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
		a |= TEE_MATTR_PR;
	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
		a |= TEE_MATTR_UR;
	if (a & TEE_MATTR_UR)
		a |= TEE_MATTR_PR;
	if (a & TEE_MATTR_UW)
		a |= TEE_MATTR_PW;


	texcb = mattr_to_texcb(a);

	if (level == 1) { /* Section */
#ifndef CFG_NO_SMP
		desc = SECTION_SECTION | SECTION_SHARED;
#else
		desc = SECTION_SECTION;
#endif

		if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
			desc |= SECTION_XN;

#ifdef CFG_HWSUPP_MEM_PERM_PXN
		if (!(a & TEE_MATTR_PX))
			desc |= SECTION_PXN;
#endif

		if (a & TEE_MATTR_UR)
			desc |= SECTION_UNPRIV;

		if (!(a & TEE_MATTR_PW))
			desc |= SECTION_RO;

		if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
			desc |= SECTION_ACCESS_FLAG;

		if (!(a & TEE_MATTR_GLOBAL))
			desc |= SECTION_NOTGLOBAL;

		if (!(a & TEE_MATTR_SECURE))
			desc |= SECTION_NOTSECURE;

		desc |= SECTION_TEXCB(texcb);
	} else {
#ifndef CFG_NO_SMP
		desc = SMALL_PAGE_SMALL_PAGE | SMALL_PAGE_SHARED;
#else
		desc = SMALL_PAGE_SMALL_PAGE;
#endif

		if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
			desc |= SMALL_PAGE_XN;

		if (a & TEE_MATTR_UR)
			desc |= SMALL_PAGE_UNPRIV;

		if (!(a & TEE_MATTR_PW))
			desc |= SMALL_PAGE_RO;

		if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
			desc |= SMALL_PAGE_ACCESS_FLAG;

		if (!(a & TEE_MATTR_GLOBAL))
			desc |= SMALL_PAGE_NOTGLOBAL;

		desc |= SMALL_PAGE_TEXCB(texcb);
	}

	return desc;
}

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned level, vaddr_t va_base, void *table)
{
	tbl_info->level = level;
	tbl_info->table = table;
	tbl_info->va_base = va_base;
	assert(level <= 2);
	if (level == 1) {
		tbl_info->shift = SECTION_SHIFT;
		tbl_info->num_entries = NUM_L1_ENTRIES;
	} else {
		tbl_info->shift = SMALL_PAGE_SHIFT;
		tbl_info->num_entries = NUM_L2_ENTRIES;
	}
}

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
{
	void *tbl = (void *)core_mmu_get_ul1_ttb_va(get_prtn());

	core_mmu_set_info_table(pgd_info, 1, 0, tbl);
	pgd_info->num_entries = NUM_UL1_ENTRIES;
}

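/*
 * Build a fresh user mapping for the user mode context: clear the
 * current thread's user L1 table, populate it from the context's VM
 * info and record the TTBR0 value (table base plus walk attributes)
 * and the ASID that core_mmu_set_user_map() will install later.
 */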
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map)
{
	struct core_mmu_table_info dir_info = { };

	COMPILE_TIME_ASSERT(L2_TBL_SIZE == PGT_SIZE);

	core_mmu_get_user_pgdir(&dir_info);
	memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
	core_mmu_populate_user_map(&dir_info, uctx);
	map->ttbr0 = core_mmu_get_ul1_ttb_pa(get_prtn()) |
		     TEE_MMU_DEFAULT_ATTRS;
	map->ctxid = uctx->vm_info.asid;
}

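/*
 * Look up the translation table covering 'va': return the L1 table
 * unless max_level allows going deeper and the L1 entry points to an
 * L2 table, in which case that L2 table is returned instead.
 */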
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info)
{
	uint32_t *tbl;
	unsigned n = va >> SECTION_SHIFT;

	if (!prtn)
		prtn = get_prtn();
	tbl = (uint32_t *)core_mmu_get_main_ttb_va(prtn);

	if (max_level == 1 || (tbl[n] & 0x3) != 0x1) {
		core_mmu_set_info_table(tbl_info, 1, 0, tbl);
	} else {
		paddr_t ntbl = tbl[n] & ~((1 << 10) - 1);
		void *l2tbl = phys_to_virt(ntbl, MEM_AREA_TEE_RAM_RW_DATA,
					   L2_TBL_SIZE);

		if (!l2tbl)
			return false;

		core_mmu_set_info_table(tbl_info, 2, n << SECTION_SHIFT, l2tbl);
	}
	return true;
}

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr)
{
	uint32_t *tbl = table;
	uint32_t desc = mattr_to_desc(level, attr);

	tbl[idx] = desc | pa;
}


static paddr_t desc_to_pa(unsigned level, uint32_t desc)
{
	unsigned shift_mask;

	switch (get_desc_type(level, desc)) {
	case DESC_TYPE_PAGE_TABLE:
		shift_mask = 10;
		break;
	case DESC_TYPE_SECTION:
		shift_mask = 20;
		break;
	case DESC_TYPE_SUPER_SECTION:
		shift_mask = 24; /* We're ignoring bits 32 and above. */
		break;
	case DESC_TYPE_LARGE_PAGE:
		shift_mask = 16;
		break;
	case DESC_TYPE_SMALL_PAGE:
		shift_mask = 12;
		break;
	default:
		/* Invalid section */
		shift_mask = 4;
	}

	return desc & ~((1 << shift_mask) - 1);
}

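/*
 * Replace an L1 section entry (or an empty slot) with a pointer to a
 * freshly allocated L2 table. Any existing 1 MiB section mapping is
 * re-created as 256 small-page entries with the same attributes, so
 * parts of it can then be remapped at 4 KiB granularity.
 */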
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure)
{
	uint32_t *new_table;
	uint32_t *entry;
	uint32_t new_table_desc;
	uint32_t attr;
	uint32_t desc;
	paddr_t pa;
	int i;

	if (tbl_info->level != 1)
		return false;

	if (idx >= NUM_L1_ENTRIES)
		return false;

	entry = (uint32_t *)tbl_info->table + idx;
	attr = desc_to_mattr(1, *entry);

	if (*entry && get_desc_type(1, *entry) == DESC_TYPE_PAGE_TABLE) {
		/*
		 * If there is a page table already,
		 * check that the secure attribute fits
		 */
		return secure == (bool)(attr & TEE_MATTR_SECURE);
	}

	/* If there is something mapped, check the secure access flag */
	if (attr && secure != (bool)(attr & TEE_MATTR_SECURE))
		return false;

	new_table = core_mmu_alloc_l2(get_prtn(),
				      NUM_L2_ENTRIES * SMALL_PAGE_SIZE);

	if (!new_table)
		return false;

	new_table_desc = SECTION_PT_PT | virt_to_phys(new_table);

	if (!secure)
		new_table_desc |= SECTION_PT_NOTSECURE;

	if (*entry) {
		pa = desc_to_pa(1, *entry);
		desc = mattr_to_desc(2, attr);
		for (i = 0; i < NUM_L2_ENTRIES; i++, pa += SMALL_PAGE_SIZE)
			new_table[i] = desc | pa;
	}

	/* Update descriptor at current level */
	*entry = new_table_desc;

	return true;
}


void core_mmu_get_entry_primitive(const void *table, size_t level,
				  size_t idx, paddr_t *pa, uint32_t *attr)
{
	const uint32_t *tbl = table;

	if (pa)
		*pa = desc_to_pa(level, tbl[idx]);

	if (attr)
		*attr = desc_to_mattr(level, tbl[idx]);
}

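/*
 * The user VA range is what TTBR0 covers minus the first section:
 * sections 1..(NUM_UL1_ENTRIES - 1), i.e. [1 MiB, 32 MiB), so that a
 * NULL pointer always hits an unmapped address.
 */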
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
{
	if (base) {
		/* Leave the first entry unmapped so NULL stays unmapped */
		*base = 1 << SECTION_SHIFT;
	}

	if (size)
		*size = (NUM_UL1_ENTRIES - 1) << SECTION_SHIFT;
}

void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
	map->ttbr0 = read_ttbr0();
	map->ctxid = read_contextidr();
}

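/*
 * Switch TTBR0 and the Context ID (ASID) for the current core. The
 * reserved ASID 0 is installed while TTBR0 is being changed so that no
 * speculative translations can pair the old ASID with the new table
 * (or vice versa); this also works around ARM erratum 754322.
 */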
void core_mmu_set_user_map(struct core_mmu_user_map *map)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	/*
	 * Update the reserved Context ID and TTBR0
	 */

	dsb();	/* ARM erratum 754322 */
	write_contextidr(0);
	isb();

	if (map) {
		write_ttbr0(map->ttbr0);
		isb();
		write_contextidr(map->ctxid);
		isb();
	} else {
		write_ttbr0(read_ttbr1());
		isb();
	}

	tlbi_all();
	icache_inv_all();

	/* Restore interrupts */
	thread_unmask_exceptions(exceptions);
}

bool core_mmu_user_mapping_is_active(void)
{
	bool ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	ret = read_ttbr0() != read_ttbr1();
	thread_unmask_exceptions(exceptions);

	return ret;
}

static void print_mmap_area(const struct tee_mmap_region *mm __maybe_unused,
			    const char *str __maybe_unused)
{
	if (!(mm->attr & TEE_MATTR_VALID_BLOCK))
		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] not mapped",
			    str, mm->va, mm->va + mm->size);
	else
		debug_print("%s [%08" PRIxVA " %08" PRIxVA "] %s-%s-%s-%s",
			    str, mm->va, mm->va + mm->size,
			    mattr_is_cached(mm->attr) ? "MEM" : "DEV",
			    mm->attr & TEE_MATTR_PW ? "RW" : "RO",
			    mm->attr & TEE_MATTR_PX ? "X" : "XN",
			    mm->attr & TEE_MATTR_SECURE ? "S" : "NS");
}

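/*
 * Map a memory region using 1 MiB section entries directly in the L1
 * table. Overlapping an already populated entry is only allowed when
 * the new descriptor is identical to the existing one.
 */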
void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb)
{
	uint32_t attr = mattr_to_desc(1, mm->attr);
	size_t idx = mm->va >> SECTION_SHIFT;
	paddr_t pa = 0;
	size_t n;

	if (core_mmap_is_end_of_table(mm))
		return;

	print_mmap_area(mm, "section map");

	attr = mattr_to_desc(1, mm->attr);
	if (attr != INVALID_DESC)
		pa = mm->pa;

	n = ROUNDUP(mm->size, SECTION_SIZE) >> SECTION_SHIFT;
	while (n--) {
		assert(!attr || !ttb[idx] || ttb[idx] == (pa | attr));

		ttb[idx] = pa | attr;
		idx++;
		pa += SECTION_SIZE;
	}
}

void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm)
{
	void *ttb1 = (void *)core_mmu_get_main_ttb_va(prtn);
	size_t n;

	/* reset L1 table */
	memset(ttb1, 0, L1_TBL_SIZE);

	for (n = 0; !core_mmap_is_end_of_table(mm + n); n++)
		if (!core_mmu_is_dynamic_vaspace(mm + n))
			core_mmu_map_region(prtn, mm + n);
}


void core_init_mmu(struct tee_mmap_region *mm)
{
	/* Initialize default pagetables */
	core_init_mmu_prtn(&default_partition, mm);
}

void core_init_mmu_regs(struct core_mmu_config *cfg)
{
	cfg->ttbr = core_mmu_get_main_ttb_pa(&default_partition) |
		    TEE_MMU_DEFAULT_ATTRS;

	cfg->prrr = ATTR_DEVICE_PRRR | ATTR_NORMAL_CACHED_PRRR |
		    ATTR_STRONGLY_O_PRRR | ATTR_TAGGED_CACHED_PRRR;
	cfg->nmrr = ATTR_DEVICE_NMRR | ATTR_NORMAL_CACHED_NMRR |
		    ATTR_STRONGLY_O_NMRR | ATTR_TAGGED_CACHED_NMRR;

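	/*
	 * Treat regions whose descriptors have the S bit set as shareable
	 * for both Normal (NS1) and Device (DS1) memory types.
	 */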
	cfg->prrr |= PRRR_NS1 | PRRR_DS1;

	/*
	 * Program Domain access control register with two domains:
	 * domain 0: teecore
	 * domain 1: TA
	 */
	cfg->dacr = DACR_DOMAIN(0, DACR_DOMAIN_PERM_CLIENT) |
		    DACR_DOMAIN(1, DACR_DOMAIN_PERM_CLIENT);

	/*
	 * Enable lookups using TTBR0 and TTBR1 with the split of addresses
	 * defined by TTBCR_N_VALUE.
	 */
	cfg->ttbcr = TTBCR_N_VALUE;
}
DECLARE_KEEP_PAGER(core_init_mmu_regs);

enum core_mmu_fault core_mmu_get_fault_type(uint32_t fsr)
{
	assert(!(fsr & FSR_LPAE));

	switch (fsr & FSR_FS_MASK) {
	case 0x1: /* DFSR[10,3:0] 0b00001 Alignment fault (DFSR only) */
		return CORE_MMU_FAULT_ALIGNMENT;
	case 0x2: /* DFSR[10,3:0] 0b00010 Debug event */
		return CORE_MMU_FAULT_DEBUG_EVENT;
	case 0x4: /* DFSR[10,3:0] 0b00100 Fault on instr cache maintenance */
	case 0x5: /* DFSR[10,3:0] 0b00101 Translation fault first level */
	case 0x7: /* DFSR[10,3:0] 0b00111 Translation fault second level */
		return CORE_MMU_FAULT_TRANSLATION;
	case 0xd: /* DFSR[10,3:0] 0b01101 Permission fault first level */
	case 0xf: /* DFSR[10,3:0] 0b01111 Permission fault second level */
		if (fsr & FSR_WNR)
			return CORE_MMU_FAULT_WRITE_PERMISSION;
		else
			return CORE_MMU_FAULT_READ_PERMISSION;
	case 0x3: /* DFSR[10,3:0] 0b00011 Access bit fault on section */
	case 0x6: /* DFSR[10,3:0] 0b00110 Access bit fault on page */
		return CORE_MMU_FAULT_ACCESS_BIT;
	case (1 << 10) | 0x6:
		/* DFSR[10,3:0] 0b10110 Async external abort (DFSR only) */
		return CORE_MMU_FAULT_ASYNC_EXTERNAL;

	default:
		return CORE_MMU_FAULT_OTHER;
	}
}