/*
 * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_tables_private.h"

#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */

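/*
 * Each region prints on a single line; an entry resembles the following
 * (values are illustrative only):
 *  VA:0x1000 PA:0x1000 size:0x1000 attr:0x8 granularity:0x1000
 */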
void xlat_mmap_print(const mmap_region_t *mmap)
{
	printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
		       mm->base_va, mm->base_pa, mm->size, mm->attr,
		       mm->granularity);
		++mm;
	}
	printf("\n");
}

/* Print the attributes of the specified block descriptor. */
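/*
 * The result is a dash-separated attribute string, e.g. "MEM-RW-XN-S"
 * for secure, read-write, execute-never normal memory.
 */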
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		printf("DEV");
	}

	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
		/* For EL3 and EL2 only check the AP[2] and XN bits. */
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
	} else {
		assert(xlat_regime == EL1_EL0_REGIME);
		/*
		 * For EL0 and EL1:
		 * - In AArch64 PXN and UXN can be set independently but in
		 *   AArch32 there is no UXN (XN affects both privilege levels).
		 *   For consistency, we set them simultaneously in both cases.
		 * - RO and RW permissions must be the same in EL1 and EL0. If
		 *   EL0 can access that memory region, so can EL1, with the
		 *   same permissions.
		 */
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		/* Only check one of PXN and UXN, the other one is the same. */
		printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
		/*
		 * Privileged regions can only be accessed from EL1, user
		 * regions can be accessed from EL1 and EL0.
		 */
		printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
			  ? "-USER" : "-PRIV");
	}

#if ENABLE_RME
	switch (desc & LOWER_ATTRS(EL3_S1_NSE | NS)) {
	case 0ULL:
		printf("-S");
		break;
	case LOWER_ATTRS(NS):
		printf("-NS");
		break;
	case LOWER_ATTRS(EL3_S1_NSE):
		printf("-RT");
		break;
	default: /* LOWER_ATTRS(EL3_S1_NSE | NS) */
		printf("-RL");
	}
#else
	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
#endif

#ifdef __aarch64__
	/* Check Guarded Page bit */
	if ((desc & GP) != 0ULL) {
		printf("-GP");
	}
#endif
}

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
		"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
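/*
 * Illustrative output (actual values depend on the platform's memory map):
 *  [LV1] VA:0x0 size:0x40000000
 *    [LV2] VA:0x0 PA:0x0 size:0x200000 MEM-RW-XN-S
 */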
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
		const uint64_t *table_base, unsigned int table_entries,
		unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	unsigned int table_idx = 0U;
	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				printf(invalid_descriptors_omitted,
				       level_spacers[level],
				       invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1U);
			} else {
				printf("%sVA:0x%lx PA:0x%" PRIx64 " size:0x%zx ",
				       level_spacers[level], table_idx_va,
				       (uint64_t)(desc & TABLE_ADDR_MASK),
				       level_size);
				xlat_desc_print(ctx, desc);
				printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		printf(invalid_descriptors_omitted,
		       level_spacers[level], invalid_row_count - 1);
	}
}

void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;
	int used_page_tables;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else if (ctx->xlat_regime == EL2_REGIME) {
		xlat_regime_str = "2";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);

	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %u\n",
		ctx->base_table_entries);

#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
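/*
 * Note: assuming the usual 4KB translation granule, each lookup level maps
 * progressively smaller blocks: a level 1 entry covers 1GB, a level 2 entry
 * 2MB, and a level 3 (XLAT_TABLE_LEVEL_MAX) entry a single 4KB page.
 */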
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       unsigned int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       unsigned int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	unsigned int entries;

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
		uint64_t idx, desc, desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
			 * Only page descriptors are allowed at the final
			 * lookup level.
			 */
			assert(desc_type == PAGE_DESC);
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached, the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
	assert(false);

	return NULL;
}

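/*
 * Decode a block or page descriptor into MT_* attribute flags: the attribute
 * index selects MT_MEMORY, MT_NON_CACHEABLE or MT_DEVICE; AP[2] gives MT_RW,
 * AP[1] gives MT_USER (EL1&0 regime only), the NS bit gives MT_NS and the XN
 * bits give MT_EXECUTE_NEVER.
 */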
static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, unsigned int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	unsigned int level;
	unsigned long long virt_addr_space_size;

	/*
	 * Sanity-check arguments.
	 */
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
	       (ctx->xlat_regime == EL2_REGIME) ||
	       (ctx->xlat_regime == EL3_REGIME));

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
	assert(virt_addr_space_size > 0U);

	entry = find_xlat_table_entry(base_va,
				      ctx->base_table,
				      ctx->base_table_entries,
				      virt_addr_space_size,
				      &level);
	if (entry == NULL) {
		WARN("Address 0x%lx is not mapped.\n", base_va);
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
	*attributes = 0U;

	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;

		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}

	uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;

	if (ns_bit == 1U)
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0U);
	}

	return 0;
}

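/*
 * Illustrative use (in any initialized translation context 'ctx'):
 *
 *	uint32_t attr;
 *
 *	if (xlat_get_mem_attributes_ctx(ctx, base_va, &attr) == 0) {
 *		bool writable = ((attr & MT_RW) != 0U);
 *		...
 *	}
 */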
int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				uint32_t *attr)
{
	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
				NULL, NULL, NULL);
}

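/*
 * Illustrative use: remap one page as read-only and executable, for instance
 * after copying code into it ('page_va' is a caller-supplied, page-aligned
 * virtual address that is already mapped at page granularity):
 *
 *	int rc = xlat_change_mem_attributes_ctx(ctx, page_va, PAGE_SIZE,
 *						MT_RO | MT_EXECUTE);
 *	if (rc != 0)
 *		... handle the -EINVAL error ...
 */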
int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size, uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1U;
	assert(virt_addr_space_size > 0U);

	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
		     __func__, base_va);
		return -EINVAL;
	}

	if (size == 0U) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

	if ((size % PAGE_SIZE) != 0U) {
		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
		     __func__, size);
		return -EINVAL;
	}

	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
		WARN("%s: Mapping memory as read-write and executable is not allowed.\n",
		     __func__);
		return -EINVAL;
	}

	size_t pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
		pages_count, base_va);

	uintptr_t base_va_original = base_va;

	/*
	 * Sanity checks.
	 */
	for (unsigned int i = 0U; i < pages_count; ++i) {
		const uint64_t *entry;
		uint64_t desc, attr_index;
		unsigned int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address 0x%lx is not mapped.\n", base_va);
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
		    (level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address 0x%lx is not mapped at the right granularity.\n",
			     base_va);
			WARN("Granularity is 0x%lx, should be 0x%lx.\n",
			     XLAT_BLOCK_SIZE(level), PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0U) {
				WARN("Setting device memory as executable at address 0x%lx.\n",
				     base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

	for (unsigned int i = 0U; i < pages_count; ++i) {

		uint32_t old_attr = 0U, new_attr;
		uint64_t *entry = NULL;
		unsigned int level = 0U;
		unsigned long long addr_pa = 0ULL;

		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
						&entry, &addr_pa, &level);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;
#if !HW_ASSISTED_COHERENCY
		dccvac((uintptr_t)entry);
#endif
		/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write new descriptor */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
#if !HW_ASSISTED_COHERENCY
		dccvac((uintptr_t)entry);
#endif
		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor written is seen by the system. */
	dsbish();

	return 0;
}