/*
 * Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/arch/xtensa/arch.h>
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/logging/log.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/toolchain.h>
#include <xtensa/corebits.h>
#include <xtensa_mmu_priv.h>

#include <kernel_arch_func.h>
#include <mmu.h>

/* Skip TLB IPI when updating page tables.
 * This allows us to send the IPI only after the last
 * change of a series.
 */
#define OPTION_NO_TLB_IPI BIT(0)

/* Number of entries in the level 1 page table.
 * Each entry points to a level 2 page table covering a 4 MB region.
 */
#define XTENSA_L1_PAGE_TABLE_ENTRIES 1024U

/* Size of the level 1 page table in bytes.
 */
#define XTENSA_L1_PAGE_TABLE_SIZE (XTENSA_L1_PAGE_TABLE_ENTRIES * sizeof(uint32_t))

/* Number of entries in each level 2 page table.
 * Each entry maps a 4 KB page.
 */
#define XTENSA_L2_PAGE_TABLE_ENTRIES 1024U

/* Size of a level 2 page table in bytes.
 */
#define XTENSA_L2_PAGE_TABLE_SIZE (XTENSA_L2_PAGE_TABLE_ENTRIES * sizeof(uint32_t))
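
/* For orientation, a bit of illustrative arithmetic (a sketch, not code used
 * by this driver): 1024 L1 entries x 4 MB each covers the full 4 GB virtual
 * address space, 1024 L2 entries x 4 KB each covers the 4 MB handled by one
 * L1 entry, and either table is 1024 * sizeof(uint32_t) = 4 KB, i.e. exactly
 * one page.
 */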

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000,
	     "MMU_PAGE_SIZE value is invalid, only 4 kB pages are supported\n");

/*
 * The level 1 page table has to be 4 KB so it can fit into one of the wired
 * entries. All entries are initialized as illegal, so an attempt to read an
 * unmapped area will cause a double exception.
 *
 * Each memory domain contains its own L1 page table. The kernel L1 page
 * table is located at index 0.
 */
static uint32_t l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES]
				__aligned(KB(4));


/*
 * This is an alias for the set of page tables used by the kernel.
 */
uint32_t *xtensa_kernel_ptables = (uint32_t *)l1_page_table[0];

/*
 * Each level 2 table maps a 4 MB memory range. It consists of 1024 entries,
 * each covering a 4 KB page.
 */
static uint32_t l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES][XTENSA_L2_PAGE_TABLE_ENTRIES]
				__aligned(KB(4));

/*
 * This additional variable tracks which L1 tables are in use. It is kept
 * separate from the tables themselves to keep their alignment simple.
 *
 * @note The first bit is set because index 0 is used for the kernel page tables.
 */
static ATOMIC_DEFINE(l1_page_table_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);

/*
 * This additional variable tracks which L2 tables are in use. It is kept
 * separate from the tables themselves to keep their alignment simple.
 */
static ATOMIC_DEFINE(l2_page_tables_track, CONFIG_XTENSA_MMU_NUM_L2_TABLES);

/*
 * Protects xtensa_domain_list and serializes access to the page tables.
 */
static struct k_spinlock xtensa_mmu_lock;

#ifdef CONFIG_USERSPACE

/*
 * Each domain has its own ASID. ASIDs range from 1 (kernel) to 255.
 * When a TLB entry matches, the hardware checks the ASID in the entry and
 * finds the corresponding position in the RASID register. This position is
 * then compared with the current ring (CRING) to check the permission.
 */
static uint8_t asid_count = 3;
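
/*
 * Illustrative sketch only (the authoritative layout lives in
 * xtensa_mmu_priv.h and the core ISA): RASID packs one 8-bit ASID per ring,
 * ring 0 in the lowest byte, and ring 0 always carries ASID 1 for the
 * kernel. Conceptually the permission check works like:
 *
 *	uint32_t rasid = xtensa_rasid_get();
 *	// ring = position (0..3) of the PTE's ASID within rasid
 *	// access is allowed when CRING <= ring, subject to PTE attributes
 *
 * page_validate() further below performs this same check in software.
 */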

/*
 * List with all active and initialized memory domains.
 */
static sys_slist_t xtensa_domain_list;
#endif /* CONFIG_USERSPACE */

extern char _heap_end[];
extern char _heap_start[];
/*
 * Static definition of all code & data memory regions of the
 * current Zephyr image. This information must be available &
 * processed upon MMU initialization.
 */

static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
	/*
	 * Mark the zephyr execution regions (data, bss, noinit, etc.)
	 * cacheable, read / write and non-executable
	 */
	{
		/* This includes .data, .bss and various kobject sections. */
		.start = (uint32_t)_image_ram_start,
		.end = (uint32_t)_image_ram_end,
#ifdef CONFIG_XTENSA_RPO_CACHE
		.attrs = XTENSA_MMU_PERM_W,
#else
		.attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif
		.name = "data",
	},
#if K_HEAP_MEM_POOL_SIZE > 0
	/* System heap memory */
	{
		.start = (uint32_t)_heap_start,
		.end = (uint32_t)_heap_end,
#ifdef CONFIG_XTENSA_RPO_CACHE
		.attrs = XTENSA_MMU_PERM_W,
#else
		.attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif
		.name = "heap",
	},
#endif
	/* Mark text segment cacheable, read only and executable */
	{
		.start = (uint32_t)__text_region_start,
		.end = (uint32_t)__text_region_end,
		.attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
		.name = "text",
	},
	/* Mark rodata segment cacheable, read only and non-executable */
	{
		.start = (uint32_t)__rodata_region_start,
		.end = (uint32_t)__rodata_region_end,
		.attrs = XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
		.name = "rodata",
	},
};
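
/*
 * SoC layers can provide additional ranges via xtensa_soc_mmu_ranges[] and
 * xtensa_soc_mmu_ranges_num, which xtensa_init_page_tables() below walks.
 * A minimal, purely illustrative sketch (the MMIO window and the exact
 * declaration types are assumptions, not taken from this file):
 *
 *	const struct xtensa_mmu_range xtensa_soc_mmu_ranges[] = {
 *		{
 *			.start = 0x60000000,
 *			.end = 0x60100000,
 *			.attrs = XTENSA_MMU_PERM_W,
 *			.name = "soc-mmio",
 *		},
 *	};
 *	int xtensa_soc_mmu_ranges_num = ARRAY_SIZE(xtensa_soc_mmu_ranges);
 */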

/**
 * @brief Check if the page table entry is illegal.
 *
 * @param[in] pte Page table entry.
 */
static inline bool is_pte_illegal(uint32_t pte)
{
	uint32_t attr = pte & XTENSA_MMU_PTE_ATTR_MASK;

	/*
	 * The ISA manual states only 12 and 14 are illegal attribute values.
	 * 13 and 15 are not. So we need to be more specific than simply
	 * testing if bits 2 and 3 are set.
	 */
	return (attr == 12) || (attr == 14);
}

/*
 * @brief Initialize all page table entries to be illegal.
 *
 * @param[in] ptable Pointer to the page table.
 * @param[in] num_entries Number of page table entries in the page table.
 */
static void init_page_table(uint32_t *ptable, size_t num_entries)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		ptable[i] = XTENSA_MMU_PTE_ILLEGAL;
	}
}

static inline uint32_t *alloc_l2_table(void)
{
	uint16_t idx;

	for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L2_TABLES; idx++) {
		if (!atomic_test_and_set_bit(l2_page_tables_track, idx)) {
			return (uint32_t *)&l2_page_tables[idx];
		}
	}

	return NULL;
}

static void map_memory_range(const uint32_t start, const uint32_t end,
			     const uint32_t attrs)
{
	uint32_t page, *table;
	bool shared = !!(attrs & XTENSA_MMU_MAP_SHARED);
	uint32_t sw_attrs = (attrs & XTENSA_MMU_PTE_ATTR_ORIGINAL) == XTENSA_MMU_PTE_ATTR_ORIGINAL ?
		attrs : 0;

	for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
		uint32_t pte = XTENSA_MMU_PTE(page,
					      shared ? XTENSA_MMU_SHARED_RING :
					      XTENSA_MMU_KERNEL_RING,
					      sw_attrs, attrs);
		uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
		uint32_t l1_pos = XTENSA_MMU_L1_POS(page);

		if (is_pte_illegal(xtensa_kernel_ptables[l1_pos])) {
			table = alloc_l2_table();

			__ASSERT(table != NULL, "There is no l2 page table available to "
				 "map 0x%08x\n", page);

			init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);

			xtensa_kernel_ptables[l1_pos] =
				XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
					       sw_attrs, XTENSA_MMU_PAGE_TABLE_ATTR);
		}

		table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
		table[l2_pos] = pte;
	}
}
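
/*
 * For reference, a sketch of the virtual address split used above (this is
 * the arithmetic the XTENSA_MMU_L1_POS()/XTENSA_MMU_L2_POS() macros are
 * expected to encapsulate, shown here only for illustration):
 *
 *	l1_pos = vaddr >> 22;		// top 10 bits select the L1 entry
 *	l2_pos = (vaddr >> 12) & 0x3ff;	// next 10 bits select the L2 entry
 *	// remaining 12 bits are the offset within the 4 KB page
 */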

static void map_memory(const uint32_t start, const uint32_t end,
		       const uint32_t attrs)
{
#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
	uint32_t uc_attrs = attrs & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK;
	uint32_t c_attrs = attrs | XTENSA_MMU_CACHED_WB;

	if (sys_cache_is_ptr_uncached((void *)start)) {
		map_memory_range(start, end, uc_attrs);

		map_memory_range(POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)start)),
				 POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)), c_attrs);
	} else if (sys_cache_is_ptr_cached((void *)start)) {
		map_memory_range(start, end, c_attrs);

		map_memory_range(POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)start)),
				 POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)), uc_attrs);
	} else
#endif
	{
		map_memory_range(start, end, attrs);
	}
}
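
/*
 * With CONFIG_XTENSA_MMU_DOUBLE_MAP, every region is reachable through two
 * virtual aliases of the same physical memory: a cached one and an uncached
 * one. A hedged example of what this means for callers:
 *
 *	void *c = sys_cache_cached_ptr_get(buf);	// cached alias
 *	void *uc = sys_cache_uncached_ptr_get(buf);	// uncached alias
 *	// both refer to the same physical memory; map_memory() above installs
 *	// both aliases so their page table attributes stay consistent
 *	// (write-back cached vs. uncached).
 */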

static void xtensa_init_page_tables(void)
{
	volatile uint8_t entry;
	static bool already_inited;

	if (already_inited) {
		return;
	}
	already_inited = true;

	init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES);
	atomic_set_bit(l1_page_table_track, 0);

	for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) {
		const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry];

		map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
	}

	for (entry = 0; entry < xtensa_soc_mmu_ranges_num; entry++) {
		const struct xtensa_mmu_range *range = &xtensa_soc_mmu_ranges[entry];

		map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
	}

	/* Finally, the direct-mapped pages used in the page tables
	 * must be fixed up to use the same cache attribute (but these
	 * must be writable, obviously). They shouldn't be left at
	 * the default.
	 */
	map_memory_range((uint32_t) &l1_page_table[0],
			 (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
			 XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);
	map_memory_range((uint32_t) &l2_page_tables[0],
			 (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES],
			 XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);

	sys_cache_data_flush_all();
}

__weak void arch_xtensa_mmu_post_init(bool is_core0)
{
	ARG_UNUSED(is_core0);
}

void xtensa_mmu_init(void)
{
	xtensa_init_page_tables();

	xtensa_mmu_init_paging();

	/*
	 * This register is used to determine whether we are faulting inside a
	 * double exception: non-zero means we are. Some SoCs do not start with
	 * it zeroed, so clear it during boot.
	 */
	XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0);

	arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}

void xtensa_mmu_reinit(void)
{
	/* First initialize the hardware */
	xtensa_mmu_init_paging();

#ifdef CONFIG_USERSPACE
	struct k_thread *thread = _current_cpu->current;
	struct arch_mem_domain *domain =
			&(thread->mem_domain_info.mem_domain->arch);


	/* Set the page table for current context */
	xtensa_mmu_set_paging(domain);
#endif /* CONFIG_USERSPACE */

	arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}

#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
/* Zephyr's linker scripts for Xtensa usually put
 * something before z_mapped_start (aka .text),
 * i.e. vecbase, so we need to reserve that
 * space or else k_mem_map() would map it,
 * resulting in faults.
 */
__weak void arch_reserved_pages_update(void)
{
	uintptr_t page;
	int idx;

	for (page = CONFIG_SRAM_BASE_ADDRESS, idx = 0;
	     page < (uintptr_t)z_mapped_start;
	     page += CONFIG_MMU_PAGE_SIZE, idx++) {
		k_mem_page_frame_set(&k_mem_page_frames[idx], K_MEM_PAGE_FRAME_RESERVED);
	}
}
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
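
/*
 * Worked example with made-up numbers, purely for illustration: if
 * CONFIG_SRAM_BASE_ADDRESS were 0x80000000 and z_mapped_start were
 * 0x80002000, the loop above would mark 0x2000 / 0x1000 = 2 page frames
 * (indices 0 and 1) as reserved so k_mem_map() never hands them out.
 */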

static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
			      uint32_t flags, bool is_user)
{
	uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
	uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
	uint32_t *table;

	sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	if (is_pte_illegal(l1_table[l1_pos])) {
		table = alloc_l2_table();

		if (table == NULL) {
			return false;
		}

		init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);

		l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
						  0, XTENSA_MMU_PAGE_TABLE_ATTR);

		sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
	}

	table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
	table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
				       XTENSA_MMU_KERNEL_RING,
				       0, flags);

	sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
	xtensa_tlb_autorefill_invalidate();

	return true;
}

static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags, bool is_user)
{
	bool ret;
	void *vaddr, *vaddr_uc;
	uintptr_t paddr, paddr_uc;
	uint32_t flags, flags_uc;

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		if (sys_cache_is_ptr_cached(va)) {
			vaddr = va;
			vaddr_uc = sys_cache_uncached_ptr_get(va);
		} else {
			vaddr = sys_cache_cached_ptr_get(va);
			vaddr_uc = va;
		}

		if (sys_cache_is_ptr_cached((void *)pa)) {
			paddr = pa;
			paddr_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)pa);
		} else {
			paddr = (uintptr_t)sys_cache_cached_ptr_get((void *)pa);
			paddr_uc = pa;
		}

		flags_uc = (xtensa_flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
		flags = flags_uc | XTENSA_MMU_CACHED_WB;
	} else {
		vaddr = va;
		paddr = pa;
		flags = xtensa_flags;
	}

	ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr, paddr,
				flags, is_user);
	__ASSERT(ret, "Virtual address (%p) already mapped", va);

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
		ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr_uc, paddr_uc,
					flags_uc, is_user);
		__ASSERT(ret, "Virtual address (%p) already mapped", vaddr_uc);
	}

#ifndef CONFIG_USERSPACE
	ARG_UNUSED(ret);
#else
	if (ret) {
		sys_snode_t *node;
		struct arch_mem_domain *domain;
		k_spinlock_key_t key;

		key = k_spin_lock(&z_mem_domain_lock);
		SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
			domain = CONTAINER_OF(node, struct arch_mem_domain, node);

			ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr,
						flags, is_user);
			__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
				 vaddr, domain);

			if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
				ret = l2_page_table_map(domain->ptables,
							(void *)vaddr_uc, paddr_uc,
							flags_uc, is_user);
				__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
					 vaddr_uc, domain);
			}
		}
		k_spin_unlock(&z_mem_domain_lock, key);
	}
#endif /* CONFIG_USERSPACE */
}

void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uint32_t va = (uint32_t)virt;
	uint32_t pa = (uint32_t)phys;
	uint32_t rem_size = (uint32_t)size;
	uint32_t xtensa_flags = 0;
	k_spinlock_key_t key;
	bool is_user;

	if (size == 0) {
		LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
			"zero size", (uint32_t)phys);
		k_panic();
	}

	switch (flags & K_MEM_CACHE_MASK) {

	case K_MEM_CACHE_WB:
		xtensa_flags |= XTENSA_MMU_CACHED_WB;
		break;
	case K_MEM_CACHE_WT:
		xtensa_flags |= XTENSA_MMU_CACHED_WT;
		break;
	case K_MEM_CACHE_NONE:
		__fallthrough;
	default:
		break;
	}

	if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) {
		xtensa_flags |= XTENSA_MMU_PERM_W;
	}
	if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) {
		xtensa_flags |= XTENSA_MMU_PERM_X;
	}

	is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER;

	key = k_spin_lock(&xtensa_mmu_lock);

	while (rem_size > 0) {
		__arch_mem_map((void *)va, pa, xtensa_flags, is_user);

		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
		pa += KB(4);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	xtensa_mmu_tlb_ipi();
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);
}
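
/*
 * Hedged usage example (addresses and flags are illustrative only): mapping
 * an 8 KB region as uncached, writable kernel memory would look roughly like
 *
 *	arch_mem_map((void *)0xe0000000, 0x90000000, KB(8),
 *		     K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *
 * In practice most callers go through the kernel's k_mem_map() family rather
 * than calling arch_mem_map() directly.
 */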

/**
 * @return True if the page is executable (thus the ITLB needs to be
 *         invalidated), false if not.
 */
static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
{
	uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
	uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
	uint32_t *l2_table;
	uint32_t table_pos;
	bool exec;

	sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	if (is_pte_illegal(l1_table[l1_pos])) {
		/* We shouldn't be unmapping an illegal entry.
		 * Return true so that we can invalidate ITLB too.
		 */
		return true;
	}

	exec = l1_table[l1_pos] & XTENSA_MMU_PERM_X;

	l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);

	sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

	l2_table[l2_pos] = XTENSA_MMU_PTE_ILLEGAL;

	sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

	for (l2_pos = 0; l2_pos < XTENSA_L2_PAGE_TABLE_ENTRIES; l2_pos++) {
		if (!is_pte_illegal(l2_table[l2_pos])) {
			goto end;
		}
	}

	l1_table[l1_pos] = XTENSA_MMU_PTE_ILLEGAL;
	sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
	atomic_clear_bit(l2_page_tables_track, table_pos);

end:
	/* Need to invalidate L2 page table as it is no longer valid. */
	xtensa_tlb_autorefill_invalidate();
	return exec;
}

static inline void __arch_mem_unmap(void *va)
{
	bool is_exec;
	void *vaddr, *vaddr_uc;

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		if (sys_cache_is_ptr_cached(va)) {
			vaddr = va;
			vaddr_uc = sys_cache_uncached_ptr_get(va);
		} else {
			vaddr = sys_cache_cached_ptr_get(va);
			vaddr_uc = va;
		}
	} else {
		vaddr = va;
	}

	is_exec = l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr);

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		(void)l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr_uc);
	}

#ifdef CONFIG_USERSPACE
	sys_snode_t *node;
	struct arch_mem_domain *domain;
	k_spinlock_key_t key;

	key = k_spin_lock(&z_mem_domain_lock);
	SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
		domain = CONTAINER_OF(node, struct arch_mem_domain, node);

		(void)l2_page_table_unmap(domain->ptables, (void *)vaddr);

		if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
			(void)l2_page_table_unmap(domain->ptables, (void *)vaddr_uc);
		}
	}
	k_spin_unlock(&z_mem_domain_lock, key);
#endif /* CONFIG_USERSPACE */
}

void arch_mem_unmap(void *addr, size_t size)
{
	uint32_t va = (uint32_t)addr;
	uint32_t rem_size = (uint32_t)size;
	k_spinlock_key_t key;

	if (addr == NULL) {
		LOG_ERR("Cannot unmap NULL pointer");
		return;
	}

	if (size == 0) {
		LOG_ERR("Cannot unmap virtual memory with zero size");
		return;
	}

	key = k_spin_lock(&xtensa_mmu_lock);

	while (rem_size > 0) {
		__arch_mem_unmap((void *)va);

		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	xtensa_mmu_tlb_ipi();
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);
}

/* This should be implemented in the SoC layer.
 * This weak version is here to avoid build errors.
 */
void __weak xtensa_mmu_tlb_ipi(void)
{
}
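
/*
 * A hedged sketch of what an SoC override might look like (the helper name
 * is hypothetical; each SoC uses its own inter-processor interrupt
 * mechanism): broadcast an interrupt to the other cores, whose handlers
 * then call xtensa_mmu_tlb_shootdown() to pick up the updated tables.
 *
 *	void xtensa_mmu_tlb_ipi(void)
 *	{
 *		soc_broadcast_ipi(IPI_TLB_FLUSH);	// hypothetical SoC helper
 *	}
 */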

void xtensa_mmu_tlb_shootdown(void)
{
	unsigned int key;

	/* Need to lock interrupts to prevent any context
	 * switching until all the page tables are updated.
	 * Otherwise we would be switching to another thread
	 * and running it with incorrect page tables,
	 * which would result in permission issues.
	 */
	key = arch_irq_lock();

	K_SPINLOCK(&xtensa_mmu_lock) {
		/* We don't have information on which page tables have changed,
		 * so we just invalidate the cached copies of all L1 and L2
		 * page tables.
		 */
		sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
		sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
	}

#ifdef CONFIG_USERSPACE
	struct k_thread *thread = _current_cpu->current;

	/* If the current thread is a user thread, we need to see if it has
	 * been migrated to another memory domain as the L1 page table
	 * is different from the currently used one.
	 */
	if ((thread->base.user_options & K_USER) == K_USER) {
		uint32_t ptevaddr_entry, ptevaddr,
			 thread_ptables, current_ptables;

		/* Need to read the currently used L1 page table.
		 * We know that the L1 page table is always mapped at way
		 * XTENSA_MMU_PTE_WAY, so we can skip the probing step by
		 * generating the query entry directly.
		 */
		ptevaddr = (uint32_t)xtensa_ptevaddr_get();
		ptevaddr_entry = XTENSA_MMU_PTE_ENTRY_VADDR(ptevaddr, ptevaddr)
				 | XTENSA_MMU_PTE_WAY;
		current_ptables = xtensa_dtlb_paddr_read(ptevaddr_entry);
		thread_ptables = (uint32_t)thread->arch.ptables;

		if (thread_ptables != current_ptables) {
			/* Need to remap the thread page tables if the ones
			 * indicated by the current thread are different
			 * from the currently mapped page table.
			 */
			struct arch_mem_domain *domain =
				&(thread->mem_domain_info.mem_domain->arch);
			xtensa_mmu_set_paging(domain);
		}

	}
#endif /* CONFIG_USERSPACE */

	/* L2 entries are mapped via auto-refill, so invalidating the
	 * auto-refill TLBs forces the L2 page tables to be re-walked.
	 *
	 * L1 will be refreshed during context switch so no need
	 * to do anything here.
	 */
	xtensa_tlb_autorefill_invalidate();

	arch_irq_unlock(key);
}

#ifdef CONFIG_USERSPACE

static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
{
	if ((thread->base.user_options & K_USER) != 0U) {
		return thread->arch.ptables;
	}

	return xtensa_kernel_ptables;
}

static inline uint32_t *alloc_l1_table(void)
{
	uint16_t idx;

	for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L1_TABLES; idx++) {
		if (!atomic_test_and_set_bit(l1_page_table_track, idx)) {
			return (uint32_t *)&l1_page_table[idx];
		}
	}

	return NULL;
}

static uint32_t *dup_table(void)
{
	uint16_t i, j;
	uint32_t *dst_table = alloc_l1_table();

	if (!dst_table) {
		return NULL;
	}

	for (i = 0; i < XTENSA_L1_PAGE_TABLE_ENTRIES; i++) {
		uint32_t *l2_table, *src_l2_table;

		if (is_pte_illegal(xtensa_kernel_ptables[i]) ||
		    (i == XTENSA_MMU_L1_POS(XTENSA_MMU_PTEVADDR))) {
			dst_table[i] = XTENSA_MMU_PTE_ILLEGAL;
			continue;
		}

		src_l2_table = (uint32_t *)(xtensa_kernel_ptables[i] & XTENSA_MMU_PTE_PPN_MASK);
		l2_table = alloc_l2_table();
		if (l2_table == NULL) {
			goto err;
		}

		for (j = 0; j < XTENSA_L2_PAGE_TABLE_ENTRIES; j++) {
			uint32_t original_attr = XTENSA_MMU_PTE_SW_GET(src_l2_table[j]);

			l2_table[j] = src_l2_table[j];
			if (original_attr != 0x0) {
				uint8_t ring;

				ring = XTENSA_MMU_PTE_RING_GET(l2_table[j]);
				l2_table[j] = XTENSA_MMU_PTE_ATTR_SET(l2_table[j], original_attr);
				l2_table[j] = XTENSA_MMU_PTE_RING_SET(l2_table[j],
					ring == XTENSA_MMU_SHARED_RING ?
					XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING);
			}
		}

		/* The page table entry uses the kernel ring because we don't
		 * want user threads to be able to manipulate it.
		 */
		dst_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
					      0, XTENSA_MMU_PAGE_TABLE_ATTR);

		sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE);
	}

	sys_cache_data_flush_range((void *)dst_table, XTENSA_L1_PAGE_TABLE_SIZE);

	return dst_table;

err:
	/* TODO: Cleanup failed allocation */
	return NULL;
}

int arch_mem_domain_init(struct k_mem_domain *domain)
{
	uint32_t *ptables;
	k_spinlock_key_t key;
	int ret;

	/*
	 * For now, just assert if we have reached the maximum number
	 * of ASIDs available.
	 */
	__ASSERT(asid_count < (XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available");

	key = k_spin_lock(&xtensa_mmu_lock);
	/* If this is the default domain, we don't need
	 * to create a new set of page tables. We can just
	 * use the kernel page tables and save memory.
	 */

	if (domain == &k_mem_domain_default) {
		domain->arch.ptables = xtensa_kernel_ptables;
		domain->arch.asid = asid_count;
		goto end;
	}


	ptables = dup_table();

	if (ptables == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	domain->arch.ptables = ptables;
	domain->arch.asid = ++asid_count;

	sys_slist_append(&xtensa_domain_list, &domain->arch.node);

end:
	xtensa_mmu_compute_domain_regs(&domain->arch);
	ret = 0;

err:
	k_spin_unlock(&xtensa_mmu_lock, key);

	return ret;
}

static int region_map_update(uint32_t *ptables, uintptr_t start,
			     size_t size, uint32_t ring, uint32_t flags)
{
	int ret = 0;

	for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
		uint32_t *l2_table, pte;
		uint32_t page = start + offset;
		uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
		uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
		/* Make sure we grab a fresh copy of L1 page table */
		sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0]));

		l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);

		sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

		pte = XTENSA_MMU_PTE_RING_SET(l2_table[l2_pos], ring);
		pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags);

		l2_table[l2_pos] = pte;

		sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

		xtensa_dtlb_vaddr_invalidate((void *)page);
	}

	return ret;
}

static inline int update_region(uint32_t *ptables, uintptr_t start,
				size_t size, uint32_t ring, uint32_t flags,
				uint32_t option)
{
	int ret;
	k_spinlock_key_t key;

	key = k_spin_lock(&xtensa_mmu_lock);

#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
	uintptr_t va, va_uc;
	uint32_t new_flags, new_flags_uc;

	if (sys_cache_is_ptr_cached((void *)start)) {
		va = start;
		va_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)start);
	} else {
		va = (uintptr_t)sys_cache_cached_ptr_get((void *)start);
		va_uc = start;
	}

	new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
	new_flags = new_flags_uc | XTENSA_MMU_CACHED_WB;

	ret = region_map_update(ptables, va, size, ring, new_flags);

	if (ret == 0) {
		ret = region_map_update(ptables, va_uc, size, ring, new_flags_uc);
	}
#else
	ret = region_map_update(ptables, start, size, ring, flags);
#endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */

#if CONFIG_MP_MAX_NUM_CPUS > 1
	if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) {
		xtensa_mmu_tlb_ipi();
	}
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);

	return ret;
}

static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option)
{
	return update_region(ptables, start, size,
			     XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, option);
}

void xtensa_user_stack_perms(struct k_thread *thread)
{
	(void)memset((void *)thread->stack_info.start,
		     (IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
		     thread->stack_info.size - thread->stack_info.delta);

	update_region(thread_page_tables_get(thread),
		      thread->stack_info.start, thread->stack_info.size,
		      XTENSA_MMU_USER_RING, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, 0);
}

int arch_mem_domain_max_partitions_get(void)
{
	return CONFIG_MAX_DOMAIN_PARTITIONS;
}

int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];

	/* Reset the partition's region back to defaults */
	return reset_region(domain->arch.ptables, partition->start,
			    partition->size, 0);
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];
	uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? XTENSA_MMU_USER_RING :
			XTENSA_MMU_KERNEL_RING;

	return update_region(domain->arch.ptables, partition->start,
			     partition->size, ring, partition->attr, 0);
}

/* Switch the thread to its domain's page tables and update stack permissions. */
int arch_mem_domain_thread_add(struct k_thread *thread)
{
	int ret = 0;
	bool is_user, is_migration;
	uint32_t *old_ptables;
	struct k_mem_domain *domain;

	old_ptables = thread->arch.ptables;
	domain = thread->mem_domain_info.mem_domain;
	thread->arch.ptables = domain->arch.ptables;

	is_user = (thread->base.user_options & K_USER) != 0;
	is_migration = (old_ptables != NULL) && is_user;

	if (is_migration) {
		/* Give access to the thread's stack in its new
		 * memory domain if it is migrating.
		 */
		update_region(thread_page_tables_get(thread),
			      thread->stack_info.start, thread->stack_info.size,
			      XTENSA_MMU_USER_RING,
			      XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
			      OPTION_NO_TLB_IPI);
		/* and reset thread's stack permission in
		 * the old page tables.
		 */
		ret = reset_region(old_ptables,
				   thread->stack_info.start,
				   thread->stack_info.size, 0);
	}

	/* Need to switch to new page tables if this is
	 * the current thread running.
	 */
	if (thread == _current_cpu->current) {
		struct arch_mem_domain *arch_domain = &(domain->arch);

		xtensa_mmu_set_paging(arch_domain);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* Need to tell other CPUs to switch to the new page table
	 * in case the thread is running on one of them.
	 *
	 * Note that there is no need to send TLB IPI if this is
	 * migration as it was sent above during reset_region().
	 */
	if ((thread != _current_cpu->current) && !is_migration) {
		xtensa_mmu_tlb_ipi();
	}
#endif

	return ret;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

	if ((thread->base.user_options & K_USER) == 0) {
		return 0;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
		/* Thread is migrating to another memory domain and not
		 * exiting for good; we weren't called from
		 * z_thread_abort(). Resetting the stack region will
		 * take place in the forthcoming thread_add() call.
		 */
		return 0;
	}

	/* Restore permissions on the thread's stack area since it is no
	 * longer a member of the domain.
	 *
	 * Note that, since every thread must have an associated memory
	 * domain, removing a thread from a domain will be followed by
	 * adding it back to another. So there is no need to send a TLB IPI
	 * at this point.
	 */
	return reset_region(domain->arch.ptables,
			    thread->stack_info.start,
			    thread->stack_info.size, OPTION_NO_TLB_IPI);
}

static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool write)
{
	uint8_t asid_ring;
	uint32_t rasid, pte, *l2_table;
	uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
	uint32_t l2_pos = XTENSA_MMU_L2_POS(page);

	if (is_pte_illegal(ptables[l1_pos])) {
		return false;
	}

	l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
	pte = l2_table[l2_pos];

	if (is_pte_illegal(pte)) {
		return false;
	}

	asid_ring = 0;
	rasid = xtensa_rasid_get();
	for (uint32_t i = 0; i < 4; i++) {
		if (XTENSA_MMU_PTE_ASID_GET(pte, rasid) == XTENSA_MMU_RASID_ASID_GET(rasid, i)) {
			asid_ring = i;
			break;
		}
	}

	if (ring > asid_ring) {
		return false;
	}

	if (write) {
		return (XTENSA_MMU_PTE_ATTR_GET((pte)) & XTENSA_MMU_PERM_W) != 0;
	}

	return true;
}
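
/*
 * A worked example of the check above, with assumed values purely for
 * illustration: if RASID held ASIDs {1, 2, 3, 5} for rings 0..3 and a PTE
 * carried ASID 5, the loop would resolve asid_ring = 3, so a request for
 * ring 2 passes (ring <= asid_ring), while write access is still gated
 * separately by XTENSA_MMU_PERM_W in the PTE attributes.
 */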

static int mem_buffer_validate(const void *addr, size_t size, int write, int ring)
{
	int ret = 0;
	uint8_t *virt;
	size_t aligned_size;
	const struct k_thread *thread = _current;
	uint32_t *ptables = thread_page_tables_get(thread);

	/* addr/size arbitrary, fix this up into an aligned region */
	k_mem_region_align((uintptr_t *)&virt, &aligned_size,
			   (uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE);

	for (size_t offset = 0; offset < aligned_size;
	     offset += CONFIG_MMU_PAGE_SIZE) {
		if (!page_validate(ptables, (uint32_t)(virt + offset), ring, write)) {
			ret = -1;
			break;
		}
	}

	return ret;
}

bool xtensa_mem_kernel_has_access(const void *addr, size_t size, int write)
{
	return mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING) == 0;
}

int arch_buffer_validate(const void *addr, size_t size, int write)
{
	return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
}
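
/*
 * Hedged usage example (the call site is illustrative, not taken from this
 * file): a syscall handler would typically validate a user buffer before
 * touching it, e.g.
 *
 *	if (arch_buffer_validate(user_buf, len, 0) != 0) {
 *		return -EFAULT;	// no user read access to [user_buf, user_buf+len)
 *	}
 *
 * In Zephyr this is normally reached through the kernel's syscall memory
 * validation helpers rather than being called directly.
 */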

#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
/* This is only used when swapping page tables and the auto-refill DTLBs
 * need to be invalidated. Otherwise, the SWAP_PAGE_TABLE assembly
 * is used to avoid a function call.
 */
void xtensa_swap_update_page_tables(struct k_thread *incoming)
{
	struct arch_mem_domain *domain =
		&(incoming->mem_domain_info.mem_domain->arch);

	xtensa_mmu_set_paging(domain);

	xtensa_dtlb_autorefill_invalidate();
}
#endif

#endif /* CONFIG_USERSPACE */