/*
 * Copyright (c) 2008-2016 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#if ARCH_HAS_MMU

#include <lk/debug.h>
#include <lk/trace.h>
#include <stdlib.h>
#include <sys/types.h>
#include <lk/err.h>
#include <string.h>
#include <lk/compiler.h>
#include <lk/pow2.h>
#include <arch.h>
#include <arch/ops.h>
#include <arch/mmu.h>
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/vm.h>

#define LOCAL_TRACE 0
#define TRACE_CONTEXT_SWITCH 0

#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE)
#define IS_SUPERSECTION_ALIGNED(x) IS_ALIGNED(x, SUPERSECTION_SIZE)

/* locals */
static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags);
static void arm_mmu_unmap_section(arch_aspace_t *aspace, addr_t vaddr);

/* the main translation table */
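/* 4096 32-bit L1 entries, each covering 1MB, span the full 4GB address space,
 * hence the 16KB size and the 16KB alignment required by TTBR0/TTBR1 */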
uint32_t arm_kernel_translation_table[TT_ENTRY_COUNT] __ALIGNED(16384) __SECTION(".bss.prebss.translation_table");

/* convert user level mmu flags to flags that go in L1 descriptors */
static uint32_t mmu_flags_to_l1_arch_flags(uint flags) {
    uint32_t arch_flags = 0;
    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
        case ARCH_MMU_FLAG_CACHED:
            arch_flags |= MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#if WITH_SMP
            arch_flags |= MMU_MEMORY_L1_SECTION_SHAREABLE;
#endif
            break;
        case ARCH_MMU_FLAG_UNCACHED:
            arch_flags |= MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED;
            break;
        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
            arch_flags |= MMU_MEMORY_L1_TYPE_DEVICE_SHARED;
            break;
        default:
            /* invalid user-supplied flag */
            DEBUG_ASSERT(0);
            return ERR_INVALID_ARGS;
    }

    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
        case 0:
            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_USER:
            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_RW;
            break;
        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_RO;
            break;
    }

    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
        arch_flags |= MMU_MEMORY_L1_SECTION_XN;
    }

    if (flags & ARCH_MMU_FLAG_NS) {
        arch_flags |= MMU_MEMORY_L1_SECTION_NON_SECURE;
    }

    return arch_flags;
}

/* convert user level mmu flags to flags that go in L2 descriptors */
static uint32_t mmu_flags_to_l2_arch_flags_small_page(uint flags) {
    uint32_t arch_flags = 0;
    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
        case ARCH_MMU_FLAG_CACHED:
            arch_flags |= MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#if WITH_SMP
            arch_flags |= MMU_MEMORY_L2_SHAREABLE;
#endif
            break;
        case ARCH_MMU_FLAG_UNCACHED:
            arch_flags |= MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED;
            break;
        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
            arch_flags |= MMU_MEMORY_L2_TYPE_DEVICE_SHARED;
            break;
        default:
            /* invalid user-supplied flag */
            DEBUG_ASSERT(0);
            return ERR_INVALID_ARGS;
    }

    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
        case 0:
            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_USER:
            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_RW;
            break;
        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_RO;
            break;
    }

    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN;
    } else {
        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE;
    }

    return arch_flags;
}

static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags) {
    int index;

    LTRACEF("aspace %p tt %p pa 0x%lx va 0x%lx flags 0x%x\n", aspace, aspace->tt_virt, paddr, vaddr, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);
    DEBUG_ASSERT(IS_SECTION_ALIGNED(paddr));
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    DEBUG_ASSERT((flags & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_SECTION);

    /* Get the index into the translation table */
    index = vaddr / SECTION_SIZE;

    /* Set the entry value:
     * (2<<0): Section entry
     * (0<<5): Domain = 0
     *  flags: TEX, CB and AP bit settings provided by the caller.
     */
    aspace->tt_virt[index] = (paddr & ~(MB-1)) | (MMU_MEMORY_DOMAIN_MEM << 5) | MMU_MEMORY_L1_DESCRIPTOR_SECTION | flags;
}

static void arm_mmu_unmap_l1_entry(uint32_t *translation_table, uint32_t index) {
    DEBUG_ASSERT(translation_table);
    DEBUG_ASSERT(index < TT_ENTRY_COUNT);

    translation_table[index] = 0;
    DSB;
    arm_invalidate_tlb_mva_no_barrier((vaddr_t)index * SECTION_SIZE);
}

static void arm_mmu_unmap_section(arch_aspace_t *aspace, addr_t vaddr) {
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    arm_mmu_unmap_l1_entry(aspace->tt_virt, vaddr / SECTION_SIZE);
}

void arm_mmu_early_init(void) {
}

void arm_mmu_init(void) {
    /* unmap the initial mappings that are marked temporary */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (map->flags & MMU_INITIAL_MAPPING_TEMPORARY) {
            vaddr_t va = map->virt;
            size_t size = map->size;

            DEBUG_ASSERT(IS_SECTION_ALIGNED(size));

            while (size > 0) {
                arm_mmu_unmap_l1_entry(arm_kernel_translation_table, va / SECTION_SIZE);
                va += MB;
                size -= MB;
            }
        }
        map++;
    }
    arm_after_invalidate_tlb_barrier();

#if KERNEL_ASPACE_BASE != 0
    /* bounce the ttbr over to ttbr1 and leave 0 unmapped */
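    /*
     * TTBCR.N selects the TTBR0/TTBR1 split: virtual addresses at or above
     * 2^(32-N) are translated via TTBR1. Deriving N from the leading zero
     * count of KERNEL_ASPACE_BASE places that split exactly at the kernel
     * aspace base (e.g. KERNEL_ASPACE_BASE 0x80000000 gives N == 1).
     */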
    uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
    DEBUG_ASSERT(n <= 7);

    uint32_t ttbcr = (1<<4) | n; /* disable TTBR0 translation table walks and set the split between TTBR0 and TTBR1 */

    arm_write_ttbr1(arm_read_ttbr0());
    ISB;
    arm_write_ttbcr(ttbcr);
    ISB;
    arm_write_ttbr0(0);
    ISB;
#endif
}

void arch_disable_mmu(void) {
    arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // mmu disabled
}

void arch_mmu_context_switch(arch_aspace_t *aspace) {
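    /*
     * User aspaces live below the TTBR0/TTBR1 split established in
     * arm_mmu_init(), so a context switch only has to load TTBR0, or disable
     * TTBR0 translation table walks entirely (TTBCR bit 4) when there is no
     * user aspace.
     */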
    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("aspace %p\n", aspace);

    uint32_t ttbr;
    uint32_t ttbcr = arm_read_ttbcr();
    if (aspace) {
        ttbr = MMU_TTBRx_FLAGS | (aspace->tt_phys);
        ttbcr &= ~(1<<4); // enable TTBR0
    } else {
        ttbr = 0;
        ttbcr |= (1<<4); // disable TTBR0
    }

    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
    arm_write_ttbr0(ttbr);
    arm_write_ttbcr(ttbcr);
}

status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
    LTRACEF("aspace %p, vaddr 0x%lx\n", aspace, vaddr);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    /* Get the index into the translation table */
    uint index = vaddr / MB;

    /* decode it */
    uint32_t tt_entry = aspace->tt_virt[index];
    switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
        case MMU_MEMORY_L1_DESCRIPTOR_INVALID:
            return ERR_NOT_FOUND;
        case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
            if (tt_entry & (1<<18)) {
                /* supersection */
                PANIC_UNIMPLEMENTED;
            }

            /* section */
            if (paddr)
                *paddr = MMU_MEMORY_L1_SECTION_ADDR(tt_entry) + (vaddr & (SECTION_SIZE - 1));

            if (flags) {
                *flags = 0;
                if (tt_entry & MMU_MEMORY_L1_SECTION_NON_SECURE)
                    *flags |= ARCH_MMU_FLAG_NS;
                switch (tt_entry & MMU_MEMORY_L1_TYPE_MASK) {
                    case MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED;
                        break;
                    case MMU_MEMORY_L1_TYPE_DEVICE_SHARED:
                    case MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                        break;
                }
                switch (tt_entry & MMU_MEMORY_L1_AP_MASK) {
                    case MMU_MEMORY_L1_AP_P_RO_U_NA:
                        *flags |= ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_NA:
                        break;
                    case MMU_MEMORY_L1_AP_P_RO_U_RO:
                        *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_RW:
                        *flags |= ARCH_MMU_FLAG_PERM_USER;
                        break;
                }
                if (tt_entry & MMU_MEMORY_L1_SECTION_XN) {
                    *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                }
            }
            break;
        case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
            uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
            uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
            uint32_t l2_entry = l2_table[l2_index];

            //LTRACEF("l2_table at %p, index %u, entry 0x%x\n", l2_table, l2_index, l2_entry);

            switch (l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) {
                default:
                case MMU_MEMORY_L2_DESCRIPTOR_INVALID:
                    return ERR_NOT_FOUND;
                case MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE:
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE:
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN:
                    if (paddr)
                        *paddr = MMU_MEMORY_L2_SMALL_PAGE_ADDR(l2_entry) + (vaddr & (PAGE_SIZE - 1));

                    if (flags) {
                        *flags = 0;
                        /* NS flag is only present on L1 entry */
                        if (tt_entry & MMU_MEMORY_L1_PAGETABLE_NON_SECURE)
                            *flags |= ARCH_MMU_FLAG_NS;
                        switch (l2_entry & MMU_MEMORY_L2_TYPE_MASK) {
                            case MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED;
                                break;
                            case MMU_MEMORY_L2_TYPE_DEVICE_SHARED:
                            case MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                                break;
                        }
                        switch (l2_entry & MMU_MEMORY_L2_AP_MASK) {
                            case MMU_MEMORY_L2_AP_P_RO_U_NA:
                                *flags |= ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_NA:
                                break;
                            case MMU_MEMORY_L2_AP_P_RO_U_RO:
                                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_RW:
                                *flags |= ARCH_MMU_FLAG_PERM_USER;
                                break;
                        }
                        if ((l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) ==
                                MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN) {
                            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                        }
                    }
                    break;
            }

            break;
        }
        default:
            PANIC_UNIMPLEMENTED;
    }

    return NO_ERROR;
}


/*
 *  We allow up to 4 adjacent L1 entries to point within the same memory page
 *  allocated for L2 page tables.
 *
 *  L1:   | 0 | 1 | 2 | 3 | .... | N+0 | N+1 | N+2 | N+3 |
 *  L2:   [       0       | .....[      (N/4)            |
 */
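/*
 * In the short-descriptor format each L2 page table holds 256 32-bit entries
 * (1KB), so one 4KB page provides the L2 tables for L1E_PER_PAGE consecutive
 * L1 entries; get_l2_table() hands out PAGE_SIZE / L1E_PER_PAGE sized slices
 * of that page.
 */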
#define L1E_PER_PAGE 4

static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *ppa) {
    status_t ret;
    paddr_t pa;
    uint32_t tt_entry;

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(ppa);

    /* lookup an existing l2 pagetable */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            *ppa = (paddr_t)ROUNDDOWN(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
            return NO_ERROR;
        }
    }

    /* not found: allocate it */
    uint32_t *l2_va = pmm_alloc_kpages(1, &aspace->pt_page_list);
    if (!l2_va)
        return ERR_NO_MEMORY;

    /* wipe it clean to set no access */
    memset(l2_va, 0, PAGE_SIZE);

    /* get physical address */
    ret = arm_vtop((vaddr_t)l2_va, &pa);
    ASSERT(!ret);
    ASSERT(paddr_to_kvaddr(pa));

    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));

    LTRACEF("allocated pagetable page at %p, pa 0x%lx, l2 table pa 0x%lx\n", l2_va, pa, *ppa);
    return NO_ERROR;
}


static void put_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t l2_pa) {
    DEBUG_ASSERT(aspace);

    /* check if any l1 entry points to this l2 table */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        uint32_t tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            return;
        }
    }

    /* we can free this l2 table */
    vm_page_t *page = paddr_to_vm_page(l2_pa);
    if (!page)
        panic("bad page table paddr 0x%lx\n", l2_pa);

    /* verify that it is in our page list */
    DEBUG_ASSERT(list_in_list(&page->node));

    list_delete(&page->node);

    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
    pmm_free_page(page);
}

#if WITH_ARCH_MMU_PICK_SPOT

static inline bool are_regions_compatible(uint new_region_flags,
        uint adjacent_region_flags) {
    /*
     * Two regions are compatible if NS flag matches.
     */
    uint mask = ARCH_MMU_FLAG_NS;

    if ((new_region_flags & mask) == (adjacent_region_flags & mask))
        return true;

    return false;
}


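/*
 * The NS attribute lives only in the L1 descriptor (section or page table
 * entry), so a single 1MB section of address space cannot mix secure and
 * non-secure mappings. When an adjacent region's NS flag differs, the new
 * region has to start on its own section boundary, which is what
 * arch_mmu_pick_spot() enforces below.
 */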
vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_flags,
                           vaddr_t end,  uint next_region_flags,
                           vaddr_t align, size_t size, uint flags) {
    LTRACEF("base 0x%lx, end 0x%lx, align %ld, size %zd, flags 0x%x\n",
            base, end, align, size, flags);

    vaddr_t spot;

    if (align >= SECTION_SIZE ||
            are_regions_compatible(flags, prev_region_flags)) {
        spot = ALIGN(base, align);
    } else {
        spot = ALIGN(base, SECTION_SIZE);
    }

    vaddr_t spot_end = spot + size - 1;
    if (spot_end < spot || spot_end > end)
        return end; /* wrapped around or it does not fit */

    if ((spot_end / SECTION_SIZE) == (end / SECTION_SIZE)) {
        if (!are_regions_compatible(flags, next_region_flags))
            return end;
    }

    return spot;
}
#endif  /* WITH_ARCH_MMU_PICK_SPOT */


int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count, uint flags) {
    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & ARCH_MMU_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    /* see what kind of mapping we can use */
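    /* prefer 1MB section mappings when vaddr, paddr and the remaining count
     * allow it, otherwise fall back to small pages in an L2 table */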
    int mapped = 0;
    while (count > 0) {
        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
            /* we can use a section */

            /* compute the arch flags for L1 sections */
            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;

            /* map it */
            arm_mmu_map_section(aspace, paddr, vaddr, arch_flags);
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
        } else {
            /* will have to use a L2 mapping */
            uint l1_index = vaddr / SECTION_SIZE;
            uint32_t tt_entry = aspace->tt_virt[l1_index];

            LTRACEF("tt_entry 0x%x\n", tt_entry);
            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                    // XXX will have to break L1 mapping into a L2 page table
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                    paddr_t l2_pa = 0;
                    if (get_l2_table(aspace, l1_index, &l2_pa) != NO_ERROR) {
                        TRACEF("failed to allocate pagetable\n");
                        goto done;
                    }
                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
                    if (flags & ARCH_MMU_FLAG_NS)
                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;

                    aspace->tt_virt[l1_index] = tt_entry;
                }
                /* fallthrough */
                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                    LTRACEF("l2_table at %p\n", l2_table);

                    DEBUG_ASSERT(l2_table);

                    // XXX handle 64K pages here

                    /* compute the arch flags for L2 4K pages */
                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);

                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                    do {
                        l2_table[l2_index++] = paddr | arch_flags;
                        count--;
                        mapped++;
                        vaddr += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
                    break;
                }
                default:
                    PANIC_UNIMPLEMENTED;
            }
        }
    }

done:
    DSB;
    return mapped;
}

int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));

    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);

    int unmapped = 0;
    while (count > 0) {
        uint l1_index = vaddr / SECTION_SIZE;
        uint32_t tt_entry = aspace->tt_virt[l1_index];

        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                /* this top level page is not mapped, move on to the next one */
                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
                vaddr += page_cnt * PAGE_SIZE;
                count -= page_cnt;
                break;
            }
            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
                    /* we're asked to remove at least all of this section, so just zero it out */
                    // XXX test for supersection
                    arm_mmu_unmap_section(aspace, vaddr);

                    vaddr += SECTION_SIZE;
                    count -= SECTION_SIZE / PAGE_SIZE;
                    unmapped += SECTION_SIZE / PAGE_SIZE;
                } else {
                    // XXX handle unmapping just part of a section
                    // will need to convert to a L2 table and then unmap the parts we are asked to
                    PANIC_UNIMPLEMENTED;
                }
                break;
            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);

                /* unmap page run */
                for (uint i = 0; i < page_cnt; i++) {
                    l2_table[page_idx++] = 0;
                }
                DSB;

                /* invalidate tlb */
                for (uint i = 0; i < page_cnt; i++) {
                    arm_invalidate_tlb_mva_no_barrier(vaddr);
                    vaddr += PAGE_SIZE;
                }
                count -= page_cnt;
                unmapped += page_cnt;

                /*
                 * Check whether all pages covered by this l1 entry are now unmapped.
                 * We only need to check the entries we did not clear above, starting
                 * from page_idx and wrapping around the section.
                 */
                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
                while (page_cnt) {
                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
                        page_idx = 0;
                    if (l2_table[page_idx++])
                        break;
                    page_cnt--;
                }
                if (!page_cnt) {
                    /* we can kill l1 entry */
                    arm_mmu_unmap_l1_entry(aspace->tt_virt, l1_index);

                    /* try to free l2 page itself */
                    put_l2_table(aspace, l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                }
                break;
            }

            default:
                // XXX not implemented supersections or L2 tables
                PANIC_UNIMPLEMENTED;
        }
    }
    arm_after_invalidate_tlb_barrier();
    return unmapped;
}

status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);

    DEBUG_ASSERT(aspace);

    /* validate that the base + size is sane and doesn't wrap */
    DEBUG_ASSERT(size > PAGE_SIZE);
    DEBUG_ASSERT(base + size - 1 > base);

    list_initialize(&aspace->pt_page_list);

    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
        aspace->base = base;
        aspace->size = size;
        aspace->tt_virt = arm_kernel_translation_table;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    } else {

        // XXX at the moment we can only really deal with 1GB of user space, and
        // thus need only a single page for the top level translation table
        DEBUG_ASSERT(base < GB && (base + size) <= GB);
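        // a single page holds 1024 L1 entries covering 1GB at 1MB each,
        // matching the limit asserted above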

        aspace->base = base;
        aspace->size = size;

        uint32_t *va = pmm_alloc_kpages(1, &aspace->pt_page_list);
        if (!va)
            return ERR_NO_MEMORY;

        aspace->tt_virt = va;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    }

    LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

    return NO_ERROR;
}

status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
    LTRACEF("aspace %p\n", aspace);

    // XXX free all of the pages allocated in aspace->pt_page_list
    vm_page_t *p;
    while ((p = list_remove_head_type(&aspace->pt_page_list, vm_page_t, node)) != NULL) {
        LTRACEF("freeing page %p\n", p);
        pmm_free_page(p);
    }

    return NO_ERROR;
}

#endif // ARCH_HAS_MMU