/*
 * Copyright (c) 2015 Stefan Kristiansson
 * Based on arch/arm/arm/mmu.c
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/trace.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <assert.h>
#include <string.h>
#include <arch/mmu.h>
#include <arch/or1k.h>
#include <arch/or1k/mmu.h>
#include <kernel/vm.h>

#define LOCAL_TRACE 0

#if WITH_KERNEL_VM
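/* Flat kernel L1 translation table: 256 entries, each covering one
 * SECTION_SIZE slice of the 32-bit virtual address space. An entry is either
 * a section mapping (OR1K_MMU_PG_L set) or holds the physical address of an
 * l2 page table containing PAGE_SIZE granularity mappings. */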
uint32_t or1k_kernel_translation_table[256] __ALIGNED(8192) __SECTION(".bss.prebss.translation_table");

/* Pessimistic TLB invalidation that would rather invalidate too much than
 * too little.
 * TODO: make it more precise. */
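/* For each page in the range, the set index is derived from the virtual
 * address and the match register of every way in that set is cleared in
 * both the DTLB and the ITLB, dropping any translation cached for the
 * page. */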
void or1k_invalidate_tlb(vaddr_t vaddr, uint count) {
    uint32_t dmmucfgr = mfspr(OR1K_SPR_SYS_DMMUCFGR_ADDR);
    uint32_t immucfgr = mfspr(OR1K_SPR_SYS_IMMUCFGR_ADDR);
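    /* NTW encodes (number of TLB ways - 1) and NTS encodes log2(number of
     * TLB sets) in the MMU configuration registers. */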
    uint32_t num_dtlb_ways = OR1K_SPR_SYS_DMMUCFGR_NTW_GET(dmmucfgr) + 1;
    uint32_t num_dtlb_sets = 1 << OR1K_SPR_SYS_DMMUCFGR_NTS_GET(dmmucfgr);
    uint32_t num_itlb_ways = OR1K_SPR_SYS_IMMUCFGR_NTW_GET(immucfgr) + 1;
    uint32_t num_itlb_sets = 1 << OR1K_SPR_SYS_IMMUCFGR_NTS_GET(immucfgr);
    uint32_t offs;

    for (; count; count--) {
        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_dtlb_sets-1);
        switch (num_dtlb_ways) {
            case 4:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(3, offs), 0);
                // fallthrough
            case 3:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(2, offs), 0);
                // fallthrough
            case 2:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(1, offs), 0);
                // fallthrough
            case 1:
                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(0, offs), 0);
        }

        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_itlb_sets-1);
        switch (num_itlb_ways) {
            case 4:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(3, offs), 0);
                // fallthrough
            case 3:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(2, offs), 0);
                // fallthrough
            case 2:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(1, offs), 0);
                // fallthrough
            case 1:
                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(0, offs), 0);
        }
        vaddr += PAGE_SIZE;
    }
}

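/* Look up the translation for vaddr in the kernel translation table: if the
 * l1 entry is a section mapping it is used directly, otherwise the l2 table
 * it points to is consulted. Fills in the physical address and the decoded
 * arch-independent mmu flags when the corresponding pointers are non-NULL. */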
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) {
    uint index = vaddr / SECTION_SIZE;
    uint32_t pte = or1k_kernel_translation_table[index];
    uint32_t vmask = SECTION_SIZE-1;

    if (!(pte & OR1K_MMU_PG_PRESENT))
        return ERR_NOT_FOUND;

    /* not a section mapping, so walk the l2 table */
    if (!(pte & OR1K_MMU_PG_L)) {
        uint32_t *l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
        index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
        pte = l2_table[index];
        vmask = PAGE_SIZE-1;
    }

    if (paddr)
        *paddr = (pte & ~OR1K_MMU_PG_FLAGS_MASK) | (vaddr & vmask);

    if (flags) {
        *flags = 0;
        if (pte & OR1K_MMU_PG_U)
            *flags |= ARCH_MMU_FLAG_PERM_USER;
        if (!(pte & OR1K_MMU_PG_X))
            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
        if (!(pte & OR1K_MMU_PG_W))
            *flags |= ARCH_MMU_FLAG_PERM_RO;
        if (pte & OR1K_MMU_PG_CI)
            *flags |= ARCH_MMU_FLAG_UNCACHED;
    }

    return NO_ERROR;
}

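/* Unmap count pages starting at vaddr. Pages whose l1 entry is not present
 * are simply skipped; present mappings can only be removed one whole,
 * section-aligned section at a time, and tearing down l2 tables is still
 * unimplemented. Returns the number of pages actually unmapped. */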
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
    LTRACEF("vaddr = 0x%lx, count = %d\n", vaddr, count);

    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    uint unmapped = 0;
    while (count) {
        uint index = vaddr / SECTION_SIZE;
        uint32_t pte = or1k_kernel_translation_table[index];
        if (!(pte & OR1K_MMU_PG_PRESENT)) {
            vaddr += PAGE_SIZE;
            count--;
            continue;
        }
        /* Unmapping of l2 tables is not implemented (yet) */
        if (!(pte & OR1K_MMU_PG_L) || !IS_ALIGNED(vaddr, SECTION_SIZE) || count < SECTION_SIZE / PAGE_SIZE)
            PANIC_UNIMPLEMENTED;

        or1k_kernel_translation_table[index] = 0;
        or1k_invalidate_tlb(vaddr, SECTION_SIZE / PAGE_SIZE);
        vaddr += SECTION_SIZE;
        count -= SECTION_SIZE / PAGE_SIZE;
        unmapped += SECTION_SIZE / PAGE_SIZE;
    }

    return unmapped;
}

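/* Map count pages starting at vaddr to paddr with the given flags. Section
 * mappings are used whenever vaddr, paddr and the remaining count allow it;
 * otherwise pages are mapped individually through an l2 table that is
 * allocated on demand. Returns the number of pages mapped. */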
int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags) {
    uint l1_index;
    uint32_t pte;
    uint32_t arch_flags = 0;

    LTRACEF("vaddr = 0x%lx, paddr = 0x%lx, count = %d, flags = 0x%x\n", vaddr, paddr, count, flags);

    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (flags & ARCH_MMU_FLAG_PERM_USER)
        arch_flags |= OR1K_MMU_PG_U;
    if (!(flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE))
        arch_flags |= OR1K_MMU_PG_X;
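    /* Any requested cache attribute is collapsed onto the single
     * cache-inhibit bit the or1k PTE format provides. */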
    if (flags & ARCH_MMU_FLAG_CACHE_MASK)
        arch_flags |= OR1K_MMU_PG_CI;
    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
        arch_flags |= OR1K_MMU_PG_W;

    uint mapped = 0;
    while (count) {
        l1_index = vaddr / SECTION_SIZE;
        if (IS_ALIGNED(vaddr, SECTION_SIZE) && IS_ALIGNED(paddr, SECTION_SIZE) && count >= SECTION_SIZE / PAGE_SIZE) {
            or1k_kernel_translation_table[l1_index] = (paddr & ~(SECTION_SIZE-1)) | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
            continue;
        }

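        /* Not a full section: map individual pages through an l2 table,
         * allocating and installing one if this l1 slot is empty. */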
        uint32_t *l2_table;

        pte = or1k_kernel_translation_table[l1_index];

        /* FIXME: l1 already mapped as a section */
        if (pte & OR1K_MMU_PG_PRESENT && pte & OR1K_MMU_PG_L)
            PANIC_UNIMPLEMENTED;

        if (pte & OR1K_MMU_PG_PRESENT) {
            l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
            LTRACEF("l2_table at %p\n", l2_table);
        } else {
            l2_table = pmm_alloc_kpage();
            if (!l2_table) {
                TRACEF("failed to allocate pagetable\n");
                return mapped;
            }

            memset(l2_table, 0, PAGE_SIZE);
            paddr_t l2_pa = vaddr_to_paddr(l2_table);
            LTRACEF("allocated pagetable at %p, pa 0x%lx\n", l2_table, l2_pa);
            or1k_kernel_translation_table[l1_index] = l2_pa | arch_flags | OR1K_MMU_PG_PRESENT;
        }

        uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;

        LTRACEF("l2_index = 0x%x, vaddr = 0x%lx, paddr = 0x%lx\n", l2_index, vaddr, paddr);
        l2_table[l2_index] = paddr | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;

        count--;
        mapped++;
        vaddr += PAGE_SIZE;
        paddr += PAGE_SIZE;
    }

    return mapped;
}
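
/*
 * Rough usage sketch (the addresses and count here are hypothetical, not
 * taken from this port): the VM layer calls arch_mmu_map() with page-aligned
 * addresses, e.g. to map a small device region uncached and non-executable:
 *
 *   arch_mmu_map(&aspace, 0xb0000000, 0x90000000, 4,
 *                ARCH_MMU_FLAG_UNCACHED_DEVICE | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
 *
 * With the flag translation above this installs four PAGE_SIZE mappings
 * through an l2 table, with the CI bit set and the X bit clear.
 */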

// initialize per address space
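// The or1k port currently uses the single global kernel translation table,
// so there is no per-aspace state to set up yet; the disabled block below
// sketches what a full implementation would track.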
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    LTRACEF("aspace %p, base %#lx, size %#zx, flags %#x\n", aspace, base, size, flags);

    DEBUG_ASSERT(aspace);

    // validate that the base + size is sane and doesn't wrap
    DEBUG_ASSERT(size > PAGE_SIZE);
    DEBUG_ASSERT(base + size - 1 > base);

#if 0
    aspace->flags = flags;
    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
        // at the moment we can only deal with address spaces as globally defined
        DEBUG_ASSERT(base == KERNEL_ASPACE_BASE);
        DEBUG_ASSERT(size == KERNEL_ASPACE_SIZE);

        aspace->base = base;
        aspace->size = size;
        aspace->pt_virt = kernel_pgtable;
        aspace->pt_phys = kernel_pgtable_phys;
    } else {
        PANIC_UNIMPLEMENTED;
    }

    LTRACEF("pt phys %#lx, pt virt %p\n", aspace->pt_phys, aspace->pt_virt);
#endif

    return NO_ERROR;
}

status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
    LTRACEF("aspace %p\n", aspace);

    PANIC_UNIMPLEMENTED;
}

// load a new user address space context.
// a NULL aspace argument should load the kernel-only context
void arch_mmu_context_switch(arch_aspace_t *aspace) {
    LTRACEF("aspace %p\n", aspace);

    PANIC_UNIMPLEMENTED;
}

#endif /* WITH_KERNEL_VM */