/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2023-10-10     Shell        Add permission control API
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

#include <board.h>
#include "cp15.h"
#include "mm_page.h"
#include "mmu.h"
#include <mm_aspace.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#include <lwp_arch.h>
#include "ioremap.h"
#else
#define KERNEL_VADDR_START 0
#endif

/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));

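/*
 * Program the Domain Access Control Register (CP15 c3) with domain_val
 * and return the previous DACR value.
 */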
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}

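/*
 * Fill level-1 section entries in MMUTable so that the virtual range
 * [vaddrStart, vaddrEnd] maps to physical memory starting at paddrStart
 * with the given attributes. Each entry covers a 1MB section.
 */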
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart, rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;
    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}

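/*
 * Build the early page table used by the RT_USING_SMART boot path: map the
 * kernel virtual range to its physical location (va + pv_off) and also
 * identity-map the physical alias of that range, so execution can continue
 * right after the MMU is switched on. All other entries are cleared.
 */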
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++)
    {
        unsigned int vaddr = (va << 20);

        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
        {
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        }
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size)
        {
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            mtbl[va] = 0;
        }
    }
}

#ifndef RT_USING_SMART
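/*
 * Map `size` bytes starting at virtual address `va` to physical address `pa`
 * as 1MB sections in the given level-1 table.
 */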
static void _init_map_section(rt_uint32_t *mmu_table, rt_uint32_t va,
                              rt_uint32_t size, rt_uint32_t pa, rt_uint32_t attr)
{
    volatile rt_uint32_t *ptt;
    volatile int i, num_section;
    ptt = (rt_uint32_t *)mmu_table + (va >> ARCH_SECTION_SHIFT);
    num_section = size >> ARCH_SECTION_SHIFT;
    for (i = 0; i <= num_section; i++)
    {
        *ptt = attr | (((pa >> ARCH_SECTION_SHIFT) + i) << ARCH_SECTION_SHIFT);
        ptt++;
    }
}
#endif

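/*
 * Set up the early boot MMU table: map enough memory to cover the kernel
 * image up to __bss_end (rounded to a 1MB boundary), using pv_off as the
 * physical-virtual offset.
 */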
void rt_hw_mem_setup_early(rt_uint32_t *early_mmu_talbe,
                           rt_uint32_t pv_off)
{
    rt_uint32_t size = 0;

    size = 0x100000 + (rt_uint32_t)&__bss_end;
    size &= ~(0x100000 - 1);
#ifdef RT_USING_SMART
    size -= KERNEL_VADDR_START;
    init_mm_setup(early_mmu_talbe, size, pv_off);
#else
    rt_uint32_t normal_attr = NORMAL_MEM;
    extern unsigned char _reset;
    rt_uint32_t va = (rt_uint32_t) &_reset;
    /* Align the starting virtual address down to a 0x1000000 boundary. */
    va &= ~(0x1000000 - 1);
    size -= va;
    _init_map_section(early_mmu_talbe, va, size, va + pv_off, normal_attr);
#endif
}

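/*
 * Initialize the kernel address space object and install the static mappings
 * described by the mdesc[] array into MMUTable, then flush the table from
 * the D-cache so the hardware table walker sees it.
 */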
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    void *vaddr;
    size_t length;
    /* init kernel space */
#ifdef RT_USING_SMART
    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP, -USER_VADDR_TOP, (void *)MMUTable);
#else
    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, (void *)MMUTable);
#endif /* RT_USING_SMART */

    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        vaddr = (void *)mdesc->vaddr_start;
        length = mdesc->vaddr_end - mdesc->vaddr_start;
        rt_aspace_map_static(&rt_kernel_space, &mdesc->varea, &vaddr, length,
                             mdesc->attr, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0);

        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                         mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void*)MMUTable, sizeof MMUTable);
}

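/*
 * Turn the MMU on: clean the caches, disable them together with the MMU,
 * program the domain register (0x55555555 puts all 16 domains in "client"
 * mode so access permissions are checked), set MMUTable as the translation
 * table base, then re-enable the MMU and the caches.
 */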
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}

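/*
 * Reserve the [v_address, v_address + size) window used for kernel mappings:
 * verify the corresponding level-1 entries are still unused and record the
 * ioremap/MPR region boundaries. Returns 0 on success, -1 on a bad argument
 * or if the range is already occupied.
 */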
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            return -1;
        }
    }

#ifdef RT_USING_SMART
    rt_ioremap_start = v_address;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
#else
    rt_mpr_start = (void *)((rt_size_t)0 - rt_mpr_size);
#endif

    return 0;
}

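/*
 * With RT_IOREMAP_LATE, pre-allocate a level-2 page table for every section
 * of the ioremap window so that later ioremap() calls never have to allocate
 * one; otherwise this function is a no-op.
 */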
int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void *v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }
    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }
    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 = (size_t *)aspace->page_table + l1_off;

        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

        loop_va += ARCH_SECTION_SIZE;
    }
#endif

    return 0;
}

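/*
 * Remove the 4K mapping of v_addr from the level-2 table. When the last
 * reference to the level-2 table is dropped, free it and clear the
 * corresponding level-1 entry as well.
 */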
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);

    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
    }
    else
    {
        return;
    }

    if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
    {
        *(mmu_l2 + l2_off) = 0;
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

        if (rt_pages_free(mmu_l2, 0))
        {
            *mmu_l1 = 0;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
    }
    loop_va += ARCH_PAGE_SIZE;
}

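/*
 * Install a single 4K mapping v_addr -> p_addr with the given attributes.
 * A level-2 table is allocated (and flushed to memory) on demand; when an
 * existing one is reused, its reference count is increased. Returns 0 on
 * success or -1 if the level-2 table cannot be allocated.
 */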
static int _kenrel_map_4K(unsigned long *lv0_tbl, void *v_addr, void *p_addr,
                          size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        rt_page_ref_inc(mmu_l2, 0);
    }
    else
    {
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error, quit */
            return -1;
        }
    }

    *(mmu_l2 + l2_off) = (loop_pa | attr);
    /* cache maintain */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

    loop_va += ARCH_PAGE_SIZE;
    loop_pa += ARCH_PAGE_SIZE;

    return 0;
}

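/*
 * Map `size` bytes from v_addr to p_addr page by page. If any page fails,
 * every page mapped so far is unmapped again and RT_NULL is returned; a
 * non-NULL pointer is returned on success.
 */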
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO trying with HUGEPAGE here
    while (npages--)
    {
        ret = _kenrel_map_4K(aspace->page_table, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                rt_enter_critical();
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                rt_exit_critical();

                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        /* success: return the start of the mapped region */
        return unmap_va;
    }

    return NULL;
}

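/*
 * Unmap `size` bytes starting at v_addr from the given address space,
 * one 4K page at a time.
 */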
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    // the caller guarantees that v_addr and size are page aligned
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        rt_enter_critical();
        _kenrel_unmap_4K(aspace->page_table, v_addr);
        rt_exit_critical();

        v_addr += ARCH_PAGE_SIZE;
    }
}

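/*
 * Switch to the page table of a user address space: translate the table to
 * its physical address, install it via rt_hw_mmu_switch() and invalidate the
 * local TLB. The kernel space never needs a switch here.
 */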
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl);

        rt_hw_mmu_switch(pgtbl);

        rt_hw_tlb_invalidate_all_local();
    }
}

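/*
 * Walk the two-level page table of `aspace` and translate a virtual address
 * to its physical address. Large pages and supersections are not supported;
 * ARCH_MAP_FAILED is returned when no valid mapping exists.
 */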
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    RT_ASSERT(aspace);

    mmu_l1 = (size_t *)aspace->page_table + l1_off;

    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
        case 0: /* not used */
            break;
        case 1: /* page table */
            mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
            l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
            pa = *(mmu_l2 + l2_off);
            if (pa & ARCH_MMU_USED_MASK)
            {
                if ((pa & ARCH_MMU_USED_MASK) == 1)
                {
                    /* large page, not supported */
                    break;
                }
                pa &= ~(ARCH_PAGE_MASK);
                pa += ((size_t)v_addr & ARCH_PAGE_MASK);
                return (void *)pa;
            }
            break;
        case 2:
        case 3:
            /* section */
            if (tmp & ARCH_TYPE_SUPERSECTION)
            {
                /* super section, not supported */
                break;
            }
            pa = (tmp & ~ARCH_SECTION_MASK);
            pa += ((size_t)v_addr & ARCH_SECTION_MASK);
            return (void *)pa;
    }
    return ARCH_MAP_FAILED;
}

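/* Page attribute/permission control is not implemented on this port. */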
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    return -RT_ENOSYS;
}

#define KPTE_START (KERNEL_VADDR_START >> ARCH_SECTION_SHIFT)

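/*
 * Create a new 16KB level-1 page table for a user address space: copy the
 * kernel entries starting at KPTE_START from the kernel page table, clear
 * the user portion below it, and flush the whole table to memory.
 */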
void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
    mmu_table = (size_t *)rt_pages_alloc_ext(2, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }
    rt_memcpy(mmu_table + KPTE_START, (size_t *)rt_kernel_space.page_table + KPTE_START, ARCH_PAGE_SIZE);
    rt_memset(mmu_table, 0, 3 * ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, 4 * ARCH_PAGE_SIZE);

    return mmu_table;
}

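/* Release the 16KB (4-page) level-1 page table allocated above. */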
void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 2);
}