/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2021-11-28     GuEe-GUI     first version
 * 2022-12-10     WangXiaoyao  porting to MM
 * 2024-07-08     Shell        added support for ASID
 */

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define __MMU_INTERNAL

#include "mm_aspace.h"
#include "mm_page.h"
#include "mmu.h"
#include "tlb.h"

#include "ioremap.h"
#ifdef RT_USING_SMART
#include <lwp_mm.h>
#endif

#define TCR_CONFIG_TBI0     rt_hw_mmu_config_tbi(0)
#define TCR_CONFIG_TBI1     rt_hw_mmu_config_tbi(1)

#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE  3UL

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4

/* restrict the kernel virtual address range so that RT_NULL (0) is never a valid mapping */
#ifndef KERNEL_VADDR_START
#define KERNEL_VADDR_START (ARCH_RAM_OFFSET + ARCH_TEXT_OFFSET)
#endif

volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};

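/**
 * @brief Unmap a single 4K page from the translation table.
 *
 * Walks the four translation levels for v_addr, clears the level-3 leaf
 * entry, then drops one reference on every lower-level table traversed,
 * clearing the parent entry and freeing the table page once it is no
 * longer referenced.
 */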
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        /* next table entry in current level */
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }

    return;
}

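/**
 * @brief Map a single 4K page at the given virtual address.
 *
 * Intermediate translation tables are allocated on demand; tables that
 * already exist get their reference count bumped so that unmapping can
 * release them symmetrically. Returns 0 on success or an MMU_MAP_ERROR_*
 * code, undoing any partially built walk on failure.
 */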
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    rt_ubase_t va = (rt_ubase_t)vaddr;
    rt_ubase_t pa = (rt_ubase_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* a block mapping already exists here: conflict */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the page level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}

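/**
 * @brief Map a single 2M block at the given virtual address.
 *
 * Same walk as _kernel_map_4K, but it stops one level earlier and writes
 * a block descriptor at the 2M level instead of a page descriptor.
 */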
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;

    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* a block mapping already exists here: conflict */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at the 2M block level */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}

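/**
 * @brief Map a physically contiguous range into an address space.
 *
 * 2M block mappings are used whenever both addresses and the remaining
 * size are section aligned; otherwise the range falls back to 4K pages.
 * On allocation failure everything mapped so far is undone and NULL is
 * returned; on success the original v_addr is returned.
 */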
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;

    void *unmap_va = v_addr;
    size_t remaining_sz = size;
    size_t stride;
    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);

    RT_ASSERT(!(size & ARCH_PAGE_MASK));

    while (remaining_sz)
    {
        if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) ||
            ((rt_ubase_t)p_addr & ARCH_SECTION_MASK) ||
            (remaining_sz < ARCH_SECTION_SIZE))
        {
            /* legacy 4k mapping */
            stride = ARCH_PAGE_SIZE;
            mapper = _kernel_map_4K;
        }
        else
        {
            /* 2m huge page */
            stride = ARCH_SECTION_SIZE;
            mapper = _kernel_map_2M;
        }

        /* check aliasing */
        #ifdef RT_DEBUGGING_ALIASING
        #define _ALIAS_OFFSET(addr) ((long)(addr) & (RT_PAGE_AFFINITY_BLOCK_SIZE - 1))
        if (rt_page_is_member((rt_base_t)p_addr) && _ALIAS_OFFSET(v_addr) != _ALIAS_OFFSET(p_addr))
        {
            LOG_W("Possibly aliasing on va(0x%lx) to pa(0x%lx)", v_addr, p_addr);
            rt_backtrace();
            RT_ASSERT(0);
        }
        #endif /* RT_DEBUGGING_ALIASING */

        MM_PGTBL_LOCK(aspace);
        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);

        if (ret != 0)
        {
            /* any other return value is treated as a programming error */
            RT_ASSERT(ret == MMU_MAP_ERROR_NOPAGE);
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va = (char *)unmap_va + stride;
            }
            break;
        }

        remaining_sz -= stride;
        v_addr = (char *)v_addr + stride;
        p_addr = (char *)p_addr + stride;
    }

    if (ret == 0)
    {
        return unmap_va;
    }

    return NULL;
}

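/**
 * @brief Unmap a page-aligned range, skipping holes that were never mapped.
 */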
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    // the caller guarantees that v_addr and size are page aligned
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        MM_PGTBL_LOCK(aspace);
        if (rt_hw_mmu_v2p(aspace, v_addr) != ARCH_MAP_FAILED)
            _kenrel_unmap_4K(aspace->page_table, v_addr);
        MM_PGTBL_UNLOCK(aspace);
        v_addr = (char *)v_addr + ARCH_PAGE_SIZE;
    }
}

#ifdef ARCH_USING_ASID
/**
 * The ASID is used to identify a specific address space in the TLB.
 * In the best case, each address space has its own exclusive ASID. However,
 * ARM only guarantees 8 bits of ID space, which gives us only 254 IDs
 * (besides the single ASID reserved for the kernel).
 */

static rt_spinlock_t _asid_lock = RT_SPINLOCK_INIT;

rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    static rt_uint16_t _asid_pool = 0;
    rt_uint16_t asid_to, asid_from;
    rt_ubase_t ttbr0_from;

    asid_to = aspace->asid;
    if (asid_to == 0)
    {
        rt_spin_lock(&_asid_lock);
        #define MAX_ASID (1ul << MMU_SUPPORTED_ASID_BITS)
        if (_asid_pool && _asid_pool < MAX_ASID)
        {
            asid_to = ++_asid_pool;
            LOG_D("Allocated ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }
        else
        {
            asid_to = _asid_pool = 1;
            LOG_D("Overflowed ASID %d to PID %d(aspace %p)", asid_to, lwp_self()->pid, aspace);
        }

        rt_spin_unlock(&_asid_lock);

        aspace->asid = asid_to;
        rt_hw_tlb_invalidate_aspace(aspace);
    }

    __asm__ volatile("mrs %0, ttbr0_el1" :"=r"(ttbr0_from));
    asid_from = ttbr0_from >> MMU_ASID_SHIFT;
    if (asid_from == asid_to)
    {
        LOG_D("Conflict ASID. from %d, to %d", asid_from, asid_to);
        rt_hw_tlb_invalidate_aspace(aspace);
    }
    else
    {
        LOG_D("ASID switched. from %d, to %d", asid_from, asid_to);
    }

    return asid_to;
}

#else


rt_uint16_t _aspace_get_asid(rt_aspace_t aspace)
{
    rt_hw_tlb_invalidate_all();
    return 0;
}
#endif /* ARCH_USING_ASID */

#define CREATE_TTBR0(pgtbl, asid) ((rt_ubase_t)(pgtbl) | (rt_ubase_t)(asid) << MMU_ASID_SHIFT)
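/**
 * @brief Switch the current CPU to a user address space.
 *
 * Installs the physical address of the aspace page table together with
 * its ASID into TTBR0_EL1. Switching to the kernel space is a no-op.
 */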
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        rt_ubase_t ttbr0;
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl);

        ttbr0 = CREATE_TTBR0(pgtbl, _aspace_get_asid(aspace));

        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(ttbr0));
        __asm__ volatile("isb" ::: "memory");
    }
}

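/**
 * @brief Install the kernel translation table.
 *
 * Writes the table base to TTBR1_EL1 when RT_USING_SMART is enabled
 * (converting it to a physical address first), or to TTBR0_EL1 otherwise,
 * then invalidates the TLB and the instruction cache.
 */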
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}

/**
 * @brief Set up the page table for the kernel space. The mapping is fixed:
 * none of the entries can be changed after initialization.
 *
 * Every memory region in mdesc must be page aligned; otherwise the
 * mapping fails and no error is reported.
 *
 * @param aspace  kernel address space
 * @param mdesc   array of memory descriptors to map
 * @param desc_nr number of descriptors in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RW;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                    .limit_start = aspace->start,
                                    .limit_range_size = aspace->size,
                                    .map_size = mdesc->vaddr_end -
                                                mdesc->vaddr_start + 1,
                                    .prefer = (void *)mdesc->vaddr_start};

        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            LOG_E("%s: map failed with code %d", __FUNCTION__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }

    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}

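/* record the ioremap region and place the rt_mpr region right below it */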
static void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
}


/**
 * This function will initialize the address space structure and reserve
 * the I/O remapping region.
 *
 * @param aspace     address space to initialize
 * @param v_address  virtual address
 * @param size       map size
 * @param vtable     MMU translation table
 * @param pv_off     PV offset of the kernel space
 *
 * @return 0 on success, -1 on failure
 */
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);

    _init_region(v_address, size);

    return 0;
}

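/**
 * @brief Weak hook for configuring Top Byte Ignore (TBI).
 *
 * Boards may override this to return the TBI bit value placed into
 * TCR_EL1 for the given translation table (0 for TTBR0, 1 for TTBR1).
 * The default leaves TBI disabled.
 */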
rt_weak long rt_hw_mmu_config_tbi(int tbi_index)
{
    return 0;
}

/************ setting EL1 MMU registers **************
  MAIR_EL1
  index 0 : memory, outer write-back, write/read allocate
  index 1 : memory, non-cacheable
  index 2 : device, nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;
    unsigned long pa_range;

    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));

    __asm__ volatile ("mrs %0, ID_AA64MMFR0_EL1":"=r"(val64));
    pa_range = val64 & 0xf; /* PARange */

    /* TCR_EL1 */
    val64 = (16UL << 0)                /* t0sz 48bit */
            | (0x0UL << 6)             /* reserved */
            | (0x0UL << 7)             /* epd0 */
            | (0x3UL << 8)             /* t0 inner wb cacheable */
            | (0x3UL << 10)            /* t0 outer wb cacheable */
            | (0x2UL << 12)            /* t0 outer shareable */
            | (0x0UL << 14)            /* t0 4K */
            | (16UL << 16)             /* t1sz 48bit */
            | (0x0UL << 22)            /* define asid use ttbr0.asid */
            | (0x0UL << 23)            /* epd1 */
            | (0x3UL << 24)            /* t1 inner wb cacheable */
            | (0x3UL << 26)            /* t1 outer wb cacheable */
            | (0x2UL << 28)            /* t1 outer shareable */
            | (0x2UL << 30)            /* t1 4k */
            | (pa_range << 32)         /* PA range */
            | (0x0UL << 35)            /* reserved */
            | (0x1UL << 36)            /* as: 0:8bit 1:16bit */
            | (TCR_CONFIG_TBI0 << 37)  /* tbi0 */
            | (TCR_CONFIG_TBI1 << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}

struct page_table
{
    unsigned long page[512];
};

/* early page pool registered via set_free_page(), consumed by get_free_page() */
static struct page_table* __init_page_array;
static unsigned long __page_off = 0UL;
unsigned long get_ttbrn_base(void)
{
    return (unsigned long) __init_page_array;
}

void set_free_page(void *page_array)
{
    __init_page_array = page_array;
}

unsigned long get_free_page(void)
{
    return (unsigned long) (__init_page_array[__page_off++].page);
}

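/**
 * @brief Early variant of the 2M block mapper.
 *
 * Uses the boot page pool (get_free_page) instead of the page allocator
 * and addresses the tables directly (no PV_OFFSET adjustment), so it can
 * run before the MM subsystem is up. Cache maintenance is optional since
 * the caller may still have the MMU and caches disabled.
 */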
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr,
                               rt_bool_t flush)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
            if (flush)
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
            }
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* a block mapping already exists here: conflict */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    if (flush)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    }
    return 0;
}

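/**
 * @brief Return the kernel virtual address of the active TTBR0 table,
 * with the ASID and CnP bits masked off.
 */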
void *rt_hw_mmu_tbl_get(void)
{
    uintptr_t tbl;
    __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
}

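/**
 * @brief Map device memory before the ioremap service is available.
 *
 * The region is identity mapped with 2M sections and device attributes
 * into the table currently installed in TTBR0, so the returned pointer
 * equals paddr.
 */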
void *rt_ioremap_early(void *paddr, size_t size)
{
    volatile size_t count;
    rt_ubase_t base;
    static void *tbl = RT_NULL;

    if (!size)
    {
        return RT_NULL;
    }

    if (!tbl)
    {
        tbl = rt_hw_mmu_tbl_get();
    }

    /* number of 2M sections required, including the overhead of alignment */
    count = (size + ((rt_ubase_t)paddr & ARCH_SECTION_MASK)
            + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    base = (rt_ubase_t)paddr & (~ARCH_SECTION_MASK);

    while (count --> 0)
    {
        if (_map_single_page_2M(tbl, base, base, MMU_MAP_K_DEVICE, RT_TRUE))
        {
            return RT_NULL;
        }

        base += ARCH_SECTION_SIZE;
    }

    return paddr;
}

static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
{
    unsigned long i;
    int ret;

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr, RT_FALSE);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}

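/**
 * @brief Look up the translation entry for vaddr.
 *
 * Returns a pointer to the descriptor (a block or page entry) or NULL if
 * the address is not mapped. *plvl_shf receives the address shift of the
 * level where the walk stopped, i.e. the mapping granularity.
 */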
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            *plvl_shf = level_shift;
            return (void *)0;
        }

        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }

        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now at level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];

    *plvl_shf = level_shift;
    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    return &cur_lv_tbl[off];
}

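/**
 * @brief Translate a virtual address to its physical address.
 *
 * Kernel-space addresses are translated by rt_hw_mmu_kernel_v2p(); other
 * address spaces are resolved by walking the translation table. Returns
 * ARCH_MAP_FAILED if the address is not mapped.
 */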
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int level_shift;
    unsigned long paddr;

    if (aspace == &rt_kernel_space)
    {
        paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }
    else
    {
        unsigned long *pte = _query(aspace, v_addr, &level_shift);

        if (pte)
        {
            paddr = *pte & MMU_ADDRESS_MASK;
            paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
        }
        else
        {
            paddr = (unsigned long)ARCH_MAP_FAILED;
        }
    }

    return (void *)paddr;
}

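/* toggle the AttrIndx field of a descriptor between the cached (NORMAL_MEM)
 * and non-cached (NORMAL_NOCACHE_MEM) MAIR indexes */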
static int _noncache(rt_ubase_t *pte)
{
    int err = 0;
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
    }
    else
    {
        // changing other memory types to non-cacheable is not supported
        err = -RT_ENOSYS;
    }
    return err;
}

static int _cache(rt_ubase_t *pte)
{
    int err = 0;
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_NOCACHE_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
    }
    else
    {
        // changing other memory types to cacheable is not supported
        err = -RT_ENOSYS;
    }
    return err;
}

static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};

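/**
 * @brief Change the attributes of an existing mapping.
 *
 * Dispatches to the handler registered for cmd and applies it to every
 * mapped entry in [vaddr, vaddr + size), stepping by the granularity of
 * each mapping. Returns -RT_ENOSYS for unsupported commands.
 */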
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level_shift;
    int err = -RT_EINVAL;
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;

    int (*handler)(rt_ubase_t * pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        handler = control_handler[cmd];

        while (vstart < vend)
        {
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            rt_ubase_t range_end = vstart + (1ul << level_shift);
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            vstart = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}

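/**
 * @brief Build the two early boot translation tables with 2M blocks.
 *
 * tbl1 maps the kernel image at its virtual (link-time) address, while
 * tbl0 maps the same physical range onto itself (identity map). Hangs in
 * place on any mapping failure.
 */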
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_K_RWCB;
    extern unsigned char _start;
    unsigned long va = (unsigned long) &_start - pv_off;
    va = RT_ALIGN_DOWN(va, 0x200000);

    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);

    /* clean the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}

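/**
 * @brief Allocate and zero a top-level translation table for a new
 * address space. Returns RT_NULL if no page is available.
 */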
void *rt_hw_mmu_pgtbl_create(void)
{
    size_t *mmu_table;
    mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
    if (!mmu_table)
    {
        return RT_NULL;
    }

    memset(mmu_table, 0, ARCH_PAGE_SIZE);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_table, ARCH_PAGE_SIZE);
    return mmu_table;
}

void rt_hw_mmu_pgtbl_delete(void *pgtbl)
{
    rt_pages_free(pgtbl, 0);
}