/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2020-07-26     lizhirui     porting to ls2k
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <stddef.h>

#include "cache.h"
#include "mips_mmu.h"
#include "mmu.h"

void *current_mmu_table = RT_NULL;

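/* Return the page table most recently installed with switch_mmu(). */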
void *mmu_table_get()
{
    return current_mmu_table;
}

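/*
 * Install a new page table and clear both the data and the instruction TLB
 * so that no stale translations survive the switch.
 */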
void switch_mmu(void *mmu_table)
{
    current_mmu_table = mmu_table;

    mmu_clear_tlb();
    mmu_clear_itlb();
}

/* dump 2nd level page table */
void rt_hw_cpu_dump_page_table_2nd(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    for (i = 0; i < 256; i++)
    {
        rt_uint32_t pte2 = ptb[i];
        if ((pte2 & 0x3) == 0)
        {
            if (fcnt == 0)
                rt_kprintf("    ");
            rt_kprintf("%04x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("    %04x: %x: ", i, pte2);
        if ((pte2 & 0x3) == 0x1)
        {
            rt_kprintf("L,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf,
                       (pte2 >> 15) & 0x1,
                       ((pte2 >> 10) | (pte2 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("S,ap:%x,xn:%d,texcb:%02x\n",
                       ((pte2 >> 7) | (pte2 >> 4)) & 0xf, pte2 & 0x1,
                       ((pte2 >> 4) | (pte2 >> 2)) & 0x1f);
        }
    }
}

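/*
 * Dump the first-level page table: each of the 4096 entries is decoded as a
 * fault, a second-level page table, a section or a super section, together
 * with its ns/ap/xn/texcb/domain bits.
 */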
void rt_hw_cpu_dump_page_table(rt_uint32_t *ptb)
{
    int i;
    int fcnt = 0;

    rt_kprintf("page table@%p\n", ptb);
    for (i = 0; i < 1024*4; i++)
    {
        rt_uint32_t pte1 = ptb[i];
        if ((pte1 & 0x3) == 0)
        {
            rt_kprintf("%03x: ", i);
            fcnt++;
            if (fcnt == 16)
            {
                rt_kprintf("fault\n");
                fcnt = 0;
            }
            continue;
        }
        if (fcnt != 0)
        {
            rt_kprintf("fault\n");
            fcnt = 0;
        }

        rt_kprintf("%03x: %08x: ", i, pte1);
        if ((pte1 & 0x3) == 0x3)
        {
            rt_kprintf("LPAE\n");
        }
        else if ((pte1 & 0x3) == 0x1)
        {
            rt_kprintf("pte,ns:%d,domain:%d\n",
                       (pte1 >> 3) & 0x1, (pte1 >> 5) & 0xf);
            /*
             *rt_hw_cpu_dump_page_table_2nd((void*)((pte1 & 0xfffffc000)
             *                               - 0x80000000 + 0xC0000000));
             */
        }
        else if (pte1 & (1 << 18))
        {
            rt_kprintf("super section,ns:%d,ap:%x,xn:%d,texcb:%02x\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       ((pte1 >> 10) | (pte1 >> 2)) & 0x1f);
        }
        else
        {
            rt_kprintf("section,ns:%d,ap:%x,"
                       "xn:%d,texcb:%02x,domain:%d\n",
                       (pte1 >> 19) & 0x1,
                       ((pte1 >> 13) | (pte1 >> 10)) & 0xf,
                       (pte1 >> 4) & 0x1,
                       (((pte1 & (0x7 << 12)) >> 10) |
                        ((pte1 &        0x0c) >>  2)) & 0x1f,
                       (pte1 >> 5) & 0xf);
        }
    }
}

/* level1 page table, each entry for 1MB memory. */
volatile unsigned long MMUTable[4*1024] __attribute__((aligned(16*1024)));
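/*
 * Map the section range [vaddrStart, vaddrEnd] onto paddrStart, 1 MB per
 * level-1 entry, with the given attribute bits.
 *
 * A minimal usage sketch (hypothetical addresses, NORMAL_MEM assumed as the
 * attribute):
 *
 *     rt_hw_mmu_setmtt(0x80000000, 0x80ffffff, 0x00000000, NORMAL_MEM);
 */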
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart,
                      rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart,
                      rt_uint32_t attr)
{
    volatile rt_uint32_t *pTT;
    volatile int i, nSec;
    pTT  = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
    for (i = 0; i <= nSec; i++)
    {
        *pTT = attr | (((paddrStart >> 20) + i) << 20);
        pTT++;
    }
}

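/*
 * On ARM this accessed the CP15 domain access control register; the
 * coprocessor instructions are commented out in this MIPS port, so the
 * function is effectively a stub.
 */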
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long old_domain;

    //asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
    //asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return old_domain;
}

void rt_hw_cpu_dcache_clean(void *addr, int size);
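/*
 * Build the static MMU table from an array of memory descriptors: one
 * rt_hw_mmu_setmtt() call per descriptor, followed by a data cache clean so
 * the hardware table walk sees the new entries.
 */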
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* set page table */
    for (; size > 0; size--)
    {
        rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
                mdesc->paddr_start, mdesc->attr);
        mdesc++;
    }
    rt_hw_cpu_dcache_clean((void*)MMUTable, sizeof MMUTable);
}

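/*
 * Bring the MMU up with MMUTable as the translation table: flush the caches,
 * disable caches and MMU, program the domain register and the table base,
 * then re-enable the MMU and the caches.
 */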
void rt_hw_mmu_init(void)
{
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    rt_hw_set_domain_register(0x55555555);

    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}

/*
 mem map
*/

void rt_hw_cpu_dcache_clean(void *addr, int size);

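/*
 * Initialise an rt_mmu_info descriptor for the virtual region
 * [v_address, v_address + size). The level-1 entries covering the region
 * must still be unused; on success the table pointer, section index range
 * and physical/virtual offset are recorded for the mapping helpers below.
 *
 * A usage sketch (hypothetical region and offset, reusing MMUTable as the
 * level-1 table):
 *
 *     rt_mmu_info mmu_info;
 *     rt_hw_mmu_map_init(&mmu_info, (void *)0xf0000000, 0x10000000,
 *                        (size_t *)MMUTable, 0);
 */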
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if (!mmu_info || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

    level = rt_hw_interrupt_disable();

    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v & ARCH_MMU_USED_MASK)
        {
            rt_kprintf("Error:vtable[%d] = 0x%p(is not zero),va_s = 0x%p,va_e = 0x%p!\n",l1_off,v,va_s,va_e);
            rt_hw_interrupt_enable(level);
            return -1;
        }
    }

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;

    rt_hw_interrupt_enable(level);

    return 0;
}

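/*
 * Search the managed virtual range for `pages` consecutive unmapped page
 * slots; a section without a level-2 table counts as fully free. Returns the
 * virtual address of the first slot, or 0 if no run is large enough.
 */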
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t find_off = 0;
    size_t find_va = 0;
    int n = 0;

    if (!pages)
    {
        return 0;
    }

    if (!mmu_info)
    {
        return 0;
    }

    for (l1_off = mmu_info->vstart; l1_off <= mmu_info->vend; l1_off++)
    {
        mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;
        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            for (l2_off = 0; l2_off < (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE); l2_off++)
            {
                if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
                {
                    /* in use */
                    n = 0;
                }
                else
                {
                    if (!n)
                    {
                        find_va = l1_off;
                        find_off = l2_off;
                    }
                    n++;
                    if (n >= pages)
                    {
                        return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
                    }
                }
            }
        }
        else
        {
            if (!n)
            {
                find_va = l1_off;
                find_off = 0;
            }
            n += (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            if (n >= pages)
            {
                return (find_va << ARCH_SECTION_SHIFT) + (find_off << ARCH_PAGE_SHIFT);
            }
        }
    }
    return 0;
}

#ifdef RT_USING_SMART
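/*
 * Check that `pages` pages starting at va are not already mapped. Returns 0
 * when the whole range is free, -1 otherwise.
 */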
static int check_vaddr(rt_mmu_info *mmu_info, void *va, int pages)
{
    size_t loop_va = (size_t)va & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
            {
                return -1;
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
}
#endif

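/*
 * Clear the level-2 entries for `npages` pages starting at v_addr. Each
 * level-2 table keeps a reference count right after its entries (at index
 * ARCH_SECTION_SIZE/ARCH_PAGE_SIZE); when the count drops to zero the table
 * is freed and its level-1 entry cleared. Modified entries are cleaned from
 * the data cache.
 */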
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t npages)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        if (l1_off < mmu_info->vstart || l1_off > mmu_info->vend)
        {
            return;
        }

        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            return;
        }

        if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
        {
            *(mmu_l2 + l2_off) = 0;
            /* cache maintain */
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

            ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
            (*ref_cnt)--;
            if (!*ref_cnt)
            {
#ifdef RT_USING_SMART
                rt_pages_free(mmu_l2, 0);
#else
                rt_free_align(mmu_l2);
#endif
                *mmu_l1 = 0;

                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
        }
        loop_va += ARCH_PAGE_SIZE;
    }
}

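/*
 * Map `npages` pages so that v_addr translates to p_addr with the given
 * attributes. Missing level-2 tables are allocated on demand (from the page
 * allocator under RT_USING_SMART, otherwise with rt_malloc_align); on
 * allocation failure everything mapped so far is undone and -1 is returned.
 */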
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void* v_addr, void* p_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
#ifdef RT_USING_SMART
            mmu_l2 = (size_t*)rt_pages_alloc(0);
#else
            mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
#endif
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
            {
                /* error, unmap and quit */
                __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
                return -1;
            }
        }

        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
        (*ref_cnt)++;

        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
        loop_pa += ARCH_PAGE_SIZE;
    }
    return 0;
}

static void rt_hw_cpu_tlb_invalidate(void)
{
    mmu_clear_tlb();
    mmu_clear_itlb();
}

#ifdef RT_USING_SMART
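/*
 * Map a physical range of `size` bytes. If v_addr is given it must share the
 * page offset of p_addr and the target range must be free; otherwise a free
 * virtual range is picked with find_vaddr(). Returns the mapped virtual
 * address (with the page offset of p_addr applied) or 0 on failure.
 *
 * A usage sketch (hypothetical device address, using the interrupt-safe
 * wrapper defined further below):
 *
 *     void *regs = rt_hw_mmu_map(&mmu_info, RT_NULL, (void *)0x1fe00000,
 *                                0x1000, MMU_MAP_K_RW);
 */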
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        pa_s = (size_t)p_addr;
        if ((vaddr & ARCH_PAGE_MASK) != (pa_s & ARCH_PAGE_MASK))
        {
            return 0;
        }
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr) {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }
    return 0;
}
#else
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr)
{
    size_t pa_s, pa_e;
    size_t vaddr;
    int pages;
    int ret;

    pa_s = (size_t)p_addr;
    pa_e = (size_t)p_addr + size - 1;
    pa_s >>= ARCH_PAGE_SHIFT;
    pa_e >>= ARCH_PAGE_SHIFT;
    pages = pa_e - pa_s + 1;
    vaddr = find_vaddr(mmu_info, pages);
    if (vaddr) {
        ret = __rt_hw_mmu_map(mmu_info, (void*)vaddr, p_addr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)(vaddr + ((size_t)p_addr & ARCH_PAGE_MASK));
        }
    }
    return 0;
}
#endif

#ifdef RT_USING_SMART
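/*
 * Like __rt_hw_mmu_map(), but the backing physical pages are allocated here,
 * one page at a time with rt_pages_alloc(). If any allocation fails, the
 * pages and mappings created so far are released and -1 is returned.
 */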
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void* v_addr, size_t npages, size_t attr)
{
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t loop_pa;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t *ref_cnt;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (size_t)rt_pages_alloc(0) + mmu_info->pv_off;
        if (!loop_pa)
            goto err;

        //rt_kprintf("vaddr = %08x is mapped to paddr = %08x\n",v_addr,loop_pa);
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
        mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;

        if (*mmu_l1 & ARCH_MMU_USED_MASK)
        {
            mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
        }
        else
        {
            //mmu_l2 = (size_t*)rt_malloc_align(ARCH_PAGE_TBL_SIZE * 2, ARCH_PAGE_TBL_SIZE);
            mmu_l2 = (size_t*)rt_pages_alloc(0);
            if (mmu_l2)
            {
                rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l2, ARCH_PAGE_TBL_SIZE);

                *mmu_l1 = (((size_t)mmu_l2 + mmu_info->pv_off) | 0x1);
                /* cache maintain */
                rt_hw_cpu_dcache_clean(mmu_l1, 4);
            }
            else
                goto err;
        }

        ref_cnt = mmu_l2 + (ARCH_SECTION_SIZE/ARCH_PAGE_SIZE);
        (*ref_cnt)++;

        //loop_pa += mmu_info->pv_off;
        *(mmu_l2 + l2_off) = (loop_pa | attr);
        /* cache maintain */
        rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, 4);

        loop_va += ARCH_PAGE_SIZE;
    }
    return 0;
err:
    {
        /* error, unmap and quit */
        int i;
        void *va, *pa;

        va = (void*)((size_t)v_addr & ~ARCH_PAGE_MASK);
        for (i = 0; i < npages; i++)
        {
            pa = rt_hw_mmu_v2p(mmu_info, va);
            pa -= mmu_info->pv_off;
            rt_pages_free(pa, 0);
            va += ARCH_PAGE_SIZE;
        }

        __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
        return -1;
    }
}

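/*
 * Allocate and map `size` bytes of anonymous memory. The virtual address is
 * either the caller-supplied v_addr (checked for conflicts) or a free range
 * found with find_vaddr(); the page-internal offset of v_addr is preserved
 * in the returned pointer.
 */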
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    size_t vaddr;
    size_t offset;
    int pages;
    int ret;

    if (!size)
    {
        return 0;
    }
    offset = (size_t)v_addr & ARCH_PAGE_MASK;
    size += (offset + ARCH_PAGE_SIZE - 1);
    pages = (size >> ARCH_PAGE_SHIFT);
    if (v_addr)
    {
        vaddr = (size_t)v_addr;
        vaddr &= ~ARCH_PAGE_MASK;
        if (check_vaddr(mmu_info, (void*)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }
    if (vaddr) {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void*)vaddr, pages, attr);
        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void*)vaddr + offset;
        }
    }
    return 0;
}
#endif

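/*
 * Remove the mappings covering `size` bytes starting at v_addr and
 * invalidate the TLBs so the old translations disappear immediately.
 */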
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    size_t va_s, va_e;
    int pages;

    va_s = (size_t)v_addr;
    va_e = (size_t)v_addr + size - 1;
    va_s >>= ARCH_PAGE_SHIFT;
    va_e >>= ARCH_PAGE_SHIFT;
    pages = va_e - va_s + 1;
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}

//va --> pa
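/* Currently a stub: the translation is not implemented and 0 is returned. */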
void *rt_hw_kernel_virt_to_phys(void *v_addr, size_t size)
{
    void *p_addr = 0;

    return p_addr;
}

//pa --> va
void *rt_hw_kernel_phys_to_virt(void *p_addr, size_t size)
{
    void *v_addr = 0;

    #ifdef RT_USING_SMART
    extern rt_mmu_info mmu_info;
    v_addr = rt_hw_mmu_map(&mmu_info, 0, p_addr, size, MMU_MAP_K_RW);
    #else
    v_addr = p_addr;
    #endif

    return v_addr;
}

#ifdef RT_USING_SMART
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_hw_interrupt_enable(level);
    return ret;
}
#endif

void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size)
{
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_hw_interrupt_enable(level);
}

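/*
 * Translate a virtual address by walking the page table in software: a
 * level-1 section entry is resolved directly, a page-table entry is followed
 * into its level-2 table. Large pages and super sections are not supported
 * and, like unmapped addresses, yield 0.
 */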
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;
    size_t tmp;
    size_t pa;

    l1_off = (size_t)v_addr >> ARCH_SECTION_SHIFT;

    if (!mmu_info)
    {
        return (void*)0;
    }

    mmu_l1 =  (size_t*)mmu_info->vtable + l1_off;

    tmp = *mmu_l1;

    switch (tmp & ARCH_MMU_USED_MASK)
    {
        case 0: /* not used */
            break;
        case 1: /* page table */
            mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - mmu_info->pv_off);
            l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
            pa = *(mmu_l2 + l2_off);
            if (pa & ARCH_MMU_USED_MASK)
            {
                if ((pa & ARCH_MMU_USED_MASK) == 1)
                {
                    /* large page, not supported */
                    break;
                }
                pa &= ~(ARCH_PAGE_MASK);
                pa += ((size_t)v_addr & ARCH_PAGE_MASK);
                return (void*)pa;
            }
            break;
        case 2:
        case 3:
            /* section */
            if (tmp & ARCH_TYPE_SUPERSECTION)
            {
                /* super section, not supported */
                break;
            }
            pa = (tmp & ~ARCH_SECTION_MASK);
            pa += ((size_t)v_addr & ARCH_SECTION_MASK);
            return (void*)pa;
    }
    return (void*)0;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr)
{
    void *ret;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_hw_interrupt_enable(level);
    return ret;
}

#ifdef RT_USING_SMART
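/*
 * Build an initial section-mapping table, presumably used during early
 * setup: the kernel's virtual range is mapped with the physical offset
 * applied, its physical alias is mapped one-to-one, and every other entry is
 * left as a fault.
 */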
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
{
    unsigned int va;

    for (va = 0; va < 0x1000; va++) {
        unsigned int vaddr = (va << 20);
        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size) {
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
        } else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size) {
            mtbl[va] = (va << 20) | NORMAL_MEM;
        } else {
            mtbl[va] = 0;
        }
    }
}
#endif