/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-06     Jesven       first version
 */
#include <rthw.h>
#include <rtthread.h>

#include <mmu.h>
#include <mm_aspace.h>
#include <ioremap.h>

/* Base address and byte length of the kernel virtual window reserved for
 * I/O remapping. NOTE(review): assigned by platform/MMU setup code outside
 * this file — not initialized here. */
void *rt_ioremap_start;
size_t rt_ioremap_size;

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#endif

#define DBG_TAG "mm.ioremap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

/* Cache policy requested for an ioremap mapping; each enumerator selects
 * the MMU attribute applied in _ioremap_type(). */
enum ioremap_type
{
    MM_AREA_TYPE_PHY,        /* mapped with MMU_MAP_K_DEVICE (device memory) */
    MM_AREA_TYPE_PHY_WT,     /* mapped with MMU_MAP_K_RW */
    MM_AREA_TYPE_PHY_CACHED  /* mapped with MMU_MAP_K_RWCB (cached) */
};

/**
 * @brief Map a physical address range into the kernel ioremap region.
 *
 * The physical address is aligned down to RT_PAGE_AFFINITY_BLOCK_SIZE
 * before mapping; the in-block offset of paddr is added back to the
 * returned virtual address so it points at the exact requested byte.
 *
 * @param paddr physical start address (need not be block aligned)
 * @param size  length of the region in bytes
 * @param type  cache policy, one of enum ioremap_type
 *
 * @return kernel virtual address on success, NULL on failure or
 *         unrecognized type
 */
static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
{
    char *v_addr = NULL;
    size_t attr;
    size_t lo_off;
    int err;
    /* align the physical address down to its affinity block */
    size_t pa_off = (rt_ubase_t)paddr & ~(RT_PAGE_AFFINITY_BLOCK_SIZE - 1);

    lo_off = (rt_ubase_t)paddr - pa_off; /* byte offset inside the block */
    pa_off = MM_PA_TO_OFF(pa_off);

    struct rt_mm_va_hint hint = {
        .prefer = RT_NULL,
        .map_size = RT_ALIGN(size + lo_off, RT_PAGE_AFFINITY_BLOCK_SIZE),
        .flags = MMF_CREATE(0, RT_PAGE_AFFINITY_BLOCK_SIZE),
        .limit_start = rt_ioremap_start,
        .limit_range_size = rt_ioremap_size,
    };

    switch (type)
    {
    case MM_AREA_TYPE_PHY:
        attr = MMU_MAP_K_DEVICE;
        break;
    case MM_AREA_TYPE_PHY_WT:
        attr = MMU_MAP_K_RW;
        break;
    case MM_AREA_TYPE_PHY_CACHED:
        attr = MMU_MAP_K_RWCB;
        break;
    default:
        return v_addr;
    }
    err = rt_aspace_map_phy(&rt_kernel_space, &hint, attr, pa_off, (void **)&v_addr);

    if (err)
    {
        /* %p matches the pointer argument; the previous %lx invoked
         * undefined behavior where unsigned long and void * differ */
        LOG_W("IOREMAP %p failed %d\n", paddr, err);
        v_addr = NULL;
    }
    else
    {
        v_addr = v_addr + lo_off;
    }
    return v_addr;
}

rt_ioremap_early(void * paddr,size_t size)82 rt_weak void *rt_ioremap_early(void *paddr, size_t size)
83 {
84 if (!size)
85 {
86 return RT_NULL;
87 }
88
89 return paddr;
90 }
91
/**
 * @brief Map a physical range into kernel space as device memory.
 *
 * @param paddr physical start address
 * @param size  length of the region in bytes
 *
 * @return kernel virtual address, or NULL on failure
 */
void *rt_ioremap(void *paddr, size_t size)
{
    /* the default ioremap policy is a device-type mapping */
    void *vaddr = _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);

    return vaddr;
}

/**
 * @brief Map a physical range without caching.
 *
 * Equivalent to rt_ioremap(): both request a MM_AREA_TYPE_PHY
 * (device-type) mapping.
 *
 * @param paddr physical start address
 * @param size  length of the region in bytes
 *
 * @return kernel virtual address, or NULL on failure
 */
void *rt_ioremap_nocache(void *paddr, size_t size)
{
    void *vaddr = _ioremap_type(paddr, size, MM_AREA_TYPE_PHY);

    return vaddr;
}

/**
 * @brief Map a physical range with the write-through variant
 *        (MM_AREA_TYPE_PHY_WT, i.e. the MMU_MAP_K_RW attribute).
 *
 * @param paddr physical start address
 * @param size  length of the region in bytes
 *
 * @return kernel virtual address, or NULL on failure
 */
void *rt_ioremap_wt(void *paddr, size_t size)
{
    void *vaddr = _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_WT);

    return vaddr;
}

/**
 * @brief Map a physical range with caching enabled
 *        (MM_AREA_TYPE_PHY_CACHED, i.e. the MMU_MAP_K_RWCB attribute).
 *
 * @param paddr physical start address
 * @param size  length of the region in bytes
 *
 * @return kernel virtual address, or NULL on failure
 */
void *rt_ioremap_cached(void *paddr, size_t size)
{
    void *vaddr = _ioremap_type(paddr, size, MM_AREA_TYPE_PHY_CACHED);

    return vaddr;
}

rt_iounmap(volatile void * vaddr)112 void rt_iounmap(volatile void *vaddr)
113 {
114 rt_aspace_unmap(&rt_kernel_space, (void *)vaddr);
115 }
116
117