/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-05-12     RT-Thread    the first version
 * 2023-08-15     Shell        Support more mapping attribution
 */
#ifndef __MMU_H_
#define __MMU_H_

#ifndef __ASSEMBLY__

#include <rtthread.h>
#include <mm_aspace.h>

/* normal memory (write-back, read/write-allocate) mapping type */
#define NORMAL_MEM 0
/* normal non-cacheable memory mapping type */
#define NORMAL_NOCACHE_MEM 1
/* device memory mapping type */
#define DEVICE_MEM 2

struct mem_desc
{
    unsigned long vaddr_start;
    unsigned long vaddr_end;
    unsigned long paddr_start;
    unsigned long attr;
    struct rt_varea varea;
};
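
/*
 * A board support package typically describes its fixed mappings with a
 * static table of mem_desc entries and hands it to rt_hw_mmu_setup()
 * (declared below). A minimal sketch with hypothetical addresses, assuming
 * the global rt_kernel_space from mm_aspace.h; fields are vaddr_start,
 * vaddr_end, paddr_start, attr:
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       { 0xffff000040000000, 0xffff00004fffffff, 0x40000000, NORMAL_MEM },
 *       { 0xffff000009000000, 0xffff000009000fff, 0x09000000, DEVICE_MEM },
 *   };
 *
 *   rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */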

#endif /* !__ASSEMBLY__ */

#define RT_HW_MMU_PROT_READ    1
#define RT_HW_MMU_PROT_WRITE   2
#define RT_HW_MMU_PROT_EXECUTE 4
#define RT_HW_MMU_PROT_KERNEL  8
#define RT_HW_MMU_PROT_USER    16
#define RT_HW_MMU_PROT_CACHE   32
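
/*
 * The RT_HW_MMU_PROT_* values are bit flags and are meant to be OR-ed
 * together, e.g. (RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) is the
 * combination handled by the rt_hw_mmu_attr_*_perm() helpers below.
 */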

#define MMU_ASID_SHIFT   48
#define MMU_NG_SHIFT     11 /* not global bit */
#define MMU_AF_SHIFT     10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT     6
#define MMU_MA_SHIFT     2
#define MMU_AP_MASK      (0x3 << MMU_AP_SHIFT)
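
/*
 * These shifts follow the AArch64 stage-1 block/page descriptor layout:
 * nG (not global) at bit 11, AF (access flag) at bit 10, SH (shareability)
 * at bits [9:8], AP (access permissions) at bits [7:6], and AttrIndx (the
 * MAIR_EL1 index, i.e. the memory type above) at bits [4:2]. The ASID is
 * not part of the descriptor itself; it lives in the upper bits of
 * TTBRn_EL1, hence MMU_ASID_SHIFT of 48.
 */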

/* feature detection is not implemented yet, so fall back to 8-bit ASIDs */
#define MMU_SUPPORTED_ASID_BITS 8

#define MMU_AP_KAUN  0UL /* kernel r/w, user none */
#define MMU_AP_KAUA  1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN  2UL /* kernel r, user none */
#define MMU_AP_KRUR  3UL /* kernel r, user r */
#define MMU_ATTR_AF  (1ul << MMU_AF_SHIFT) /* the access flag */
#define MMU_ATTR_DBM (1ul << 51)           /* the dirty bit modifier */

#define MMU_MAP_CUSTOM(ap, mtype, nglobal)                        \
    (((0x1UL << MMU_AF_SHIFT) | (0x2UL << MMU_SHARED_SHIFT) |     \
      ((ap) << MMU_AP_SHIFT) | ((mtype) << MMU_MA_SHIFT)) |       \
     ((rt_ubase_t)(nglobal) << MMU_NG_SHIFT))
#define MMU_MAP_K_ROCB   MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RO     MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM, 0)
#define MMU_MAP_K_RW     MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM, 0)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM, 0)
#define MMU_MAP_U_ROCB   MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_MEM, 1)
#define MMU_MAP_U_RO     MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_RWCB   MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM, 1)
#define MMU_MAP_U_RW     MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM, 1)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM, 1)
#define MMU_MAP_TRACE(attr) ((attr) & ~(MMU_ATTR_AF | MMU_ATTR_DBM))
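
/*
 * Naming: K/U = kernel-only or user-accessible, RO/RW = read-only or
 * read/write, and the CB suffix marks the cacheable (write-back) variant.
 * Further combinations can be composed with MMU_MAP_CUSTOM directly; as an
 * illustrative (hypothetical) example, a user read-only, non-global device
 * window would be:
 *
 *   #define MMU_MAP_U_RO_DEVICE MMU_MAP_CUSTOM(MMU_AP_KRUR, DEVICE_MEM, 1)
 */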

#define ARCH_SECTION_SHIFT  21
#define ARCH_SECTION_SIZE   (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK   (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT     12
#define ARCH_PAGE_SIZE      (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK      (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE  (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK  (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_VADDR_WIDTH        48
#define ARCH_ADDRESS_WIDTH_BITS 64

#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE     -3
#define MMU_MAP_ERROR_CONFLICT   -4

#define ARCH_MAP_FAILED ((void *)0x1ffffffffffff)

#define ARCH_EARLY_MAP_SIZE (0x40000000)
/* big enough even for a 16 TB first-time mapping */
#define ARCH_PAGE_INIT_THRESHOLD (0x10000000)

#ifndef __ASSEMBLY__

struct rt_aspace;

void rt_hw_mmu_ktbl_set(unsigned long tbl);
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc,
                     int desc_nr);

int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off);
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
void rt_hw_aspace_switch(struct rt_aspace *aspace);
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
void *rt_hw_mmu_tbl_get(void);
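
/*
 * A minimal usage sketch (hypothetical addresses, assuming the global
 * rt_kernel_space from mm_aspace.h): map one cacheable kernel page,
 * translate it back, then tear the mapping down.
 *
 *   void *va = rt_hw_mmu_map(&rt_kernel_space, (void *)0xffff000040000000,
 *                            (void *)0x40000000, ARCH_PAGE_SIZE,
 *                            MMU_MAP_K_RWCB);
 *   if (va != RT_NULL)
 *   {
 *       void *pa = rt_hw_mmu_v2p(&rt_kernel_space, va); // expect 0x40000000
 *       rt_hw_mmu_unmap(&rt_kernel_space, va, ARCH_PAGE_SIZE);
 *   }
 */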

static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
{
    rt_ubase_t par;
    void *paddr;

    /* AT S1E1W: stage-1 EL1 write-access translation, result lands in PAR_EL1 */
    __asm__ volatile("at s1e1w, %0"::"r"(v_addr):"memory");
    __asm__ volatile("mrs %0, par_el1":"=r"(par)::"memory");

    if (par & 0x1)
    {
        /* PAR_EL1.F set: the translation aborted */
        paddr = ARCH_MAP_FAILED;
    }
    else
    {
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
        par &= MMU_ADDRESS_MASK;
        par |= (rt_ubase_t)v_addr & ARCH_PAGE_MASK;
        paddr = (void *)par;
    }

    return paddr;
}
/**
 * @brief Add permission to an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection bits to add
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* grant write permission to user (kernel r/w, user r/w) */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            attr = (attr & ~MMU_AP_MASK) | (MMU_AP_KAUA << MMU_AP_SHIFT);
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}

/**
 * @brief Remove permission from an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection bits to remove
 * @return size_t the updated attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* remove write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            /* if AP[0] (bit 6) grants EL0 access, set AP[1] (bit 7) to force read-only */
            if (attr & 0x40)
                attr |= 0x80;
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}

/**
 * @brief Test permission in an MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot protection bits to test
 * @return rt_bool_t RT_TRUE if prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t rc;
    switch (prot)
    {
        /* test write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            if ((attr & MMU_AP_MASK) == (MMU_AP_KAUA << MMU_AP_SHIFT))
                rc = RT_TRUE;
            else
                rc = RT_FALSE;
            break;
        default:
            RT_ASSERT(0);
            rc = RT_FALSE;
    }
    return rc;
}
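
/*
 * Example (illustrative only): toggling user write access on a user
 * read-only attribute with the helpers above.
 *
 *   size_t attr = MMU_MAP_U_ROCB;
 *   attr = rt_hw_mmu_attr_add_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
 *   // rt_hw_mmu_attr_test_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) now returns RT_TRUE
 *   attr = rt_hw_mmu_attr_rm_perm(attr, RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
 */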

int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd);

#endif /* !__ASSEMBLY__ */

#endif /* __MMU_H_ */