/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-03-25     quanzhao     the first version
 * 2023-10-10     Shell        Add permission control API
 */
#ifndef __MMU_H_
#define __MMU_H_

#include <rtthread.h>
#include <mm_aspace.h>
#define DESC_SEC       (0x2)
#define MEMWBWA        ((1<<12)|(3<<2))     /* write back, write allocate */
#define MEMWB          (3<<2)               /* write back, no write allocate */
#define MEMWT          (2<<2)               /* write through, no write allocate */
#define SHAREDEVICE    (1<<2)               /* shared device */
#define STRONGORDER    (0<<2)               /* strongly ordered */
#define XN             (1<<4)               /* eXecute Never */
#ifdef RT_USING_SMART
#define AP_RW          (1<<10)              /* supervisor=RW, user=No */
#define AP_RO          ((1<<10)|(1<<15))    /* supervisor=RO, user=No */
#else
#define AP_RW          (3<<10)              /* supervisor=RW, user=RW */
#define AP_RO          (2<<10)              /* supervisor=RW, user=RO */
#endif
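
/*
 * For first-level section descriptors in the ARMv7-A short-descriptor format,
 * AP[1:0] sits at bits [11:10] and APX (AP[2]) at bit 15; the permission
 * comments above follow from that encoding.
 */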

#define SHARED         (1<<16)              /* shareable */

#define DOMAIN_FAULT   (0x0)
#define DOMAIN_CHK     (0x1)
#define DOMAIN_NOTCHK  (0x3)
#define DOMAIN0        (0x0<<5)
#define DOMAIN1        (0x1<<5)

#define DOMAIN0_ATTR   (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR   (DOMAIN_FAULT<<2)

/* device memory mapping type */
#define DEVICE_MEM     (SHARED|AP_RW|DOMAIN0|SHAREDEVICE|DESC_SEC|XN)
/* normal memory mapping type */
#define NORMAL_MEM     (SHARED|AP_RW|DOMAIN0|MEMWBWA|DESC_SEC)
/* strongly-ordered memory mapping type */
#define STRONG_ORDER_MEM (SHARED|AP_RO|XN|DESC_SEC)

struct mem_desc
{
    rt_uint32_t vaddr_start;    /* start of the virtual address region */
    rt_uint32_t vaddr_end;      /* end of the virtual address region */
    rt_uint32_t paddr_start;    /* physical address the region maps to */
    rt_uint32_t attr;           /* mapping attributes, e.g. NORMAL_MEM or DEVICE_MEM */
    struct rt_varea varea;      /* varea object registered to the address space */
};
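
/*
 * A minimal sketch of how a BSP might describe its memory layout with
 * mem_desc and hand it to rt_hw_mmu_setup() (declared below). The table and
 * its addresses are hypothetical, not part of this header; rt_kernel_space
 * is assumed to be the kernel aspace exported by mm_aspace.h:
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       {0xC0000000, 0xC0FFFFFF, 0xC0000000, NORMAL_MEM},  // cacheable RAM
 *       {0x90000000, 0x90FFFFFF, 0x90000000, DEVICE_MEM},  // peripheral space
 *   };
 *
 *   rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */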

/* second-level (small page) descriptor fields, ARMv7-A short-descriptor format */
#define MMU_MAP_MTBL_XN       (1<<0)        /* eXecute Never */
#define MMU_MAP_MTBL_A        (1<<1)        /* small page type bit */
#define MMU_MAP_MTBL_B        (1<<2)        /* bufferable */
#define MMU_MAP_MTBL_C        (1<<3)        /* cacheable */
#define MMU_MAP_MTBL_AP01(x)  ((x)<<4)      /* AP[1:0], access permissions */
#define MMU_MAP_MTBL_TEX(x)   ((x)<<6)      /* TEX[2:0], memory region type */
#define MMU_MAP_MTBL_AP2(x)   ((x)<<9)      /* AP[2] (APX) */
#define MMU_MAP_MTBL_SHARE    (1<<10)       /* shareable */
#define MMU_MAP_MTBL_NG(x)    ((x)<<11)     /* not-global */

#define MMU_MAP_K_ROCB      ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RO        ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(1)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RWCB      ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_RW        ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_K_DEVICE    ((MMU_MAP_MTBL_NG(0))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(1)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_ROCB      ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RO        ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(2)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RWCB      ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_C|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_RW        ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_U_DEVICE    ((MMU_MAP_MTBL_NG(1))|(MMU_MAP_MTBL_A|MMU_MAP_MTBL_AP2(0)|MMU_MAP_MTBL_AP01(3)|MMU_MAP_MTBL_TEX(0)|MMU_MAP_MTBL_B|MMU_MAP_MTBL_SHARE))
#define MMU_MAP_TRACE(attr) (attr)
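
/*
 * Worked example of the encoding above: MMU_MAP_K_RWCB expands to
 *   MMU_MAP_MTBL_A | MMU_MAP_MTBL_AP01(1) | MMU_MAP_MTBL_B |
 *   MMU_MAP_MTBL_C | MMU_MAP_MTBL_SHARE
 *   = 0x002 | 0x010 | 0x004 | 0x008 | 0x400 = 0x41E,
 * i.e. a global, shareable, write-back cacheable small page with
 * AP[2:0] = 0b001: kernel read/write, no user access.
 */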

#define ARCH_SECTION_SHIFT      20
#define ARCH_SECTION_SIZE       (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK       (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT         12
#define ARCH_PAGE_SIZE          (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK          (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT     10
#define ARCH_PAGE_TBL_SIZE      (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK      (ARCH_PAGE_TBL_SIZE - 1)

#define ARCH_MMU_USED_MASK      3

#define ARCH_TYPE_SUPERSECTION  (1 << 18)

#define ARCH_ADDRESS_WIDTH_BITS 32
#define ARCH_VADDR_WIDTH        32

/**
 * @note it is possible to map (-1ul & ~ARCH_PAGE_MASK), but an unaligned -1
 * is never returned on a successful mapping
 */
#define ARCH_MAP_FAILED ((void *)-1)

#define RT_HW_MMU_PROT_READ     1
#define RT_HW_MMU_PROT_WRITE    2
#define RT_HW_MMU_PROT_EXECUTE  4
#define RT_HW_MMU_PROT_KERNEL   8
#define RT_HW_MMU_PROT_USER     16
#define RT_HW_MMU_PROT_CACHE    32

int rt_hw_mmu_ioremap_init(struct rt_aspace *aspace, void *v_address, size_t size);
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size);

void rt_hw_mmu_setup(struct rt_aspace *aspace, struct mem_desc *mdesc, int desc_nr);
void rt_hw_mmu_init(void);
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off);
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr, size_t size, size_t attr);
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size);
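
/*
 * Usage sketch (hypothetical addresses; rt_kernel_space assumed from
 * mm_aspace.h): map one 4 KiB device page, translate it back, unmap it.
 *
 *   void *va = rt_hw_mmu_map(&rt_kernel_space, (void *)0xF0009000,
 *                            (void *)0x10009000, ARCH_PAGE_SIZE,
 *                            MMU_MAP_K_DEVICE);
 *   if (va != ARCH_MAP_FAILED && va != RT_NULL)
 *   {
 *       void *pa = rt_hw_mmu_v2p(&rt_kernel_space, va);  // expect 0x10009000
 *       rt_hw_mmu_unmap(&rt_kernel_space, va, ARCH_PAGE_SIZE);
 *   }
 */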

void rt_hw_aspace_switch(struct rt_aspace *aspace);
void rt_hw_mmu_switch(void *tbl);

void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr);
void *rt_hw_mmu_tbl_get(void);

int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size, enum rt_mmu_cntl cmd);

void *rt_hw_mmu_pgtbl_create(void);
void rt_hw_mmu_pgtbl_delete(void *pgtbl);
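
/*
 * Lifecycle sketch, under the assumption that the caller owns the table and
 * deletes it only after switching away from it:
 *
 *   void *old = rt_hw_mmu_tbl_get();          // remember the active table
 *   void *pgtbl = rt_hw_mmu_pgtbl_create();   // new first-level table
 *   if (pgtbl != RT_NULL)
 *   {
 *       rt_hw_mmu_switch(pgtbl);              // install the new table
 *       // ... run with the new translation table ...
 *       rt_hw_mmu_switch(old);                // restore the previous table
 *       rt_hw_mmu_pgtbl_delete(pgtbl);        // safe only once inactive
 *   }
 */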

#define AP_APX_MASK     (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x3))
#define AP_APX_URW_KRW  (MMU_MAP_MTBL_AP2(0x0) | MMU_MAP_MTBL_AP01(0x3))
#define AP_APX_URO_KRO  (MMU_MAP_MTBL_AP2(0x1) | MMU_MAP_MTBL_AP01(0x2))

/**
 * @brief Remove a permission from the MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to be removed
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* remove write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            if ((attr & AP_APX_MASK) == AP_APX_URW_KRW)
                attr &= ~MMU_MAP_MTBL_AP01(0x1);
            break;
        /* remove write permission for kernel */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
            switch (attr & AP_APX_MASK)
            {
                case MMU_MAP_MTBL_AP01(0):
                    /* no access at all; nothing to remove */
                    break;
                case MMU_MAP_MTBL_AP01(3):
                    /* user RW, kernel RW: downgrade both to read-only */
                    attr = (attr & ~AP_APX_MASK) | AP_APX_URO_KRO;
                    /* fallthrough: AP2 is already set by AP_APX_URO_KRO */
                default:
                    attr |= MMU_MAP_MTBL_AP2(0x1);
                    break;
            }
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}
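
/*
 * Example (values follow from the macros above): removing user write from
 * MMU_MAP_U_RWCB leaves AP[1:0] = 0b10, i.e. kernel RW / user read-only:
 *
 *   size_t attr = MMU_MAP_U_RWCB;
 *   attr = rt_hw_mmu_attr_rm_perm(attr,
 *                                 RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER);
 */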

/**
 * @brief Add a permission to the MMU attribute
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to be added
 * @return size_t the resulting attribute
 */
rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
{
    switch (prot)
    {
        /* add write permission for user or kernel */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
            /* AP[2:0] = 0b011: both kernel and user become read/write */
            attr |= MMU_MAP_MTBL_AP01(0x3);
            attr &= ~MMU_MAP_MTBL_AP2(0x1);
            break;
        default:
            RT_ASSERT(0);
    }
    return attr;
}

/**
 * @brief Test whether the MMU attribute grants a permission
 *
 * @param attr architecture-specific MMU attribute
 * @param prot the permission to test
 * @return rt_bool_t RT_TRUE if the prot is allowed, otherwise RT_FALSE
 */
rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
{
    rt_bool_t rc = RT_FALSE;
    switch (prot)
    {
        /* test write permission for user */
        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
            rc = (AP_APX_MASK & attr) == AP_APX_URW_KRW;
            break;
        default:
            RT_ASSERT(0);
    }
    return rc;
}
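
/*
 * Example: MMU_MAP_U_RW encodes AP[2:0] = 0b011 (user and kernel RW), so the
 * user-write test succeeds on it but fails on the kernel-only MMU_MAP_K_RW:
 *
 *   RT_ASSERT(rt_hw_mmu_attr_test_perm(MMU_MAP_U_RW,
 *       RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) == RT_TRUE);
 *   RT_ASSERT(rt_hw_mmu_attr_test_perm(MMU_MAP_K_RW,
 *       RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER) == RT_FALSE);
 */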

#endif