1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2023-09-25 tangzz98 the first version
9 */
10
11 #include <rtdef.h>
12 #include <mprotect.h>
13
14 #define DBG_ENABLE
15 #define DBG_SECTION_NAME "MEMORY PROTECTION"
16 #define DBG_LEVEL DBG_ERROR
17 #include <rtdbg.h>
18
19 #define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)
20
21 extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
22 extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);
23
24 static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
25 static rt_uint8_t mpu_mair[8U];
26
rt_hw_mpu_region_default_attr(rt_mem_region_t * region)27 rt_weak rt_uint8_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
28 {
29 static rt_uint8_t default_mem_attr[] =
30 {
31 ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
32 ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
33 ARM_MPU_ATTR_DEVICE_nGnRE,
34 ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
35 ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
36 ARM_MPU_ATTR_DEVICE_nGnRE,
37 ARM_MPU_ATTR_DEVICE_nGnRE
38 };
39 rt_uint8_t attr = 0U;
40 if ((rt_uint32_t)region->start >= 0xE0000000U)
41 {
42 attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? ARM_MPU_ATTR_DEVICE_nGnRE : ARM_MPU_ATTR_DEVICE_nGnRnE;
43 }
44 else
45 {
46 attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
47 }
48 return attr;
49 }
50
_mpu_rbar_rlar(rt_mem_region_t * region)51 static rt_err_t _mpu_rbar_rlar(rt_mem_region_t *region)
52 {
53 rt_uint32_t rlar = 0U;
54 rt_uint8_t mair_attr;
55 rt_uint8_t index;
56 rt_uint8_t attr_indx = 0xFFU;
57 region->attr.rbar = (rt_uint32_t)region->start | (region->attr.rbar & (~MPU_RBAR_BASE_Msk));
58 rlar |= ((rt_uint32_t)region->start + region->size - 1U) & MPU_RLAR_LIMIT_Msk;
59 if (region->attr.mair_attr == RT_ARM_DEFAULT_MAIR_ATTR)
60 {
61 mair_attr = rt_hw_mpu_region_default_attr(region);
62 }
63 else
64 {
65 mair_attr = (rt_uint8_t)region->attr.mair_attr;
66 }
67 for (index = 0U; index < 8U; index++)
68 {
69 if (mpu_mair[index] == RT_ARM_DEFAULT_MAIR_ATTR)
70 {
71 break;
72 }
73 else if (mpu_mair[index] == mair_attr)
74 {
75 attr_indx = index;
76 break;
77 }
78 }
79 /*
80 * Current region's mair_attr does not match any existing region.
81 * All entries in MPU_MAIR are configured.
82 */
83 if (index == 8U)
84 {
85 return RT_ERROR;
86 }
87 /* An existing region has the same mair_attr. */
88 if (attr_indx != 0xFFU)
89 {
90 rlar |= attr_indx & MPU_RLAR_AttrIndx_Msk;
91 }
92 /* Current region's mair_attr does not match any existing region. */
93 else
94 {
95 ARM_MPU_SetMemAttr(index, mair_attr);
96 rlar |= index & MPU_RLAR_AttrIndx_Msk;
97 }
98 rlar |= MPU_RLAR_EN_Msk;
99 region->attr.rlar = rlar;
100
101 return RT_EOK;
102 }
103
rt_hw_mpu_region_valid(rt_mem_region_t * region)104 rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
105 {
106 if (region->size < MPU_MIN_REGION_SIZE)
107 {
108 LOG_E("Region size is too small");
109 return RT_FALSE;
110 }
111 if (region->size & (~(MPU_MIN_REGION_SIZE - 1U)) != region->size)
112 {
113 LOG_E("Region size is not a multiple of 32 bytes");
114 return RT_FALSE;
115 }
116 if ((rt_uint32_t)region->start & (MPU_MIN_REGION_SIZE - 1U) != 0U)
117 {
118 LOG_E("Region is not aligned by 32 bytes");
119 return RT_FALSE;
120 }
121 return RT_TRUE;
122 }
123
/*
 * Initializes the MPU:
 *  - verifies the hardware implements MPU regions and that the configured
 *    NUM_MEM_REGIONS matches the hardware count,
 *  - verifies the static + dynamic + exclusive region budget fits,
 *  - resets the MAIR attribute cache,
 *  - programs the static regions and enables the MPU with the privileged
 *    background region.
 * Returns RT_EOK on success, RT_ERROR on any configuration mismatch.
 */
rt_err_t rt_hw_mpu_init(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    rt_uint8_t num_mpu_regions;
    rt_uint8_t num_dynamic_regions;
    rt_uint8_t index;
    /* DREGION field of MPU_TYPE holds the number of implemented regions. */
    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
    if (num_mpu_regions == 0U)
    {
        LOG_E("Hardware does not support MPU");
        return RT_ERROR;
    }
    if (num_mpu_regions != NUM_MEM_REGIONS)
    {
        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
        LOG_E("NUM_MEM_REGIONS = %d, hardware support %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
        return RT_ERROR;
    }

    /* NOTE(review): presumably NUM_DYNAMIC_REGIONS already accounts for the
     * stack-guard region when RT_USING_HW_STACK_GUARD is set — confirm
     * against mprotect.h. */
    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
    {
        LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
#ifdef RT_USING_HW_STACK_GUARD
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 1);
#else
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
#endif
        return RT_ERROR;
    }
    /* Mark all 8 MAIR slots unused; _mpu_rbar_rlar allocates them lazily. */
    for (index = 0U; index < 8U; index++)
    {
        mpu_mair[index] = RT_ARM_DEFAULT_MAIR_ATTR;
    }

    /* MPU must be disabled while static regions are (re)programmed. */
    ARM_MPU_Disable();
    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
    {
        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
        {
            return RT_ERROR;
        }
        if (_mpu_rbar_rlar(&(static_regions[index])) == RT_ERROR)
        {
            LOG_E("Number of different mair_attr configurations exceeds 8");
            return RT_ERROR;
        }
        ARM_MPU_SetRegion(index, static_regions[index].attr.rbar, static_regions[index].attr.rlar);
    }
    /* Enable background region. */
    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);

    return RT_EOK;
}
178
rt_hw_mpu_add_region(rt_thread_t thread,rt_mem_region_t * region)179 rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
180 {
181 rt_uint8_t index;
182 rt_mem_region_t *free_region;
183 if (rt_hw_mpu_region_valid(region) == RT_FALSE)
184 {
185 return RT_ERROR;
186 }
187 rt_enter_critical();
188 if (_mpu_rbar_rlar(region) == RT_ERROR)
189 {
190 rt_exit_critical();
191 LOG_E("Number of different mair_attr configurations exceeds 8");
192 return RT_ERROR;
193 }
194 if (thread == RT_NULL)
195 {
196 rt_exit_critical();
197 return RT_EOK;
198 }
199 free_region = rt_mprotect_find_free_region(thread);
200 if (free_region == RT_NULL)
201 {
202 rt_exit_critical();
203 LOG_E("Insufficient regions");
204 return RT_ERROR;
205 }
206 rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
207 if (thread == rt_thread_self())
208 {
209 index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
210 ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
211 }
212 rt_exit_critical();
213 return RT_EOK;
214 }
215
rt_hw_mpu_delete_region(rt_thread_t thread,rt_mem_region_t * region)216 rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
217 {
218 rt_uint8_t index;
219 rt_enter_critical();
220 rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
221 if (found_region == RT_NULL)
222 {
223 rt_exit_critical();
224 LOG_E("Region not found");
225 return RT_ERROR;
226 }
227 rt_memset(found_region, 0, sizeof(rt_mem_region_t));
228 if (thread == rt_thread_self())
229 {
230 index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
231 ARM_MPU_ClrRegion(index);
232 }
233 rt_exit_critical();
234 return RT_EOK;
235 }
236
rt_hw_mpu_update_region(rt_thread_t thread,rt_mem_region_t * region)237 rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
238 {
239 rt_uint8_t index;
240 if (rt_hw_mpu_region_valid(region) == RT_FALSE)
241 {
242 return RT_ERROR;
243 }
244 rt_enter_critical();
245 if (_mpu_rbar_rlar(region) == RT_ERROR)
246 {
247 rt_exit_critical();
248 LOG_E("Number of different mair_attr configurations exceeds 8");
249 return RT_ERROR;
250 }
251 rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
252 if (old_region == RT_NULL)
253 {
254 rt_exit_critical();
255 LOG_E("Region not found");
256 return RT_ERROR;
257 }
258 rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
259 if (thread == rt_thread_self())
260 {
261 index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
262 ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
263 }
264 rt_exit_critical();
265 return RT_EOK;
266 }
267
rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)268 rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
269 {
270 mem_manage_hook = hook;
271 return RT_EOK;
272 }
273
/*
 * Reprograms the per-thread MPU entries on a context switch: the incoming
 * thread's dynamic regions are loaded after the static ones, followed by
 * the exclusive regions owned by OTHER threads, and any leftover hardware
 * entries from the previous thread are cleared.
 */
void rt_hw_mpu_table_switch(rt_thread_t thread)
{
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_mem_region_t *regions = (rt_mem_region_t *)thread->mem_regions;
    rt_uint8_t next = NUM_STATIC_REGIONS;
    rt_uint8_t i;

    if (regions != RT_NULL)
    {
        /* Entries with size == 0 are free slots and are skipped. */
        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
        {
            if (regions[i].size != 0U)
            {
                ARM_MPU_SetRegion(next, regions[i].attr.rbar, regions[i].attr.rlar);
                next += 1U;
            }
        }
    }
    /* Load exclusive regions belonging to other threads. */
    for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
    {
        if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
        {
            ARM_MPU_SetRegion(next, exclusive_regions[i].region.attr.rbar, exclusive_regions[i].region.attr.rlar);
            next += 1U;
        }
    }
    /* Disable whatever entries remain from the outgoing thread. */
    while (next < NUM_MEM_REGIONS)
    {
        ARM_MPU_ClrRegion(next);
        next += 1U;
    }
}
303
/*
 * MemManage fault handler.  Builds an rt_mem_exception_info_t describing
 * the fault (faulting thread, faulting address when MMFAR is valid, the
 * region containing that address, and the MMFSR bits), passes it to the
 * registered hook, then spins forever — the handler never returns.
 *
 * Region lookup order: exclusive regions owned by other threads first,
 * then the faulting thread's dynamic regions, then the static regions.
 * Each table is scanned from the highest index down; the first hit wins.
 */
void MemManage_Handler(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_mem_exception_info_t info;
    rt_int8_t i; /* signed: countdown loops terminate at -1 */
    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
    info.thread = rt_thread_self();
    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
    {
        /* MMFAR holds a valid faulting address only when MMARVALID is set. */
        info.addr = (void *)(SCB->MMFAR);
        /* NOTE(review): the cast of &exclusive_regions[i] to rt_mem_region_t*
         * (here and in the memcpy below) assumes rt_mem_region_t is the first
         * member of rt_mem_exclusive_region_t — confirm against mprotect.h. */
        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
        {
            if ((exclusive_regions[i].owner != RT_NULL) && ((exclusive_regions[i].owner != rt_thread_self())) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
            {
                rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
                break;
            }
        }
        /* info.region.size stays 0 when no exclusive region matched. */
        if (info.region.size == 0U)
        {
            if (info.thread->mem_regions != RT_NULL)
            {
                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
                {
                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
                    {
                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
            if (info.region.size == 0U)
            {
                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
                {
                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
                    {
                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
        }
    }
    /* Extract the MemManage fault status bits from CFSR. */
    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
    if (mem_manage_hook != RT_NULL)
    {
        mem_manage_hook(&info);
    }
    /* Faults are not recoverable here; halt so the hook's diagnostics stand. */
    while (1);
}
356