/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-25     tangzz98     the first version
 */

#include <rtdef.h>
#include <mprotect.h>

#define DBG_ENABLE
#define DBG_SECTION_NAME "MEMORY PROTECTION"
#define DBG_LEVEL DBG_ERROR
#include <rtdbg.h>

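/*
 * Translate a region's slot in a thread's region table into a hardware MPU
 * region index: dynamic regions are programmed after the static entries.
 */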
#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)

extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);

static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;

rt_weak rt_uint32_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
{
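    /* Default attributes for the seven 512MB regions of the ARMv7-M memory
     * map below 0xE0000000, indexed by the top three address bits. */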
    static rt_uint32_t default_mem_attr[] =
    {
        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
        DEVICE_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
        DEVICE_SHAREABLE,
        DEVICE_NON_SHAREABLE
    };
    rt_uint32_t attr = 0U;
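    /* System space: the PPB at 0xE0000000-0xE00FFFFF and the vendor region
     * above 0xE0100000 get fixed attributes. */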
    if ((rt_uint32_t)region->start >= 0xE0000000U)
    {
        attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? STRONGLY_ORDERED_SHAREABLE : DEVICE_SHAREABLE;
    }
    else
    {
        attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
    }
    return attr;
}

static rt_uint32_t _mpu_rasr(rt_mem_region_t *region)
{
    rt_uint32_t rasr = 0U;
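    /* A RASR value with the RESERVED bits set selects the default memory
     * attributes for the region's address range, keeping only the caller's
     * XN and AP fields. */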
    if ((region->attr.rasr & RESERVED) == RESERVED)
    {
        rasr |= rt_hw_mpu_region_default_attr(region);
        rasr |= region->attr.rasr & (MPU_RASR_XN_Msk | MPU_RASR_AP_Msk);
    }
    else
    {
        rasr |= region->attr.rasr & MPU_RASR_ATTRS_Msk;
    }
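    /* RASR.SIZE encodes the region size as 2^(SIZE + 1); for a power-of-two
     * size this computes SIZE = log2(size) - 1 from the leading zeros of
     * (size - 1). */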
    rasr |= ((32U - __builtin_clz(region->size - 1U) - 2U + 1U) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk;
    rasr |= MPU_RASR_ENABLE_Msk;
    return rasr;
}

rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
{
    if (region->size < MPU_MIN_REGION_SIZE)
    {
        LOG_E("Region size is too small");
        return RT_FALSE;
    }
    if ((region->size & (region->size - 1U)) != 0U)
    {
        LOG_E("Region size is not a power of 2");
        return RT_FALSE;
    }
    if (((rt_uint32_t)region->start & (region->size - 1U)) != 0U)
    {
        LOG_E("Region is not naturally aligned");
        return RT_FALSE;
    }
    return RT_TRUE;
}

rt_err_t rt_hw_mpu_init(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    rt_uint8_t num_mpu_regions;
    rt_uint8_t num_dynamic_regions;
    rt_uint8_t index;
    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
    if (num_mpu_regions == 0U)
    {
        LOG_E("Hardware does not support MPU");
        return RT_ERROR;
    }
    if (num_mpu_regions != NUM_MEM_REGIONS)
    {
        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
        LOG_E("NUM_MEM_REGIONS = %d, hardware supports %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
        return RT_ERROR;
    }

    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
    {
        LOG_E("Insufficient MPU regions: hardware supports only %d regions", num_mpu_regions);
#ifdef RT_USING_HW_STACK_GUARD
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 2);
#else
        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
#endif
        return RT_ERROR;
    }

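    /* Program the static regions while the MPU is disabled. */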
    ARM_MPU_Disable();
    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
    {
        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
        {
            return RT_ERROR;
        }
        static_regions[index].attr.rasr = _mpu_rasr(&(static_regions[index]));
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)static_regions[index].start), static_regions[index].attr.rasr);
    }
    /* Enable background region. */
    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);

    return RT_EOK;
}

rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    rt_mem_region_t *free_region;
    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
    {
        return RT_ERROR;
    }
    region->attr.rasr = _mpu_rasr(region);
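    /* No target thread: the caller only wants the region validated and its
     * RASR value computed. */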
    if (thread == RT_NULL)
    {
        return RT_EOK;
    }
    rt_enter_critical();
    free_region = rt_mprotect_find_free_region(thread);
    if (free_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Insufficient regions");
        return RT_ERROR;
    }
    rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
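    /* Program the hardware only when the region belongs to the running
     * thread; other threads load it on their next rt_hw_mpu_table_switch(). */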
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
    }
    rt_exit_critical();
    return RT_EOK;
}

rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    rt_enter_critical();
    rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
    if (found_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Region not found");
        return RT_ERROR;
    }
    rt_memset(found_region, 0, sizeof(rt_mem_region_t));
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
        ARM_MPU_ClrRegion(index);
    }
    rt_exit_critical();
    return RT_EOK;
}

rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
{
    rt_uint8_t index;
    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
    {
        return RT_ERROR;
    }
    region->attr.rasr = _mpu_rasr(region);
    rt_enter_critical();
    rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
    if (old_region == RT_NULL)
    {
        rt_exit_critical();
        LOG_E("Region not found");
        return RT_ERROR;
    }
    rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
    if (thread == rt_thread_self())
    {
        index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
    }
    rt_exit_critical();
    return RT_EOK;
}

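/*
 * Register a hook that MemManage_Handler calls with details of the faulting
 * access. A minimal usage sketch (the handler name is hypothetical):
 *
 *     static void on_mem_fault(rt_mem_exception_info_t *info)
 *     {
 *         LOG_E("illegal access to %p, MMFSR = 0x%x", info->addr, info->mmfsr);
 *     }
 *
 *     rt_hw_mpu_exception_set_hook(on_mem_fault);
 */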
rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
{
    mem_manage_hook = hook;
    return RT_EOK;
}

void rt_hw_mpu_table_switch(rt_thread_t thread)
{
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_uint8_t i;
    rt_uint8_t index = NUM_STATIC_REGIONS;
    if (thread->mem_regions != RT_NULL)
    {
        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
        {
            if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
            {
                ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(((rt_mem_region_t *)thread->mem_regions)[i].start)), ((rt_mem_region_t *)thread->mem_regions)[i].attr.rasr);
                index += 1U;
            }
        }
    }
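    /* Map exclusive regions owned by other threads so the incoming thread
     * cannot access them. */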
    for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
    {
        if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
        {
            ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(exclusive_regions[i].region.start)), exclusive_regions[i].region.attr.rasr);
            index += 1U;
        }
    }
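    /* Disable any MPU regions left over from the previous thread's table. */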
    for ( ; index < NUM_MEM_REGIONS; index++)
    {
        ARM_MPU_ClrRegion(index);
    }
}

void MemManage_Handler(void)
{
    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
    rt_mem_exception_info_t info;
    rt_int8_t i;
    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
    info.thread = rt_thread_self();
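    /* MMARVALID set: MMFAR holds the faulting address. Locate the region
     * containing it, checking exclusive regions first, then the thread's
     * dynamic regions, then the static regions. */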
    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
    {
        info.addr = (void *)(SCB->MMFAR);
        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
        {
            if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != rt_thread_self()) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
            {
                rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
                break;
            }
        }
        if (info.region.size == 0U)
        {
            if (info.thread->mem_regions != RT_NULL)
            {
                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
                {
                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
                    {
                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
            if (info.region.size == 0U)
            {
                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
                {
                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
                    {
                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
                        break;
                    }
                }
            }
        }
    }
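    /* Extract the MemManage Fault Status Register (MMFSR) byte from CFSR. */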
    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
    if (mem_manage_hook != RT_NULL)
    {
        mem_manage_hook(&info);
    }
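    /* Spin if the hook returns without resolving the fault. */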
    while (1);
}