1 /*
2 * Copyright (c) 2017 Linaro Limited.
3 * Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/device.h>
9 #include <zephyr/init.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/kernel_structs.h>
12
13 #include "arm_core_mpu_dev.h"
14 #include <zephyr/linker/linker-defs.h>
15
16 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
17 #include <zephyr/logging/log.h>
18 LOG_MODULE_REGISTER(mpu);
19
20 extern void arm_core_mpu_enable(void);
21 extern void arm_core_mpu_disable(void);
22
23 /*
24 * Maximum number of dynamic memory partitions that may be supplied to the MPU
25 * driver for programming during run-time. Note that the actual number of the
26 * available MPU regions for dynamic programming depends on the number of the
27 * static MPU regions currently being programmed, and the total number of HW-
28 * available MPU regions. This macro is only used internally in function
29 * z_arm_configure_dynamic_mpu_regions(), to reserve sufficient area for the
30 * array of dynamic regions passed to the underlying driver.
31 */
#if defined(CONFIG_USERSPACE)
/* One region per memory-domain partition, plus one for the user thread
 * stack, plus (optionally) one for the MPU stack guard. The expansion is
 * fully parenthesized so the macro stays well-formed inside larger
 * expressions (comparisons, arithmetic).
 */
#define _MAX_DYNAMIC_MPU_REGIONS_NUM \
	(CONFIG_MAX_DOMAIN_PARTITIONS + /* User thread stack */ 1 + \
	 (IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0))
#else
/* Without userspace only the (optional) MPU stack guard needs a slot. */
#define _MAX_DYNAMIC_MPU_REGIONS_NUM \
	(IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0)
#endif /* CONFIG_USERSPACE */
40
/* Convenience macros to denote the start address and the size of the system
 * memory area, where dynamic memory regions may be programmed at run-time.
 */
#if defined(CONFIG_USERSPACE)
/* With userspace, dynamic programming may start as low as the application
 * shared memory (app_smem) section.
 */
#define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&_app_smem_start)
#else
/* Without userspace, only the kernel RAM area hosts dynamic regions. */
#define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&__kernel_ram_start)
#endif /* CONFIG_USERSPACE */
/* Size of the dynamic-region area: from the selected start up to the end of
 * kernel RAM (both bounds are linker-defined symbols).
 */
#define _MPU_DYNAMIC_REGIONS_AREA_SIZE ((uint32_t)&__kernel_ram_end - \
	_MPU_DYNAMIC_REGIONS_AREA_START)
51
52 #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD)
53 K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
54 #endif
55
56 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
57 && defined(CONFIG_MPU_STACK_GUARD)
58 uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread);
59 #endif
60
61 #if defined(CONFIG_CODE_DATA_RELOCATION_SRAM)
62 extern char __ram_text_reloc_start[];
63 extern char __ram_text_reloc_size[];
64 #endif
65
66 #if defined(CONFIG_SRAM_VECTOR_TABLE)
67 extern char _sram_vector_start[];
68 extern char _sram_vector_size[];
69 #endif
70
/* Boot-time (static) MPU regions. Each entry maps a linker-defined memory
 * area to the MPU attributes it requires; which entries exist is decided at
 * build time by the corresponding Kconfig options.
 */
static const struct z_arm_mpu_partition static_regions[] = {
#if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
	{
		/* GCOV code coverage accounting area. Needs User permissions
		 * to function
		 */
		.start = (uint32_t)&__gcov_bss_start,
		.size = (uint32_t)&__gcov_bss_size,
		.attr = K_MEM_PARTITION_P_RW_U_RW,
	},
#endif /* CONFIG_COVERAGE_GCOV && CONFIG_USERSPACE */
#if defined(CONFIG_NOCACHE_MEMORY)
	{
		/* Special non-cacheable RAM area */
		.start = (uint32_t)&_nocache_ram_start,
		.size = (uint32_t)&_nocache_ram_size,
		.attr = K_MEM_PARTITION_P_RW_U_NA_NOCACHE,
	},
#endif /* CONFIG_NOCACHE_MEMORY */
#if defined(CONFIG_ARCH_HAS_RAMFUNC_SUPPORT)
	{
		/* Special RAM area for program text */
		.start = (uint32_t)&__ramfunc_start,
		.size = (uint32_t)&__ramfunc_size,
#if defined(CONFIG_ARM_MPU_PXN) && defined(CONFIG_USERSPACE)
		/* PXN builds: presumably privileged-read-only / user-RX —
		 * attribute name encodes P_R / U_RX.
		 */
		.attr = K_MEM_PARTITION_P_R_U_RX,
#else
		.attr = K_MEM_PARTITION_P_RX_U_RX,
#endif
	},
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
#if defined(CONFIG_CODE_DATA_RELOCATION_SRAM)
	{
		/* RAM area for relocated text */
		.start = (uint32_t)&__ram_text_reloc_start,
		.size = (uint32_t)&__ram_text_reloc_size,
#if defined(CONFIG_ARM_MPU_PXN) && defined(CONFIG_USERSPACE)
		.attr = K_MEM_PARTITION_P_R_U_RX,
#else
		.attr = K_MEM_PARTITION_P_RX_U_RX,
#endif
	},
#endif /* CONFIG_CODE_DATA_RELOCATION_SRAM */
#if defined(CONFIG_SRAM_VECTOR_TABLE)
	{
		/* Vector table in SRAM */
		.start = (uint32_t)&_sram_vector_start,
		.size = (uint32_t)&_sram_vector_size,
#if defined(CONFIG_ARM_MPU_PXN) && defined(CONFIG_USERSPACE)
		.attr = K_MEM_PARTITION_P_R_U_RX,
#else
		.attr = K_MEM_PARTITION_P_RO_U_RO,
#endif
	},
#endif /* CONFIG_SRAM_VECTOR_TABLE */
#if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD)
	/* Main stack MPU guard to detect overflow.
	 * Note:
	 * FPU_SHARING and USERSPACE are not supported features
	 * under CONFIG_MULTITHREADING=n, so the MPU guard (if
	 * exists) is reserved aside of CONFIG_MAIN_STACK_SIZE
	 * and there is no requirement for larger guard area (FP
	 * context is not stacked).
	 */
	{
		.start = (uint32_t)z_main_stack,
		.size = (uint32_t)MPU_GUARD_ALIGN_AND_SIZE,
		.attr = K_MEM_PARTITION_P_RO_U_NA,
	},
#endif /* !CONFIG_MULTITHREADING && CONFIG_MPU_STACK_GUARD */
};
142
/**
 * @brief Use the HW-specific MPU driver to program
 *        the static MPU regions.
 *
 * Program the static MPU regions using the HW-specific MPU driver. The
 * function is meant to be invoked only once upon system initialization.
 *
 * If the function attempts to configure a number of regions beyond the
 * MPU HW limitations, the system behavior will be undefined.
 *
 * For some MPU architectures, such as the unmodified ARMv8-M MPU,
 * the function must execute with MPU enabled.
 */
void z_arm_configure_static_mpu_regions(void)
{
	/* Configure the static MPU regions within firmware SRAM boundaries.
	 * Start address of the image is given by _image_ram_start. The end
	 * of the firmware SRAM area is marked by __kernel_ram_end, taking
	 * into account the unused SRAM area, as well.
	 */
#ifdef CONFIG_AARCH32_ARMV8_R
	/* On ARMv8-R AArch32 the regions are programmed with the MPU
	 * disabled, and the MPU is re-enabled afterwards.
	 */
	arm_core_mpu_disable();
#endif
	arm_core_mpu_configure_static_mpu_regions(static_regions,
		ARRAY_SIZE(static_regions),
		(uint32_t)&_image_ram_start,
		(uint32_t)&__kernel_ram_end);
#ifdef CONFIG_AARCH32_ARMV8_R
	arm_core_mpu_enable();
#endif

#if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \
	defined(CONFIG_MULTITHREADING)
	/* Define a constant array of z_arm_mpu_partition objects that holds the
	 * boundaries of the areas, inside which dynamic region programming
	 * is allowed. The information is passed to the underlying driver at
	 * initialization.
	 */
	const struct z_arm_mpu_partition dyn_region_areas[] = {
		{
			.start = _MPU_DYNAMIC_REGIONS_AREA_START,
			.size = _MPU_DYNAMIC_REGIONS_AREA_SIZE,
		}
	};

	arm_core_mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
		ARRAY_SIZE(dyn_region_areas));
#endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS && CONFIG_MULTITHREADING */
}
192
193 /**
194 * @brief Use the HW-specific MPU driver to program
195 * the dynamic MPU regions.
196 *
197 * Program the dynamic MPU regions using the HW-specific MPU
198 * driver. This function is meant to be invoked every time the
199 * memory map is to be re-programmed, e.g during thread context
200 * switch, entering user mode, reconfiguring memory domain, etc.
201 *
202 * For some MPU architectures, such as the unmodified ARMv8-M MPU,
203 * the function must execute with MPU enabled.
204 *
205 * This function is not inherently thread-safe, but the memory domain
206 * spinlock needs to be held anyway.
207 */
z_arm_configure_dynamic_mpu_regions(struct k_thread * thread)208 void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
209 {
210 /* Define an array of z_arm_mpu_partition objects to hold the configuration
211 * of the respective dynamic MPU regions to be programmed for
212 * the given thread. The array of partitions (along with its
213 * actual size) will be supplied to the underlying MPU driver.
214 *
215 * The drivers of what regions get configured are CONFIG_USERSPACE,
216 * CONFIG_MPU_STACK_GUARD, and K_USER/supervisor threads.
217 *
218 * If CONFIG_USERSPACE is defined and the thread is a member of any
219 * memory domain then any partitions defined within that domain get a
220 * defined region.
221 *
222 * If CONFIG_USERSPACE is defined and the thread is a user thread
223 * (K_USER) the usermode thread stack is defined a region.
224 *
225 * IF CONFIG_MPU_STACK_GUARD is defined the thread is a supervisor
226 * thread, the stack guard will be defined in front of the
227 * thread->stack_info.start. On a K_USER thread, the guard is defined
228 * in front of the privilege mode stack, thread->arch.priv_stack_start.
229 */
230 static struct z_arm_mpu_partition
231 dynamic_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM];
232
233 uint8_t region_num = 0U;
234
235 #if defined(CONFIG_USERSPACE)
236 /* Memory domain */
237 LOG_DBG("configure thread %p's domain", thread);
238 struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
239
240 if (mem_domain) {
241 LOG_DBG("configure domain: %p", mem_domain);
242 uint32_t num_partitions = mem_domain->num_partitions;
243 struct k_mem_partition *partition;
244 int i;
245
246 LOG_DBG("configure domain: %p", mem_domain);
247
248 for (i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) {
249 partition = &mem_domain->partitions[i];
250 if (partition->size == 0) {
251 /* Zero size indicates a non-existing
252 * memory partition.
253 */
254 continue;
255 }
256 LOG_DBG("set region 0x%lx 0x%x",
257 partition->start, partition->size);
258 __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
259 "Out-of-bounds error for dynamic region map.");
260
261 dynamic_regions[region_num].start = partition->start;
262 dynamic_regions[region_num].size = partition->size;
263 dynamic_regions[region_num].attr = partition->attr;
264
265 region_num++;
266 num_partitions--;
267 if (num_partitions == 0U) {
268 break;
269 }
270 }
271 }
272 /* Thread user stack */
273 LOG_DBG("configure user thread %p's context", thread);
274 if (thread->arch.priv_stack_start) {
275 /* K_USER thread stack needs a region */
276 uintptr_t base = (uintptr_t)thread->stack_obj;
277 size_t size = thread->stack_info.size +
278 (thread->stack_info.start - base);
279
280 __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
281 "Out-of-bounds error for dynamic region map.");
282
283 dynamic_regions[region_num].start = base;
284 dynamic_regions[region_num].size = size;
285 dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RW_U_RW;
286
287 region_num++;
288 }
289 #endif /* CONFIG_USERSPACE */
290
291 #if defined(CONFIG_MPU_STACK_GUARD)
292 /* Define a stack guard region for either the thread stack or the
293 * supervisor/privilege mode stack depending on the type of thread
294 * being mapped.
295 */
296
297 /* Privileged stack guard */
298 uintptr_t guard_start;
299 size_t guard_size = MPU_GUARD_ALIGN_AND_SIZE;
300
301 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
302 guard_size = z_arm_mpu_stack_guard_and_fpu_adjust(thread);
303 #endif
304
305 #if defined(CONFIG_USERSPACE)
306 if (thread->arch.priv_stack_start) {
307 /* A K_USER thread has the stack guard protecting the privilege
308 * stack and not on the usermode stack because the user mode
309 * stack already has its own defined memory region.
310 */
311 guard_start = thread->arch.priv_stack_start - guard_size;
312
313 __ASSERT((uintptr_t)&z_priv_stacks_ram_start <= guard_start,
314 "Guard start: (0x%lx) below privilege stacks boundary: (%p)",
315 guard_start, z_priv_stacks_ram_start);
316 } else
317 #endif /* CONFIG_USERSPACE */
318 {
319 /* A supervisor thread only has the normal thread stack to
320 * protect with a stack guard.
321 */
322 guard_start = thread->stack_info.start - guard_size;
323 #ifdef CONFIG_USERSPACE
324 __ASSERT((uintptr_t)thread->stack_obj == guard_start,
325 "Guard start (0x%lx) not beginning at stack object (%p)\n",
326 guard_start, thread->stack_obj);
327 #endif /* CONFIG_USERSPACE */
328 }
329
330 __ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
331 "Out-of-bounds error for dynamic region map.");
332
333 dynamic_regions[region_num].start = guard_start;
334 dynamic_regions[region_num].size = guard_size;
335 dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RO_U_NA;
336
337 region_num++;
338 #endif /* CONFIG_MPU_STACK_GUARD */
339
340 /* Configure the dynamic MPU regions */
341 #ifdef CONFIG_AARCH32_ARMV8_R
342 arm_core_mpu_disable();
343 #endif
344 arm_core_mpu_configure_dynamic_mpu_regions(dynamic_regions,
345 region_num);
346 #ifdef CONFIG_AARCH32_ARMV8_R
347 arm_core_mpu_enable();
348 #endif
349 }
350
351 #if defined(CONFIG_USERSPACE)
arch_mem_domain_max_partitions_get(void)352 int arch_mem_domain_max_partitions_get(void)
353 {
354 int available_regions = arm_core_mpu_get_max_available_dyn_regions();
355
356 available_regions -=
357 ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_THREAD_STACK;
358
359 if (IS_ENABLED(CONFIG_MPU_STACK_GUARD)) {
360 available_regions -=
361 ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_MPU_STACK_GUARD;
362 }
363
364 return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
365 }
366
/* Validate that the current user context may access [addr, addr + size)
 * for read (write == 0) or write (write != 0); delegates the permission
 * check to the HW-specific MPU driver and returns its verdict.
 */
int arch_buffer_validate(const void *addr, size_t size, int write)
{
	int rc = arm_core_mpu_buffer_validate(addr, size, write);

	return rc;
}
371
372 #endif /* CONFIG_USERSPACE */
373