/*
 * Copyright (c) 2022 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Physical Memory Protection (PMP) is RISC-V parlance for an MPU.
 *
 * The PMP is comprised of a number of entries or slots. This number depends
 * on the hardware design. For each slot there is an address register and
 * a configuration register. While each address register is matched to an
 * actual CSR register, configuration registers are small and therefore
 * several of them are bundled in a few additional CSR registers.
 *
 * PMP slot configurations are updated in memory to avoid read-modify-write
 * cycles on corresponding CSR registers. Relevant CSR registers are always
 * written in batch from their shadow copy in RAM for better efficiency.
 *
 * In the stackguard case we keep an m-mode copy for each thread. Each
 * user-mode thread also has a u-mode copy. This makes context switching
 * faster as the precomputed content just has to be written to the actual
 * registers with no additional processing.
 *
 * Thread-specific m-mode and u-mode PMP entries start from the PMP slot
 * indicated by global_pmp_end_index. Lower slots are used by global entries
 * which are never modified.
 */
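
/*
 * Illustrative slot layout (an example only; the exact number and order of
 * global entries depends on the configuration):
 *
 *   slots 0 .. global_pmp_end_index-1 : global, locked entries
 *     - read-only ROM region (R/X, locked)
 *     - optional null-pointer guard region (no access, locked)
 *     - optional per-CPU IRQ stack guard (no access, locked)
 *   slots global_pmp_end_index .. end : per-thread entries
 *     - m-mode: thread stack guard + MPRV catch-all
 *     - u-mode: thread stack + memory domain partitions
 */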

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/linker/linker-defs.h>
#include <pmp.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/arch/riscv/csr.h>

#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mpu);

#define PMP_DEBUG_DUMP 0

#ifdef CONFIG_64BIT
# define PR_ADDR "0x%016lx"
#else
# define PR_ADDR "0x%08lx"
#endif

#define PMP_TOR_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_TOR)
#define PMP_NA4_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NA4)
#define PMP_NAPOT_SUPPORTED	!IS_ENABLED(CONFIG_PMP_NO_NAPOT)

#define PMPCFG_STRIDE sizeof(unsigned long)

#define PMP_ADDR(addr)			((addr) >> 2)
#define NAPOT_RANGE(size)		(((size) - 1) >> 1)
#define PMP_ADDR_NAPOT(addr, size)	PMP_ADDR(addr | NAPOT_RANGE(size))
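
/*
 * Worked example of the NAPOT encoding above (illustration only):
 * for a naturally aligned 4 KB region at 0x80000000,
 *
 *   NAPOT_RANGE(0x1000)                = 0x7ff
 *   PMP_ADDR_NAPOT(0x80000000, 0x1000) = (0x80000000 | 0x7ff) >> 2
 *                                      = 0x200001ff
 *
 * i.e. the base address shifted right by 2 with its low bits set to
 * encode the region size, as NAPOT mode expects.
 */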

#define PMP_NONE 0

static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      const char *banner)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index;

	LOG_DBG("PMP %s:", banner);
	for (index = pmp_start; index < pmp_end; index++) {
		unsigned long start, end, tmp;

		switch (pmp_n_cfg[index] & PMP_A) {
		case PMP_TOR:
			start = (index == 0) ? 0 : (pmp_addr[index - 1] << 2);
			end = (pmp_addr[index] << 2) - 1;
			break;
		case PMP_NA4:
			start = pmp_addr[index] << 2;
			end = start + 3;
			break;
		case PMP_NAPOT:
			tmp = (pmp_addr[index] << 2) | 0x3;
			start = tmp & (tmp + 1);
			end   = tmp | (tmp + 1);
			break;
		default:
			start = 0;
			end = 0;
			break;
		}

		if (end == 0) {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x", index,
				pmp_addr[index],
				pmp_n_cfg[index]);
		} else {
			LOG_DBG("%3d: "PR_ADDR" 0x%02x --> "
				PR_ADDR"-"PR_ADDR" %c%c%c%s",
				index, pmp_addr[index], pmp_n_cfg[index],
				start, end,
				(pmp_n_cfg[index] & PMP_R) ? 'R' : '-',
				(pmp_n_cfg[index] & PMP_W) ? 'W' : '-',
				(pmp_n_cfg[index] & PMP_X) ? 'X' : '-',
				(pmp_n_cfg[index] & PMP_L) ? " LOCKED" : "");
		}
	}
}

static void dump_pmp_regs(const char *banner)
{
	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];

#define PMPADDR_READ(x) pmp_addr[x] = csr_read(pmpaddr##x)

	FOR_EACH(PMPADDR_READ, (;), 0, 1, 2, 3, 4, 5, 6, 7);
#if CONFIG_PMP_SLOTS > 8
	FOR_EACH(PMPADDR_READ, (;), 8, 9, 10, 11, 12, 13, 14, 15);
#endif
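
	/*
	 * For reference, FOR_EACH(PMPADDR_READ, (;), 0, 1, ...) above
	 * expands to a series of statements such as:
	 *
	 *   pmp_addr[0] = csr_read(pmpaddr0);
	 *   pmp_addr[1] = csr_read(pmpaddr1);
	 *   ...
	 *
	 * Since the csr instructions take the CSR name as an immediate,
	 * each pmpaddr register has to be read with its own csr_read().
	 */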

#undef PMPADDR_READ

#ifdef CONFIG_64BIT
	pmp_cfg[0] = csr_read(pmpcfg0);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[1] = csr_read(pmpcfg2);
#endif
#else
	pmp_cfg[0] = csr_read(pmpcfg0);
	pmp_cfg[1] = csr_read(pmpcfg1);
#if CONFIG_PMP_SLOTS > 8
	pmp_cfg[2] = csr_read(pmpcfg2);
	pmp_cfg[3] = csr_read(pmpcfg3);
#endif
#endif

	print_pmp_entries(0, CONFIG_PMP_SLOTS, pmp_addr, pmp_cfg, banner);
}

/**
 * @brief Set PMP shadow register values in memory
 *
 * Register content is built using this function, which automatically selects
 * the most appropriate address matching mode. Note that the special case
 * start=0 size=0 is valid and means the whole address range.
 *
 * @param index_p Location of the current PMP slot index to use. This index
 *                will be updated according to the number of slots used.
 * @param perm PMP permission flags
 * @param start Start address of the memory area to cover
 * @param size Size of the memory area to cover
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 * @return true on success, false when out of free PMP slots.
 */
static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
			  uintptr_t start, size_t size,
			  unsigned long *pmp_addr, unsigned long *pmp_cfg,
			  unsigned int index_limit)
{
	uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
	unsigned int index = *index_p;
	bool ok = true;

	__ASSERT((start & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned start address");
	__ASSERT((size & (CONFIG_PMP_GRANULARITY - 1)) == 0, "misaligned size");

	if (index >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED &&
		   ((index == 0 && start == 0) ||
		    (index != 0 && pmp_addr[index - 1] == PMP_ADDR(start)))) {
		/* We can use TOR using only one additional slot */
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else if (PMP_NA4_SUPPORTED && size == 4) {
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = perm | PMP_NA4;
		index += 1;
	} else if (PMP_NAPOT_SUPPORTED &&
		   ((size  & (size - 1)) == 0) /* power of 2 */ &&
		   ((start & (size - 1)) == 0) /* naturally aligned */ &&
		   (PMP_NA4_SUPPORTED || (size != 4))) {
		pmp_addr[index] = PMP_ADDR_NAPOT(start, size);
		pmp_n_cfg[index] = perm | PMP_NAPOT;
		index += 1;
	} else if (PMP_TOR_SUPPORTED && index + 1 >= index_limit) {
		LOG_ERR("out of PMP slots");
		ok = false;
	} else if (PMP_TOR_SUPPORTED) {
		pmp_addr[index] = PMP_ADDR(start);
		pmp_n_cfg[index] = 0;
		index += 1;
		pmp_addr[index] = PMP_ADDR(start + size);
		pmp_n_cfg[index] = perm | PMP_TOR;
		index += 1;
	} else {
		LOG_ERR("inappropriate PMP range (start=%#lx size=%#zx)", start, size);
		ok = false;
	}

	*index_p = index;
	return ok;
}
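
/*
 * Slot usage examples for set_pmp_entry() (illustration only, assuming
 * TOR, NA4 and NAPOT are all supported):
 *
 *   - a naturally aligned power-of-two region uses 1 slot (NAPOT, or
 *     NA4 when size == 4);
 *   - an arbitrary range uses 2 slots: an address-only entry for the
 *     start and a PMP_TOR entry for the end;
 *   - an arbitrary range whose start matches the end of the previous
 *     entry (pmp_addr[index - 1] == PMP_ADDR(start)) needs only 1
 *     additional PMP_TOR slot, which is how consecutive regions get
 *     packed.
 */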

#ifdef CONFIG_PMP_STACK_GUARD
static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
					 unsigned long *pmp_addr, unsigned long *pmp_cfg,
					 unsigned int index_limit)
{
	/*
	 * We'll be using MPRV. Make a fallback entry with everything
	 * accessible, as if no PMP entries were matched, which is otherwise
	 * the default behavior for m-mode without MPRV.
	 */
	bool ok = set_pmp_entry(index_p, PMP_R | PMP_W | PMP_X,
				0, 0, pmp_addr, pmp_cfg, index_limit);

#ifdef CONFIG_QEMU_TARGET
	if (ok) {
		/*
		 * Workaround: the above produced 0x1fffffff which is correct.
		 * But there is a QEMU bug that prevents it from interpreting
		 * this value correctly. Hardcode the special case used by
		 * QEMU to bypass this bug for now. The QEMU fix is here:
		 * https://lists.gnu.org/archive/html/qemu-devel/2022-04/msg00961.html
		 */
		pmp_addr[*index_p - 1] = -1L;
	}
#endif

	return ok;
}
#endif /* CONFIG_PMP_STACK_GUARD */
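
/*
 * Note on the catch-all entry above: with start=0 and size=0 (and a nonzero
 * preceding pmpaddr value) the NAPOT branch of set_pmp_entry() is taken,
 * since (size - 1) wraps around to all ones. NAPOT_RANGE(0) is therefore
 * SIZE_MAX >> 1 and the resulting pmpaddr value has every significant bit
 * set (0x1fffffff on RV32, as mentioned in the QEMU workaround), which
 * encodes a NAPOT region covering the whole address space.
 */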

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * PMP registers are accessed with the csr instructions, which can only
 * designate the actual register with an immediate value. This is performed
 * more efficiently in assembly code (pmp.S) than is possible with C code.
 *
 * Requirement: start < end && end <= CONFIG_PMP_SLOTS
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 */
extern void z_riscv_write_pmp_entries(unsigned int start, unsigned int end,
				      bool clear_trailing_entries,
				      const unsigned long *pmp_addr,
				      const unsigned long *pmp_cfg);

/**
 * @brief Write a range of PMP entries to corresponding PMP registers
 *
 * This performs some sanity checks before calling z_riscv_write_pmp_entries().
 *
 * @param start Start of the PMP range to be written
 * @param end End (exclusive) of the PMP range to be written
 * @param clear_trailing_entries True if trailing entries must be turned off
 * @param pmp_addr Array of pmpaddr values (starting at entry 0).
 * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
 * @param index_limit Index value representing the size of the provided arrays.
 */
static void write_pmp_entries(unsigned int start, unsigned int end,
			      bool clear_trailing_entries,
			      unsigned long *pmp_addr, unsigned long *pmp_cfg,
			      unsigned int index_limit)
{
	__ASSERT(start < end && end <= index_limit &&
		 index_limit <= CONFIG_PMP_SLOTS,
		 "bad PMP range (start=%u end=%u)", start, end);

	/* Be extra paranoid in case assertions are disabled */
	if (start >= end || end > index_limit) {
		k_panic();
	}

	if (clear_trailing_entries) {
		/*
		 * There are many config entries per pmpcfg register.
		 * Make sure to clear trailing garbage in the last
		 * register to be written if any. Remaining registers
		 * will be cleared in z_riscv_write_pmp_entries().
		 */
		uint8_t *pmp_n_cfg = (uint8_t *)pmp_cfg;
		unsigned int index;

		for (index = end; index % PMPCFG_STRIDE != 0; index++) {
			pmp_n_cfg[index] = 0;
		}
	}

	print_pmp_entries(start, end, pmp_addr, pmp_cfg, "register write");

#ifdef CONFIG_QEMU_TARGET
	/*
	 * A QEMU bug may create bad transient PMP representations causing
	 * false access faults to be reported. Work around it by setting
	 * pmp registers to zero from the update start point to the end
	 * before updating them with new values.
	 * The QEMU fix is here with more details about this bug:
	 * https://lists.gnu.org/archive/html/qemu-devel/2022-06/msg02800.html
	 */
	static const unsigned long pmp_zero[CONFIG_PMP_SLOTS] = { 0, };

	z_riscv_write_pmp_entries(start, CONFIG_PMP_SLOTS, false,
				  pmp_zero, pmp_zero);
#endif

	z_riscv_write_pmp_entries(start, end, clear_trailing_entries,
				  pmp_addr, pmp_cfg);
}

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries() for m-mode.
 */
#define PMP_M_MODE(thread) \
	thread->arch.m_mode_pmpaddr_regs, \
	thread->arch.m_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.m_mode_pmpaddr_regs)

/**
 * @brief Abstract the last 3 arguments to set_pmp_entry() and
 *        write_pmp_entries() for u-mode.
 */
#define PMP_U_MODE(thread) \
	thread->arch.u_mode_pmpaddr_regs, \
	thread->arch.u_mode_pmpcfg_regs, \
	ARRAY_SIZE(thread->arch.u_mode_pmpaddr_regs)
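
/*
 * For example (illustration only), a call such as
 *
 *   set_pmp_entry(&index, PMP_R | PMP_W, start, size, PMP_U_MODE(thread));
 *
 * expands to
 *
 *   set_pmp_entry(&index, PMP_R | PMP_W, start, size,
 *                 thread->arch.u_mode_pmpaddr_regs,
 *                 thread->arch.u_mode_pmpcfg_regs,
 *                 ARRAY_SIZE(thread->arch.u_mode_pmpaddr_regs));
 *
 * so the per-thread shadow arrays and their size are always passed
 * together and consistently.
 */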

/*
 * This is used to seed thread PMP copies with global m-mode cfg entries
 * sharing the same cfg register. Locked entries aren't modifiable but
 * we could have non-locked entries here too.
 */
static unsigned long global_pmp_cfg[1];
static unsigned long global_pmp_last_addr;

/* End of global PMP entry range */
static unsigned int global_pmp_end_index;

/**
 * @brief Initialize the PMP with global entries on each CPU
 */
void z_riscv_pmp_init(void)
{
	unsigned long pmp_addr[CONFIG_PMP_SLOTS];
	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
	unsigned int index = 0;

	/* The read-only area is always there for every mode */
	set_pmp_entry(&index, PMP_R | PMP_X | PMP_L,
		      (uintptr_t)__rom_region_start,
		      (size_t)__rom_region_size,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP
	/*
	 * Use a PMP slot to make the region starting at address 0x0
	 * inaccessible for detecting null pointer dereferences.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      0,
		      CONFIG_NULL_POINTER_EXCEPTION_REGION_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MULTITHREADING
	/*
	 * Set the stack guard for this CPU's IRQ stack by making the bottom
	 * addresses inaccessible. This will never change so we do it here
	 * and lock it too.
	 */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_interrupt_stacks[_current_cpu->id],
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/*
	 * This early, the kernel init code uses the IRQ stack and we want to
	 * safeguard it as soon as possible. But we need a temporary default
	 * "catch all" PMP entry for MPRV to work. Later on, this entry will
	 * be set for each thread by z_riscv_pmp_stackguard_prepare().
	 */
	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Activate our non-locked PMP entries for m-mode */
	csr_set(mstatus, MSTATUS_MPRV);

	/* And forget about that last entry as we won't need it later */
	index--;
#else
	/* Without multithreading, set up stack guards for the IRQ and main stacks */
	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_interrupt_stacks,
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	set_pmp_entry(&index, PMP_NONE | PMP_L,
		      (uintptr_t)z_main_stack,
		      Z_RISCV_STACK_GUARD_SIZE,
		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif /* CONFIG_MULTITHREADING */
#else
	/* Write those entries to PMP regs. */
	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * The IRQ stack guard area is different for each CPU.
	 * Make sure TOR entry sharing won't be attempted with it by
	 * remembering a bogus address for those entries.
	 */
	pmp_addr[index - 1] = -1L;
#endif

	/* Make sure secondary CPUs produced the same values */
	if (global_pmp_end_index != 0) {
		__ASSERT(global_pmp_end_index == index, "");
		__ASSERT(global_pmp_cfg[0] == pmp_cfg[0], "");
		__ASSERT(global_pmp_last_addr == pmp_addr[index - 1], "");
	}
#endif

	global_pmp_cfg[0] = pmp_cfg[0];
	global_pmp_last_addr = pmp_addr[index - 1];
	global_pmp_end_index = index;

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("initial register dump");
	}
}
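
/*
 * Example outcome (illustrative numbers only; the actual count depends on
 * the configuration and on the alignment of the ROM region): if the
 * read-only area needs two TOR slots and the IRQ stack guard fits a single
 * NAPOT slot, global_pmp_end_index ends up as 3, global_pmp_cfg[0] holds
 * the corresponding packed pmpcfg byte values, and global_pmp_last_addr is
 * the pmpaddr value of slot 2. These are the values that
 * z_riscv_pmp_thread_init() seeds each thread's PMP copy with.
 */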

/**
 * @brief Initialize the per-thread PMP register copy with global values.
 */
#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_USERSPACE)
static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
						   unsigned long *pmp_cfg,
						   unsigned int index_limit)
{
	ARG_UNUSED(index_limit);

	/*
	 * Retrieve pmpcfg0 partial content from global entries.
	 */
	pmp_cfg[0] = global_pmp_cfg[0];

	/*
	 * Retrieve the pmpaddr value matching the last global PMP slot.
	 * This is so that set_pmp_entry() can safely attempt TOR with it.
	 */
	pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr;

	return global_pmp_end_index;
}
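
/*
 * Note: copying global_pmp_cfg[0] above matters because the first
 * thread-specific slots may share a pmpcfg CSR with the global entries
 * (several 8-bit config fields are packed per CSR). When the per-thread
 * range is later written in batch, that CSR is rewritten whole, so the
 * global config bytes have to be present in the thread's shadow copy too.
 */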
#endif

#ifdef CONFIG_PMP_STACK_GUARD

#ifdef CONFIG_MULTITHREADING
/**
 * @brief Prepare the PMP stackguard content for the given thread.
 *
 * This is called once during new thread creation.
 */
void z_riscv_pmp_stackguard_prepare(struct k_thread *thread)
{
	unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread));
	uintptr_t stack_bottom;

	/* make the bottom addresses of our stack inaccessible */
	stack_bottom = thread->stack_info.start - K_KERNEL_STACK_RESERVED;
#ifdef CONFIG_USERSPACE
	if (thread->arch.priv_stack_start != 0) {
		stack_bottom = thread->arch.priv_stack_start;
	} else if (z_stack_is_user_capable(thread->stack_obj)) {
		stack_bottom = thread->stack_info.start - K_THREAD_STACK_RESERVED;
	}
#endif
	set_pmp_entry(&index, PMP_NONE,
		      stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
		      PMP_M_MODE(thread));
	set_pmp_mprv_catchall(&index, PMP_M_MODE(thread));

	/* remember how many entries we use */
	thread->arch.m_mode_pmp_end_index = index;
}

/**
 * @brief Write PMP stackguard content to actual PMP registers
 *
 * This is called on every context switch.
 */
void z_riscv_pmp_stackguard_enable(struct k_thread *thread)
{
	LOG_DBG("pmp_stackguard_enable for thread %p", thread);

	/*
	 * Disable (non-locked) PMP entries for m-mode while we update them.
	 * While at it, also clear MSTATUS_MPP as it must be cleared for
	 * MSTATUS_MPRV to be effective later.
	 */
	csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP);

	/* Write our m-mode PMP entries */
	write_pmp_entries(global_pmp_end_index, thread->arch.m_mode_pmp_end_index,
			  false /* no need to clear to the end */,
			  PMP_M_MODE(thread));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("m-mode register dump");
	}

	/* Activate our non-locked PMP entries in m-mode */
	csr_set(mstatus, MSTATUS_MPRV);
}

#endif /* CONFIG_MULTITHREADING */

/**
 * @brief Remove the PMP stackguard content from the actual PMP registers
 */
void z_riscv_pmp_stackguard_disable(void)
{
	unsigned long pmp_addr[PMP_M_MODE_SLOTS];
	unsigned long pmp_cfg[PMP_M_MODE_SLOTS / sizeof(unsigned long)];
	unsigned int index = global_pmp_end_index;

	/* Retrieve the pmpaddr value matching the last global PMP slot. */
	pmp_addr[global_pmp_end_index - 1] = global_pmp_last_addr;

	/* Disable (non-locked) PMP entries for m-mode while we update them. */
	csr_clear(mstatus, MSTATUS_MPRV);

	/*
	 * Set a temporary default "catch all" PMP entry for MPRV to work,
	 * except for the global locked entries.
	 */
	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	/* Write the "catch all" entry and clear unlocked entries in the PMP regs. */
	write_pmp_entries(global_pmp_end_index, index,
			  true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("catch all register dump");
	}
}

#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_USERSPACE

/**
 * @brief Initialize the usermode portion of the PMP configuration.
 *
 * This is called once during new thread creation.
 */
void z_riscv_pmp_usermode_init(struct k_thread *thread)
{
	/* Only indicate that the u-mode PMP is not prepared yet */
	thread->arch.u_mode_pmp_end_index = 0;
}

/**
 * @brief Prepare the u-mode PMP content for the given thread.
 *
 * This is called once before making the transition to usermode.
 */
void z_riscv_pmp_usermode_prepare(struct k_thread *thread)
{
	unsigned int index = z_riscv_pmp_thread_init(PMP_U_MODE(thread));

	LOG_DBG("pmp_usermode_prepare for thread %p", thread);

	/* Map the usermode stack */
	set_pmp_entry(&index, PMP_R | PMP_W,
		      thread->stack_info.start, thread->stack_info.size,
		      PMP_U_MODE(thread));

	thread->arch.u_mode_pmp_domain_offset = index;
	thread->arch.u_mode_pmp_end_index = index;
	thread->arch.u_mode_pmp_update_nr = 0;
}

/**
 * @brief Convert partition information into PMP entries
 */
static void resync_pmp_domain(struct k_thread *thread,
			      struct k_mem_domain *domain)
{
	unsigned int index = thread->arch.u_mode_pmp_domain_offset;
	int p_idx, remaining_partitions;
	bool ok;

	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	remaining_partitions = domain->num_partitions;
	for (p_idx = 0; remaining_partitions > 0; p_idx++) {
		struct k_mem_partition *part = &domain->partitions[p_idx];

		if (part->size == 0) {
			/* skip empty partition */
			continue;
		}

		remaining_partitions--;

		if (part->size < 4) {
			/* 4 bytes is the minimum we can map */
			LOG_ERR("non-empty partition too small");
			__ASSERT(false, "");
			continue;
		}

		ok = set_pmp_entry(&index, part->attr.pmp_attr,
				   part->start, part->size, PMP_U_MODE(thread));
		__ASSERT(ok,
			 "no PMP slot left for %d remaining partitions in domain %p",
			 remaining_partitions + 1, domain);
	}

	thread->arch.u_mode_pmp_end_index = index;
	thread->arch.u_mode_pmp_update_nr = domain->arch.pmp_update_nr;

	k_spin_unlock(&z_mem_domain_lock, key);
}

/**
 * @brief Write PMP usermode content to actual PMP registers
 *
 * This is called on every context switch.
 */
void z_riscv_pmp_usermode_enable(struct k_thread *thread)
{
	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

	LOG_DBG("pmp_usermode_enable for thread %p with domain %p", thread, domain);

	if (thread->arch.u_mode_pmp_end_index == 0) {
		/* z_riscv_pmp_usermode_prepare() has not been called yet */
		return;
	}

	if (thread->arch.u_mode_pmp_update_nr != domain->arch.pmp_update_nr) {
		/*
		 * Resynchronize our PMP entries with
		 * the latest domain partition information.
		 */
		resync_pmp_domain(thread, domain);
	}

#ifdef CONFIG_PMP_STACK_GUARD
	/* Make sure m-mode PMP usage is disabled before we reprogram it */
	csr_clear(mstatus, MSTATUS_MPRV);
#endif

	/* Write our u-mode PMP entries */
	write_pmp_entries(global_pmp_end_index, thread->arch.u_mode_pmp_end_index,
			  true /* must clear to the end */,
			  PMP_U_MODE(thread));

	if (PMP_DEBUG_DUMP) {
		dump_pmp_regs("u-mode register dump");
	}
}

int arch_mem_domain_max_partitions_get(void)
{
	int available_pmp_slots = CONFIG_PMP_SLOTS;

	/* remove those slots dedicated to global entries */
	available_pmp_slots -= global_pmp_end_index;

	/*
	 * User thread stack mapping:
	 * 1 slot if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y,
	 * most likely 2 slots otherwise.
	 */
	available_pmp_slots -=
		IS_ENABLED(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) ? 1 : 2;

	/*
	 * Each partition may require either 1 or 2 PMP slots depending
	 * on a couple of factors that are not known in advance. Even when
	 * arch_mem_domain_partition_add() is called, we can't tell if a
	 * given partition will fit in the remaining PMP slots of an
	 * affected thread if it hasn't executed in usermode yet.
	 *
	 * Give the most optimistic answer here (which should be pretty
	 * accurate if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT=y) and be
	 * prepared to deny availability in resync_pmp_domain() if this
	 * estimate was too high.
	 */
	return available_pmp_slots;
}
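
/*
 * Worked example (illustrative numbers only): with CONFIG_PMP_SLOTS=16,
 * 3 slots consumed by global entries and 2 slots reserved for the user
 * stack mapping, this returns 16 - 3 - 2 = 11. Since a non-aligned
 * partition may need 2 slots, as few as 5 partitions might actually fit;
 * resync_pmp_domain() is where such a shortfall would be detected.
 */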

int arch_mem_domain_init(struct k_mem_domain *domain)
{
	domain->arch.pmp_update_nr = 0;
	return 0;
}

int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	ARG_UNUSED(partition_id);

	/* Force resynchronization for every thread using this domain */
	domain->arch.pmp_update_nr += 1;
	return 0;
}

int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	ARG_UNUSED(partition_id);

	/* Force resynchronization for every thread using this domain */
	domain->arch.pmp_update_nr += 1;
	return 0;
}

int arch_mem_domain_thread_add(struct k_thread *thread)
{
	/* Force resynchronization for this thread */
	thread->arch.u_mode_pmp_update_nr = 0;
	return 0;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	ARG_UNUSED(thread);

	return 0;
}

#define IS_WITHIN(inner_start, inner_size, outer_start, outer_size) \
	((inner_start) >= (outer_start) && (inner_size) <= (outer_size) && \
	 ((inner_start) - (outer_start)) <= ((outer_size) - (inner_size)))
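
/*
 * The last clause of IS_WITHIN() is what makes the check overflow-safe:
 * rather than comparing (inner_start + inner_size) against
 * (outer_start + outer_size), which could wrap around, it compares the
 * offset of the inner region within the outer one against the slack left
 * by their size difference; both subtractions are non-negative once the
 * first two clauses hold. For example (values for illustration only),
 * inner_start=0xfffffff0 with inner_size=0x20 would wrap if summed, but
 * is correctly rejected for an outer region of start 0xffffff00 and size
 * 0x100 since 0xf0 > 0x100 - 0x20.
 */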

int arch_buffer_validate(const void *addr, size_t size, int write)
{
	uintptr_t start = (uintptr_t)addr;
	int ret = -1;

	/* Check if this is on the stack */
	if (IS_WITHIN(start, size,
		      _current->stack_info.start, _current->stack_info.size)) {
		return 0;
	}

	/* Check if this is within the global read-only area */
	if (!write) {
		uintptr_t ro_start = (uintptr_t)__rom_region_start;
		size_t ro_size = (size_t)__rom_region_size;

		if (IS_WITHIN(start, size, ro_start, ro_size)) {
			return 0;
		}
	}

	/* Look for a matching partition in our memory domain */
	struct k_mem_domain *domain = _current->mem_domain_info.mem_domain;
	int p_idx, remaining_partitions;
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	remaining_partitions = domain->num_partitions;
	for (p_idx = 0; remaining_partitions > 0; p_idx++) {
		struct k_mem_partition *part = &domain->partitions[p_idx];

		if (part->size == 0) {
			/* unused partition */
			continue;
		}

		remaining_partitions--;

		if (!IS_WITHIN(start, size, part->start, part->size)) {
			/* unmatched partition */
			continue;
		}

		/* partition matched: determine access result */
		if ((part->attr.pmp_attr & (write ? PMP_W : PMP_R)) != 0) {
			ret = 0;
		}
		break;
	}

	k_spin_unlock(&z_mem_domain_lock, key);
	return ret;
}

#endif /* CONFIG_USERSPACE */