/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <zephyr/sys/util.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

/* TODO: Find a better place to put this. Since we pull the entire
 * lib..__modules__crypto__mbedtls.a globals into app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks, which are
 * non-recursive. Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;       /* kobj dlist */
static struct k_spinlock objfree_lock;     /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM MPU, ARC MPU and RISC-V PMP we may have two different alignment
 * requirements when dynamically allocating thread stacks, one for the
 * privileged stack and the other for the user stack, so we need to
 * account for the worst-case alignment scenario and reserve space for it.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) || defined(CONFIG_RISCV_PMP)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU || CONFIG_RISCV_PMP */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock;         /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * BITS_PER_BYTE)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;
	/* -fdata-sections doesn't work right except in very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <zephyr/otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif /* CONFIG_LOG */
	return ret;
}

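/* Context handed to wordlist_cb() by k_thread_perms_inherit(): for every
 * kernel object the parent thread has access to, the callback grants the
 * same permission to the child thread.
 */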
struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD	(sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_PMP_STACK_GUARD)
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_PMP_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN \
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					    void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

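/* Look up the size of the kernel object structure backing a given object
 * type. The per-type sizes come from the generated otype-to-size.h; any
 * type not listed there is treated as a device object and falls back to
 * sizeof(const struct device).
 */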
static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <zephyr/otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(const void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct dyn_obj.
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

end:
	k_spin_unlock(&lists_lock, key);

	return node;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, it is marked as used before
 * this function returns.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			/* Clear the bit. We already know the array index,
			 * and the bit to be cleared.
			 */
			_thread_idx_map[i] &= ~(BIT(idx - 1));

			/* Clear permission from all objects */
			k_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* Prevent leaked permissions when the index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	/* Figure out which bit to set in _thread_idx_map[] and set it. */
	int base = tidx / NUM_BITS(_thread_idx_map[0]);
	int offset = tidx % NUM_BITS(_thread_idx_map[0]);

	_thread_idx_map[base] |= BIT(offset);
}

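/* Allocate the struct dyn_obj container plus the backing storage for the
 * object itself. Thread stacks are special-cased: the data block is sized
 * by STACK_ELEMENT_DATA_SIZE() so that it can also hold the privileged
 * stack and, with CONFIG_GEN_PRIV_STACKS, a trailing struct z_stack_data
 * describing it. On success the new object is appended to obj_list.
 */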
static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						   adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) || defined(CONFIG_RISCV_PMP)
		dyn->kobj.name = (void *)ROUND_UP(
			((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU || CONFIG_RISCV_PMP */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}

static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if ((otype <= K_OBJ_ANY) || (otype >= K_OBJ_LAST)) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:		/* Lives in user memory */
	case K_OBJ_SYS_MUTEX:		/* Lives in user memory */
	case K_OBJ_NET_SOCKET:		/* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

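/* Example usage (illustrative sketch only, not kernel documentation): a
 * supervisor thread with a thread resource pool assigned can hand a
 * dynamically allocated object to a user thread roughly like this:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	if (sem != NULL) {
 *		k_sem_init(sem, 0, 1);
 *		k_object_access_grant(sem, user_thread);
 *	}
 *
 * where user_thread is a hypothetical struct k_thread pointer.
 */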
void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}

struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find(obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

/* In the earlier linker passes, before we have the real generated
 * implementation of the lookup functions, we need some weak dummies.
 * Being __weak, they will be replaced by the generated implementations
 * in the later linker passes.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
Z_GENERIC_SECTION(.kobject_data.text.dummies)
__weak struct k_object *z_object_gperf_find(const void *obj)
{
	return NULL;
}
Z_GENERIC_SECTION(.kobject_data.text.dummies)
__weak void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
}
#else
Z_GENERIC_SECTION(.kobject_data.text.dummies)
__weak struct k_object *k_object_find(const void *obj)
{
	return NULL;
}
Z_GENERIC_SECTION(.kobject_data.text.dummies)
__weak void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

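/* Return the permission bitmap index assigned to a thread, or -1 if the
 * thread has no associated kernel object (and therefore no index).
 */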
static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}

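/* Drop a thread's permission bit on an object and, for dynamically
 * allocated objects, check whether any references remain. When the last
 * permission bit is cleared, run any type-specific cleanup and free both
 * the object data and its struct dyn_obj container.
 */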
static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif /* CONFIG_PIPES */
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    ((struct k_thread *)ko->name != ctx->parent)) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

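/* Return nonzero if the current thread may use the object: either the
 * object has been made public, or the thread's bit is set in the object's
 * permission bitmap.
 */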
static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(_current);

	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Not handled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}

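/* Validate that an object pointer refers to a kernel object of the
 * expected type, that the calling thread has permission on it, and that
 * its initialization state matches what the caller requires. Returns 0 on
 * success, -EBADF for a bad or mistyped object, -EPERM for missing
 * permission, -EINVAL if the object should be initialized but is not, and
 * -EADDRINUSE if it must be uninitialized but already is.
 */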
int k_object_validate(struct k_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     ((otype != K_OBJ_ANY) && (ko->type != otype)))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) {
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

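/* Reset an object's permission bitmap so that only the calling thread has
 * access, and mark it initialized. Used when memory that previously held
 * one kernel object is reused for another.
 */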
void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, _current);
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

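/* Example usage (illustrative sketch only): a syscall verification handler
 * typically uses the k_usermode_from_copy()/k_usermode_to_copy() wrappers
 * below to move small parameter blocks across the user/kernel boundary,
 * for instance:
 *
 *	struct some_params params;   <-- hypothetical parameter struct
 *
 *	K_OOPS(k_usermode_from_copy(&params, user_ptr, sizeof(params)));
 *
 * where K_OOPS() aborts the offending syscall if the copy fails its
 * memory access checks.
 */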
static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
		      K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}

int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(void)
{
	struct z_app_region *region, *end;

	end = (struct z_app_region *)&__app_shmem_regions_end[0];
	region = (struct z_app_region *)&__app_shmem_regions_start[0];

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait
		 * for the paging mechanism to be initialized before we can
		 * zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init (z_sys_post_kernel == false), only
		 * regions inside the pinned area are present in memory, so
		 * clear those and skip the rest. During post-kernel init
		 * (z_sys_post_kernel == true), regions inside the pinned
		 * area have already been cleared (and are possibly in use),
		 * so skip them and clear everything else.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for the
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	ARG_UNUSED(arg4);
	ARG_UNUSED(arg5);
	ARG_UNUSED(arg6);

	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <zephyr/syscall_dispatch.c>