Cross-reference of struct k_thread across the kernel tree: line number, matched text, and the enclosing function where the indexer reported one. Truncated match lists end in [all …].

/kernel/include/
kthread.h
    38:  void z_thread_monitor_exit(struct k_thread *thread);
    60:  static inline int thread_is_preemptible(struct k_thread *thread)  in thread_is_preemptible()
    67:  static inline int thread_is_metairq(struct k_thread *thread)  in thread_is_metairq()
    79:  static inline bool is_thread_dummy(struct k_thread *thread)  in is_thread_dummy()
    86:  static inline bool z_is_thread_suspended(struct k_thread *thread)  in z_is_thread_suspended()
    91:  static inline bool z_is_thread_pending(struct k_thread *thread)  in z_is_thread_pending()
    109: static inline bool z_is_thread_ready(struct k_thread *thread)  in z_is_thread_ready()
    119: static inline bool z_is_thread_queued(struct k_thread *thread)  in z_is_thread_queued()
    138: SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);  in z_mark_thread_as_suspended()
    145: SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);  in z_mark_thread_as_not_suspended()
    [all …]

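Most of these helpers are one-line predicates over thread->base.thread_state. A minimal sketch of the shared pattern, assuming the _THREAD_* state bits from kernel_structs.h (the names match the tree; the bodies here are paraphrased, not verbatim):

/* Sketch of the state-bit predicate pattern; _THREAD_SUSPENDED and
 * _THREAD_QUEUED come from kernel_structs.h (pulled in by kernel.h). */
#include <zephyr/kernel.h>

static inline bool is_suspended_sketch(struct k_thread *thread)
{
    return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

static inline bool is_queued_sketch(struct k_thread *thread)
{
    return (thread->base.thread_state & _THREAD_QUEUED) != 0U;
}
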
ksched.h
    49:  extern struct k_thread _thread_dummy;
    61:  void z_unpend_thread(struct k_thread *thread);
    67:  void z_reset_time_slice(struct k_thread *curr);
    69:  void z_sched_start(struct k_thread *thread);
    70:  void z_ready_thread(struct k_thread *thread);
    71:  void z_requeue_current(struct k_thread *curr);
    72:  struct k_thread *z_swap_next_thread(void);
    73:  void z_thread_abort(struct k_thread *thread);
    75:  bool thread_is_sliceable(struct k_thread *thread);
    164: struct k_thread *thread = NULL;  in z_unpend_first_thread()
    [all …]

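A sketch of how a blocking primitive typically combines these declarations: pop the best waiter and make it runnable again. Illustrative only, assuming the internal ksched.h/wait_q.h headers are in scope; real call sites also hold the scheduler lock and trigger a reschedule.

/* Hypothetical helper, not in tree: wake the highest-priority waiter. */
static void wake_one_waiter(_wait_q_t *wait_q)
{
    struct k_thread *thread = z_unpend_first_thread(wait_q);

    if (thread != NULL) {
        z_ready_thread(thread); /* back onto the run queue */
    }
}
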
kernel_arch_interface.h
    76:  void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
    160: arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
    174: void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
    201: int arch_float_disable(struct k_thread *thread);
    221: int arch_float_enable(struct k_thread *thread, unsigned int options);
    230: int arch_coprocessors_disable(struct k_thread *thread);
    256: int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
    622: int arch_thread_name_set(struct k_thread *thread, const char *str);
    661: uintptr_t arch_coredump_stack_ptr_get(const struct k_thread *thread);
    672: void arch_coredump_priv_stack_dump(struct k_thread *thread);
    [all …]

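These declarations are the architecture porting surface for thread support. As a sketch, the smallest conforming implementation of one optional hook for a port without FPU sharing; returning -ENOTSUP is the convention that the k_float_disable() path propagates to callers when the feature is absent:

/* Sketch of a minimal port stub, not a real in-tree port file. */
#include <errno.h>
#include <zephyr/kernel.h>

int arch_float_disable(struct k_thread *thread)
{
    ARG_UNUSED(thread);

    return -ENOTSUP; /* FPU sharing not supported on this port */
}
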
priority_q.h
    73:  static ALWAYS_INLINE int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2)  in z_sched_prio_cmp()
    108: struct k_thread *t;  in z_priq_simple_add()
    136: struct k_thread *t;  in z_priq_simple_yield()
    145: t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);  in z_priq_simple_yield()
    160: struct k_thread *thread = NULL;  in z_priq_simple_best()
    175: struct k_thread *thread;  in z_priq_simple_mask_best()
    200: struct k_thread *t;  in z_priq_rb_add()
    240: struct k_thread *thread = NULL;  in z_priq_rb_best()
    244: thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);  in z_priq_rb_best()
    293: struct k_thread *thread)  in z_priq_mq_add()
    [all …]

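The CONTAINER_OF() calls above are the central idiom here: ready queues link intrusive nodes embedded in struct k_thread, never the threads themselves. A hypothetical helper showing the recovery step for the dlist backend:

/* Sketch: map the head dlist node back to its owning thread. */
#include <zephyr/kernel.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>

static struct k_thread *peek_first_thread(sys_dlist_t *pq)
{
    sys_dnode_t *n = sys_dlist_peek_head(pq);

    return (n == NULL) ? NULL
                       : CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
}
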
kernel_internal.h
    73:  extern char *z_setup_new_thread(struct k_thread *new_thread,
    116: arch_thread_return_value_set(struct k_thread *thread, unsigned int value)  in arch_thread_return_value_set()
    123: z_thread_return_value_set_with_data(struct k_thread *thread,  in z_thread_return_value_set_with_data()
    144: extern struct k_thread z_main_thread;
    148: extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
    165: void z_mem_domain_init_thread(struct k_thread *thread);
    168: void z_mem_domain_exit_thread(struct k_thread *thread);
    292: void k_thread_abort_cleanup(struct k_thread *thread);
    304: void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);

wait_q.h
    36:  static inline struct k_thread *z_waitq_head(_wait_q_t *w)  in z_waitq_head()
    38:  return (struct k_thread *)rb_get_min(&w->waitq.tree);  in z_waitq_head()
    52:  static inline struct k_thread *z_waitq_head(_wait_q_t *w)
    54:  return (struct k_thread *)sys_dlist_peek_head(&w->waitq);

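The two z_waitq_head() definitions are the rb-tree (CONFIG_WAITQ_SCALABLE) and dlist builds of the same contract, so callers stay backend-agnostic, as in this hypothetical wrapper:

/* Sketch: emptiness test that works on either wait-queue backend. */
static inline bool waitq_is_empty(_wait_q_t *w)
{
    return z_waitq_head(w) == NULL;
}
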
timeout_q.h
    59:  static inline k_ticks_t z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)  in z_add_thread_timeout()
    64:  static inline void z_abort_thread_timeout(struct k_thread *thread)  in z_abort_thread_timeout()
    69:  static inline bool z_is_aborted_thread_timeout(struct k_thread *thread)  in z_is_aborted_thread_timeout()
    90:  static inline k_ticks_t z_add_thread_timeout(struct k_thread *thread, k_timeout_t ticks)

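A sketch of the arm/abort pairing these inlines imply for a pend-with-timeout path. Illustrative only: the real pend logic lives in sched.c, and the K_FOREVER check follows the usual convention of arming no timeout for an unbounded wait.

/* Hypothetical shape of a pend path that bounds the wait. */
static void pend_with_timeout(struct k_thread *thread, k_timeout_t timeout)
{
    if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
        (void)z_add_thread_timeout(thread, timeout);
    }

    /* ... thread pends; on a wakeup before expiry the timeout is released: */
    z_abort_thread_timeout(thread);
}
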
kswap.h
    29:  void z_smp_release_global_lock(struct k_thread *thread);
    53:  static inline void z_sched_switch_spin(struct k_thread *thread)  in z_sched_switch_spin()
    81:  struct k_thread *new_thread, *old_thread;  in do_swap()
    197: static inline void z_sched_switch_spin(struct k_thread *thread)  in z_sched_switch_spin()
    246: void z_dummy_thread_init(struct k_thread *dummy_thread);

ipi.h
    24:  atomic_val_t ipi_mask_create(struct k_thread *thread);

/kernel/
sched.c
    28:   extern struct k_thread *pending_current;
    291:  struct k_thread *thread = next_up();  in update_cache()
    644:  struct k_thread *thread = NULL;  in z_unpend1_no_timeout()
    741:  struct k_thread *new_thread;  in need_swap()
    804:  struct k_thread *ret = next_up();  in z_swap_next_thread()
    942:  struct k_thread *thread;  in z_unpend_all()
    1007: struct k_thread *thread = tid;  in z_impl_k_thread_deadline_set()
    1030: struct k_thread *thread = tid;  in z_vrfy_k_thread_deadline_set()
    1222: struct k_thread *thread;  in unpend_all()
    1458: struct k_thread *thread;  in z_sched_wake()
    [all …]

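The z_impl_/z_vrfy_k_thread_deadline_set() pair above implements the deadline syscall. A sketch of the application-side call, assuming CONFIG_SCHED_DEADLINE; deadlines are expressed in hardware cycles relative to now:

/* Sketch: arm a 10 ms EDF deadline on a thread. */
#include <zephyr/kernel.h>

void arm_deadline(k_tid_t tid)
{
    k_thread_deadline_set(tid, (int)k_ms_to_cyc_ceil32(10));
}
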
thread_monitor.c
    15:  void z_thread_monitor_exit(struct k_thread *thread)  in z_thread_monitor_exit()
    22:  struct k_thread *prev_thread;  in z_thread_monitor_exit()
    43:  struct k_thread *thread;  in thread_foreach_helper()
    82:  SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);  in k_thread_foreach()
    84:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);  in k_thread_foreach()
    89:  SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);  in k_thread_foreach_unlocked()
    91:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);  in k_thread_foreach_unlocked()
    98:  SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);  in k_thread_foreach_filter_by_cpu()
    100: SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);  in k_thread_foreach_filter_by_cpu()
    106: SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);  in k_thread_foreach_unlocked_filter_by_cpu()
    [all …]

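A sketch of driving this iteration machinery from application code. Requires CONFIG_THREAD_MONITOR; thread names additionally require CONFIG_THREAD_NAME and may be NULL:

/* Sketch: print every known thread via k_thread_foreach(). */
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void dump_one(const struct k_thread *thread, void *user_data)
{
    ARG_UNUSED(user_data);

    const char *name = k_thread_name_get((k_tid_t)thread);

    printk("%p %s\n", (void *)thread, (name != NULL) ? name : "<unnamed>");
}

void dump_threads(void)
{
    k_thread_foreach(dump_one, NULL);
}
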
float.c
    11:  int z_impl_k_float_disable(struct k_thread *thread)  in z_impl_k_float_disable()
    21:  int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)  in z_impl_k_float_enable()
    33:  static inline int z_vrfy_k_float_disable(struct k_thread *thread)  in z_vrfy_k_float_disable()
    40:  static inline int z_vrfy_k_float_enable(struct k_thread *thread, unsigned int options)  in z_vrfy_k_float_enable()

timeslicing.c
    22:  struct k_thread *pending_current;
    25:  static inline int slice_time(struct k_thread *thread)  in slice_time()
    39:  bool thread_is_sliceable(struct k_thread *thread)  in thread_is_sliceable()
    68:  void z_reset_time_slice(struct k_thread *thread)  in z_reset_time_slice()
    90:  void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,  in k_thread_time_slice_set()
    106: struct k_thread *curr = _current;  in z_time_slice()

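k_thread_time_slice_set() gives one thread its own slice and expiry callback. A sketch, assuming CONFIG_TIMESLICE_PER_THREAD and the k_thread_timeslice_fn_t callback shape; the callback and tick math are illustrative, not taken from this file:

/* Sketch: give a thread a private 5 ms slice with an expiry hook. */
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void on_slice_expired(struct k_thread *thread, void *data)
{
    ARG_UNUSED(data);
    printk("slice expired: %p\n", (void *)thread);
}

void give_private_slice(struct k_thread *t)
{
    k_thread_time_slice_set(t, (int32_t)k_ms_to_ticks_ceil32(5),
                            on_slice_expired, NULL);
}
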
priority_queues.c
    14:  struct k_thread *thread_a, *thread_b;  in z_priq_rb_lessthan()
    17:  thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);  in z_priq_rb_lessthan()
    18:  thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);  in z_priq_rb_lessthan()

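From these lines, z_priq_rb_lessthan() recovers both threads from their rbnodes and orders them. A paraphrased sketch of that comparator shape, assuming an insertion-order tie-break key (base.order_key) from the scalable-queue configuration; not the verbatim body:

/* Sketch of an rb-tree lessthan callback over struct k_thread. */
#include <zephyr/kernel.h>
#include <zephyr/sys/rb.h>

static bool priq_rb_lessthan_sketch(struct rbnode *a, struct rbnode *b)
{
    struct k_thread *ta = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
    struct k_thread *tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
    int32_t cmp = z_sched_prio_cmp(ta, tb);

    if (cmp != 0) {
        return cmp > 0; /* higher effective priority sorts first */
    }

    /* equal priority: keep FIFO order among peers */
    return ta->base.order_key < tb->base.order_key;
}
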
thread.c
    58:   offsetof(struct k_thread, obj_core));  in init_thread_obj_core_list()
    509:  char *z_setup_new_thread(struct k_thread *new_thread,  in z_setup_new_thread()
    772:  SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);  in k_thread_user_mode_enter()
    900:  const struct k_thread *thread)  in z_vrfy_k_thread_timeout_remaining_ticks()
    908:  const struct k_thread *thread)  in z_vrfy_k_thread_timeout_expires_ticks()
    924:  SYS_PORT_TRACING_FUNC(k_thread, switched_in);  in z_thread_mark_switched_in()
    942:  SYS_PORT_TRACING_FUNC(k_thread, switched_out);  in z_thread_mark_switched_out()
    1020: static struct k_thread *thread_to_cleanup;
    1030: void defer_thread_cleanup(struct k_thread *thread)  in defer_thread_cleanup()
    1062: void do_thread_cleanup(struct k_thread *thread)  in do_thread_cleanup()
    [all …]

busy_wait.c
    14:  SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);  in z_impl_k_busy_wait()
    16:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);  in z_impl_k_busy_wait()
    47:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);  in z_impl_k_busy_wait()

nothread.c
    27:  SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);  in z_impl_k_sleep()
    33:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);  in z_impl_k_sleep()
    57:  SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);  in z_impl_k_sleep()

usage.c
    61:  static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)  in sched_thread_update_usage()
    74:  void z_sched_usage_start(struct k_thread *thread)  in z_sched_usage_start()
    173: void z_sched_thread_usage(struct k_thread *thread,  in z_sched_thread_usage()
    360: struct k_thread *thread;  in z_thread_stats_query()
    362: thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);  in z_thread_stats_query()
    373: struct k_thread *thread;  in z_thread_stats_reset()
    375: thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);  in z_thread_stats_reset()
    418: struct k_thread *thread;  in z_thread_stats_disable()
    420: thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);  in z_thread_stats_disable()
    431: struct k_thread *thread;  in z_thread_stats_enable()
    [all …]

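A sketch of reading the counters this file maintains through the public stats API, assuming CONFIG_THREAD_RUNTIME_STATS:

/* Sketch: total execution cycles accumulated by one thread. */
#include <zephyr/kernel.h>

uint64_t thread_cycles(k_tid_t tid)
{
    k_thread_runtime_stats_t stats;

    if (k_thread_runtime_stats_get(tid, &stats) != 0) {
        return 0U;
    }

    return stats.execution_cycles;
}
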
events.c
    44:  struct k_thread *head;
    103: static int event_walk_op(struct k_thread *thread, void *data)  in event_walk_op()
    137: struct k_thread *thread;  in k_event_post_internal()
    166: struct k_thread *next;  in k_event_post_internal()
    247: struct k_thread *thread;  in k_event_wait_internal()

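A sketch of the application-visible counterpart of k_event_post_internal() and k_event_wait_internal(), assuming CONFIG_EVENTS; the event object and bit here are hypothetical:

/* Sketch: one producer signals a bit, one consumer waits on it. */
#include <zephyr/kernel.h>

K_EVENT_DEFINE(io_done);

void signal_io_done(void)
{
    k_event_post(&io_done, BIT(0));
}

uint32_t wait_io_done(void)
{
    /* reset=false: leave the bit set for any other waiters */
    return k_event_wait(&io_done, BIT(0), false, K_FOREVER);
}
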
ipi.c
    26:  atomic_val_t ipi_mask_create(struct k_thread *thread)  in ipi_mask_create()
    35:  struct k_thread *cpu_thread;  in ipi_mask_create()

smp.c
    86:  void z_smp_release_global_lock(struct k_thread *thread)  in z_smp_release_global_lock()
    252: __attribute_const__ struct k_thread *z_smp_current_get(void)  in z_smp_current_get()
    260: struct k_thread *t = _current_cpu->current;  in z_smp_current_get()

userspace.c
    113: struct k_thread *parent;
    551: static unsigned int thread_index_get(struct k_thread *thread)  in thread_index_get()
    623: ((struct k_thread *)ko->name != ctx->parent)) {  in wordlist_cb()
    628: void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)  in k_thread_perms_inherit()
    641: void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)  in k_thread_perms_set()
    650: void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)  in k_thread_perms_clear()
    667: void k_thread_perms_all_clear(struct k_thread *thread)  in k_thread_perms_all_clear()
    728: void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)  in z_impl_k_object_access_grant()
    737: void k_object_access_revoke(const void *object, struct k_thread *thread)  in k_object_access_revoke()

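A sketch pairing the grant/revoke entry points listed above, as called from supervisor-mode code; assumes CONFIG_USERSPACE, and the semaphore and thread here are hypothetical:

/* Sketch: share a kernel object with a user thread, then take it back. */
#include <zephyr/kernel.h>

K_SEM_DEFINE(shared_sem, 0, 1);

void share_with(struct k_thread *tid)
{
    k_object_access_grant(&shared_sem, tid);
}

void unshare_from(struct k_thread *tid)
{
    k_object_access_revoke(&shared_sem, tid);
}
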
fatal.c
    49:  static const char *thread_name_get(struct k_thread *thread)  in thread_name_get()
    92:  struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ?  in z_fatal_error()

mailbox.c
    157: struct k_thread *sending_thread;  in mbox_message_dispose()
    213: struct k_thread *sending_thread;  in mbox_message_put()
    214: struct k_thread *receiving_thread;  in mbox_message_put()
    327: async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;  in k_mbox_async_put()
    385: struct k_thread *sending_thread;  in k_mbox_get()

/kernel/paging/
statistics.c
    112: void z_impl_k_mem_paging_thread_stats_get(struct k_thread *thread,  in z_impl_k_mem_paging_thread_stats_get()
    125: void z_vrfy_k_mem_paging_thread_stats_get(struct k_thread *thread,  in z_vrfy_k_mem_paging_thread_stats_get()