Searched refs:task (Results 1 – 25 of 118) sorted by relevance

/kernel/livepatch/
transition.c
85 struct task_struct *g, *task; in klp_complete_transition() local
129 task = idle_task(cpu); in klp_complete_transition()
288 if (task_curr(task) && task != current) in klp_check_and_switch_task()
326 if (task == current) in klp_try_switch_task()
337 __func__, task->comm, task->pid); in klp_try_switch_task()
341 __func__, task->comm, task->pid); in klp_try_switch_task()
345 __func__, task->comm, task->pid, old_name); in klp_try_switch_task()
350 __func__, ret, task->comm, task->pid); in klp_try_switch_task()
459 task = idle_task(cpu); in klp_try_complete_transition()
537 task = idle_task(cpu); in klp_start_transition()
[all …]
/kernel/bpf/
task_iter.c
72 task = __next_thread(task); in task_group_seq_get_next()
188 ctx.task = task; in DEFINE_BPF_ITER_FUNC()
213 if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1) in bpf_iter_attach_task()
219 aux->task.pid = linfo->task.tid; in bpf_iter_attach_task()
223 aux->task.pid = linfo->task.pid; in bpf_iter_attach_task()
357 ctx.task = info->task; in DEFINE_BPF_ITER_FUNC()
619 ctx.task = info->task; in DEFINE_BPF_ITER_FUNC()
671 info->iter.task.tid = aux->task.pid; in bpf_iter_fill_link_info()
674 info->iter.task.pid = aux->task.pid; in bpf_iter_fill_link_info()
836 kit->data->task = get_task_struct(task); in bpf_iter_task_vma_new()
[all …]
bpf_task_storage.c
50 return &task->bpf_storage; in task_storage_ptr()
91 struct task_struct *task; in bpf_pid_task_storage_lookup_elem() local
106 if (!task) { in bpf_pid_task_storage_lookup_elem()
125 struct task_struct *task; in bpf_pid_task_storage_update_elem() local
143 if (!task) { in bpf_pid_task_storage_update_elem()
179 struct task_struct *task; in bpf_pid_task_storage_delete_elem() local
194 if (!task) { in bpf_pid_task_storage_delete_elem()
267 task) in BPF_CALL_2()
273 if (!task) in BPF_CALL_2()
288 task) in BPF_CALL_2()
[all …]
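
The bpf_task_storage.c hits above implement BPF_MAP_TYPE_TASK_STORAGE. For orientation, a hedged sketch of how such a map is typically declared and used from a BPF program (libbpf-style build with vmlinux.h assumed; all names below are illustrative, not taken from the indexed file):

/* Illustrative BPF program, not from the indexed file: attach a per-task
 * counter to the next task on every sched_switch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_stats {                         /* hypothetical per-task value */
	__u64 switched_in;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);   /* required for task storage */
	__type(key, int);
	__type(value, struct task_stats);
} task_stats_map SEC(".maps");

SEC("tp_btf/sched_switch")
int BPF_PROG(count_switches, bool preempt,
	     struct task_struct *prev, struct task_struct *next)
{
	struct task_stats *s;

	/* Create the slot on first use, then bump the counter. */
	s = bpf_task_storage_get(&task_stats_map, next, NULL,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (s)
		__sync_fetch_and_add(&s->switched_in, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
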
/kernel/
task_work.c
60 if (WARN_ON_ONCE(task != current)) in task_work_add()
68 head = READ_ONCE(task->task_works); in task_work_add()
79 set_notify_resume(task); in task_work_add()
82 set_notify_signal(task); in task_work_add()
85 __set_notify_signal(task); in task_work_add()
118 if (likely(!task_work_pending(task))) in task_work_cancel_match()
196 struct task_struct *task = current; in task_work_run() local
204 work = READ_ONCE(task->task_works); in task_work_run()
208 if (task->flags & PF_EXITING) in task_work_run()
222 raw_spin_lock_irq(&task->pi_lock); in task_work_run()
[all …]
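
The task_work.c hits above are the add/cancel/run paths of the task_work API. A minimal kernel-side sketch of the usual calling pattern, assuming built-in or module code (the my_* names are illustrative):

/* Illustrative sketch, not from the indexed file: queue a callback that runs
 * in the target task's own context on its next return to user mode. */
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct my_deferred {                    /* hypothetical container */
	struct callback_head twork;
	int payload;
};

static void my_deferred_fn(struct callback_head *head)
{
	struct my_deferred *d = container_of(head, struct my_deferred, twork);

	/* Executes as the task the work was queued on. */
	pr_info("payload %d handled by %s\n", d->payload, current->comm);
	kfree(d);
}

static int my_queue_on(struct task_struct *task, int payload)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	d->payload = payload;
	init_task_work(&d->twork, my_deferred_fn);

	/* TWA_RESUME: run when the task next returns to user mode;
	 * task_work_add() fails once the task is exiting. */
	if (task_work_add(task, &d->twork, TWA_RESUME)) {
		kfree(d);
		return -ESRCH;
	}
	return 0;
}
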
pid.c
332 &task->thread_pid : in task_pid_ptr()
380 attach_pid(task, type); in change_pid()
444 struct task_struct *task; in find_get_task_by_vpid() local
448 if (task) in find_get_task_by_vpid()
449 get_task_struct(task); in find_get_task_by_vpid()
452 return task; in find_get_task_by_vpid()
573 struct task_struct *task; in pidfd_get_task() local
595 if (!task) in pidfd_get_task()
599 return task; in pidfd_get_task()
873 if (!task) in pidfd_getfd()
[all …]
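
find_get_task_by_vpid() and pidfd_get_task() in the pid.c hits both return a task_struct with a reference held. A hedged sketch of the lookup/put pattern (kernel-side, illustrative function name):

/* Illustrative sketch, not from the indexed file: resolve a PID as seen in
 * the caller's PID namespace and drop the reference when done. */
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int my_inspect_pid(pid_t nr)
{
	struct task_struct *task;

	task = find_get_task_by_vpid(nr);       /* takes a reference */
	if (!task)
		return -ESRCH;

	pr_info("found %s (pid %d)\n", task->comm, task->pid);

	put_task_struct(task);                  /* balance the reference */
	return 0;
}
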
ptrace.c
168 if (task_pid_vnr(task) == task->ptrace_message) in looks_like_a_spurious_pid()
193 if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && in ptrace_freeze_traced()
316 tcred = __task_cred(task); in __ptrace_may_access()
340 mm = task->mm; in __ptrace_may_access()
352 task_lock(task); in ptrace_may_access()
354 task_unlock(task); in ptrace_may_access()
402 if (task_is_stopped(task) && in ptrace_set_stopped()
435 audit_ptrace(task); in ptrace_attach()
459 if (task->ptrace) in ptrace_attach()
462 task->ptrace = flags; in ptrace_attach()
[all …]
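
__ptrace_may_access()/ptrace_may_access() in the ptrace.c hits are the standard permission gate for touching another task. A hedged sketch of the usual caller-side check (illustrative function):

/* Illustrative sketch, not from the indexed file: only expose another task's
 * state if the caller could also ptrace it. */
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int my_expose_task_info(struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;

	/* ... safe to report the target's state to the caller here ... */
	return 0;
}
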
hung_task.c
100 static void debug_show_blocker(struct task_struct *task) in debug_show_blocker() argument
108 blocker = READ_ONCE(task->blocker); in debug_show_blocker()
141 task->comm, task->pid); in debug_show_blocker()
145 task->comm, task->pid); in debug_show_blocker()
150 task->comm, task->pid); in debug_show_blocker()
164 task->comm, task->pid, t->comm, t->pid); in debug_show_blocker()
168 task->comm, task->pid, t->comm, t->pid); in debug_show_blocker()
173 task->comm, task->pid, rwsem_blocked_as, t->comm, in debug_show_blocker()
182 static inline void debug_show_blocker(struct task_struct *task) in debug_show_blocker() argument
kthread.c
71 struct task_struct *task; member
134 kthread->task = p; in set_kthread_struct()
552 return task; in __kthread_create_on_node()
590 return task; in kthread_create_on_node()
979 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
991 worker->task = NULL; in kthread_worker_fn()
1048 if (IS_ERR(task)) in __kthread_create_worker_on_node()
1052 worker->task = task; in __kthread_create_worker_on_node()
1584 task = worker->task; in kthread_destroy_worker()
1585 if (WARN_ON(!task)) in kthread_destroy_worker()
[all …]
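
Several kthread.c hits belong to the kthread_worker machinery. A hedged sketch of that API as a hypothetical driver might use it (my_* names are illustrative):

/* Illustrative sketch, not from the indexed file: create a dedicated worker
 * thread, queue one item of work on it, then tear it down. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/err.h>

static struct kthread_worker *my_worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	pr_info("work running in %s\n", current->comm);
}

static int my_start(void)
{
	my_worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(my_worker, &my_work);
	return 0;
}

static void my_stop(void)
{
	kthread_flush_worker(my_worker);
	kthread_destroy_worker(my_worker);      /* stops the worker task */
}
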
pid_namespace.c
195 struct task_struct *task, *me = current; in zap_pid_ns_processes() local
228 task = pid_task(pid, PIDTYPE_PID); in zap_pid_ns_processes()
229 if (task && !__fatal_signal_pending(task)) in zap_pid_ns_processes()
230 group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX); in zap_pid_ns_processes()
352 static struct ns_common *pidns_get(struct task_struct *task) in pidns_get() argument
357 ns = task_active_pid_ns(task); in pidns_get()
365 static struct ns_common *pidns_for_children_get(struct task_struct *task) in pidns_for_children_get() argument
369 task_lock(task); in pidns_for_children_get()
370 if (task->nsproxy) { in pidns_for_children_get()
371 ns = task->nsproxy->pid_ns_for_children; in pidns_for_children_get()
[all …]
workqueue_internal.h
44 struct task_struct *task; /* I: worker task */ member
78 void wq_worker_running(struct task_struct *task);
79 void wq_worker_sleeping(struct task_struct *task);
80 void wq_worker_tick(struct task_struct *task);
81 work_func_t wq_worker_last_func(struct task_struct *task);
delayacct.c
292 void __delayacct_irq(struct task_struct *task, u32 delta) in __delayacct_irq() argument
296 raw_spin_lock_irqsave(&task->delays->lock, flags); in __delayacct_irq()
297 task->delays->irq_delay += delta; in __delayacct_irq()
298 task->delays->irq_count++; in __delayacct_irq()
299 if (delta > task->delays->irq_delay_max) in __delayacct_irq()
300 task->delays->irq_delay_max = delta; in __delayacct_irq()
301 if (delta && (!task->delays->irq_delay_min || delta < task->delays->irq_delay_min)) in __delayacct_irq()
302 task->delays->irq_delay_min = delta; in __delayacct_irq()
303 raw_spin_unlock_irqrestore(&task->delays->lock, flags); in __delayacct_irq()
cred.c
160 cred = __task_cred((task)); in get_task_cred()
208 struct task_struct *task = current; in prepare_creds() local
218 old = task->cred; in prepare_creds()
394 struct task_struct *task = current; in commit_creds() local
395 const struct cred *old = task->real_cred; in commit_creds()
400 BUG_ON(task->cred != old); in commit_creds()
411 if (task->mm) in commit_creds()
412 set_dumpable(task->mm, suid_dumpable); in commit_creds()
413 task->pdeath_signal = 0; in commit_creds()
438 rcu_assign_pointer(task->real_cred, new); in commit_creds()
[all …]
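
prepare_creds() and commit_creds() in the cred.c hits follow the copy-modify-commit model described in Documentation/security/credentials.rst. A minimal sketch of that pattern (illustrative function):

/* Illustrative sketch, not from the indexed file: change the current task's
 * filesystem UID via the prepare/commit credential pattern. */
#include <linux/cred.h>
#include <linux/errno.h>

static int my_set_fsuid(kuid_t fsuid)
{
	struct cred *new;

	new = prepare_creds();          /* private copy of current->cred */
	if (!new)
		return -ENOMEM;

	new->fsuid = fsuid;             /* modify only the copy */

	return commit_creds(new);       /* install it; consumes the ref */
}
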
/kernel/locking/
rtmutex.c
351 int prio = task->prio; in __waiter_prio()
550 get_task_struct(task); in rt_mutex_wake_q_add_task()
551 wqh->rtlock_task = task; in rt_mutex_wake_q_add_task()
553 wake_q_add(&wqh->head, task); in rt_mutex_wake_q_add_task()
717 put_task_struct(task); in rt_mutex_adjust_prio_chain()
737 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
890 put_task_struct(task); in rt_mutex_adjust_prio_chain()
960 put_task_struct(task); in rt_mutex_adjust_prio_chain()
1071 put_task_struct(task); in rt_mutex_adjust_prio_chain()
1176 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
[all …]
rtmutex_api.c
313 struct task_struct *task, in __rt_mutex_start_proxy_lock() argument
320 if (try_to_take_rt_mutex(lock, task, NULL)) in __rt_mutex_start_proxy_lock()
361 struct task_struct *task) in rt_mutex_start_proxy_lock() argument
483 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_pi()
485 waiter = task->pi_blocked_on; in rt_mutex_adjust_pi()
487 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
491 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
494 get_task_struct(task); in rt_mutex_adjust_pi()
497 next_lock, NULL, task); in rt_mutex_adjust_pi()
509 void rt_mutex_debug_task_free(struct task_struct *task) in rt_mutex_debug_task_free() argument
[all …]
mutex-debug.c
52 struct task_struct *task) in debug_mutex_add_waiter() argument
57 DEBUG_LOCKS_WARN_ON(__get_task_blocked_on(task)); in debug_mutex_add_waiter()
61 struct task_struct *task) in debug_mutex_remove_waiter() argument
63 struct mutex *blocked_on = __get_task_blocked_on(task); in debug_mutex_remove_waiter()
66 DEBUG_LOCKS_WARN_ON(waiter->task != task); in debug_mutex_remove_waiter()
70 waiter->task = NULL; in debug_mutex_remove_waiter()
/kernel/cgroup/
legacy_freezer.c
164 struct task_struct *task; in freezer_attach() local
183 __thaw_task(task); in freezer_attach()
190 freeze_task(task); in freezer_attach()
224 freezer = task_freezer(task); in freezer_fork()
226 freeze_task(task); in freezer_fork()
253 struct task_struct *task; in update_if_frozen() local
278 if (freezing(task) && !frozen(task)) in update_if_frozen()
317 struct task_struct *task; in freeze_cgroup() local
321 freeze_task(task); in freeze_cgroup()
328 struct task_struct *task; in unfreeze_cgroup() local
[all …]
freezer.c
157 if (!lock_task_sighand(task, &flags)) in cgroup_freeze_task()
161 task->jobctl |= JOBCTL_TRAP_FREEZE; in cgroup_freeze_task()
162 signal_wake_up(task, false); in cgroup_freeze_task()
165 wake_up_process(task); in cgroup_freeze_task()
168 unlock_task_sighand(task, &flags); in cgroup_freeze_task()
177 struct task_struct *task; in cgroup_do_freeze() local
199 if (task->flags & PF_KTHREAD) in cgroup_do_freeze()
201 cgroup_freeze_task(task, freeze); in cgroup_do_freeze()
227 if (task->flags & PF_KTHREAD) in cgroup_freezer_migrate_task()
236 !task->frozen) in cgroup_freezer_migrate_task()
[all …]
pids.c
202 struct task_struct *task; in pids_can_attach() local
205 cgroup_taskset_for_each(task, dst_css, tset) { in pids_can_attach()
215 old_css = task_css(task, pids_cgrp_id); in pids_can_attach()
227 struct task_struct *task; in pids_cancel_attach() local
230 cgroup_taskset_for_each(task, dst_css, tset) { in pids_cancel_attach()
235 old_css = task_css(task, pids_cgrp_id); in pids_cancel_attach()
273 static int pids_can_fork(struct task_struct *task, struct css_set *cset) in pids_can_fork() argument
286 static void pids_cancel_fork(struct task_struct *task, struct css_set *cset) in pids_cancel_fork() argument
294 static void pids_release(struct task_struct *task) in pids_release() argument
296 struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id)); in pids_release()
/kernel/trace/rv/monitors/sleep/
sleep.c
33 ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task)); in ltl_atoms_fetch()
56 if (task->flags & PF_KTHREAD) { in ltl_atoms_init()
67 if (strstarts(task->comm, "migration/")) in ltl_atoms_init()
72 if (strstarts(task->comm, "rcu")) in ltl_atoms_init()
88 ltl_atom_pulse(task, LTL_SLEEP, true); in handle_sched_set_state()
90 ltl_atom_pulse(task, LTL_ABORT_SLEEP, true); in handle_sched_set_state()
95 ltl_atom_pulse(task, LTL_WAKE, true); in handle_sched_wakeup()
101 ltl_atom_pulse(task, LTL_WOKEN_BY_HARDIRQ, true); in handle_sched_waking()
103 if (current->prio <= task->prio) in handle_sched_waking()
106 ltl_atom_pulse(task, LTL_WOKEN_BY_NMI, true); in handle_sched_waking()
[all …]
/kernel/entry/
syscall_user_dispatch.c
118 task->syscall_dispatch.selector = selector; in task_set_syscall_user_dispatch()
119 task->syscall_dispatch.offset = offset; in task_set_syscall_user_dispatch()
120 task->syscall_dispatch.len = len; in task_set_syscall_user_dispatch()
121 task->syscall_dispatch.on_dispatch = false; in task_set_syscall_user_dispatch()
124 set_task_syscall_work(task, SYSCALL_USER_DISPATCH); in task_set_syscall_user_dispatch()
126 clear_task_syscall_work(task, SYSCALL_USER_DISPATCH); in task_set_syscall_user_dispatch()
137 int syscall_user_dispatch_get_config(struct task_struct *task, unsigned long size, in syscall_user_dispatch_get_config() argument
140 struct syscall_user_dispatch *sd = &task->syscall_dispatch; in syscall_user_dispatch_get_config()
146 if (test_task_syscall_work(task, SYSCALL_USER_DISPATCH)) in syscall_user_dispatch_get_config()
161 int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long size, in syscall_user_dispatch_set_config() argument
[all …]
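
The kernel side above is driven from userspace through prctl(PR_SET_SYSCALL_USER_DISPATCH, ...), described in Documentation/admin-guide/syscall-user-dispatch.rst. A hedged userspace sketch (region and selector handling simplified):

/* Illustrative userspace sketch, not from the indexed file: enable syscall
 * user dispatch. Syscalls issued from [start, start + len) stay untouched;
 * elsewhere the selector byte decides whether they raise SIGSYS. */
#include <sys/prctl.h>
#include <stdio.h>

/* PR_SYS_DISPATCH_ON and SYSCALL_DISPATCH_FILTER_ALLOW/BLOCK come from
 * <linux/prctl.h>, pulled in via <sys/prctl.h> on recent toolchains. */
static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;

int enable_dispatch(void *start, unsigned long len)
{
	if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
		  (unsigned long)start, len, &selector)) {
		perror("PR_SET_SYSCALL_USER_DISPATCH");
		return -1;
	}
	/* Setting selector = SYSCALL_DISPATCH_FILTER_BLOCK later makes
	 * syscalls outside the allowed range deliver SIGSYS. */
	return 0;
}
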
/kernel/sched/
core_sched.c
135 struct task_struct *task, *p; in sched_core_share_pid() local
152 task = current; in sched_core_share_pid()
154 task = find_task_by_vpid(pid); in sched_core_share_pid()
155 if (!task) { in sched_core_share_pid()
160 get_task_struct(task); in sched_core_share_pid()
167 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { in sched_core_share_pid()
178 cookie = sched_core_clone_cookie(task); in sched_core_share_pid()
203 cookie = sched_core_clone_cookie(task); in sched_core_share_pid()
213 __sched_core_set(task, cookie); in sched_core_share_pid()
218 grp = task_pid_type(task, type); in sched_core_share_pid()
[all …]
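
sched_core_share_pid() above implements the PR_SCHED_CORE prctl. A hedged userspace sketch of creating and sharing a core-scheduling cookie (assumes a kernel built with CONFIG_SCHED_CORE):

/* Illustrative userspace sketch, not from the indexed file: give this thread
 * group its own core-scheduling cookie and push it to another pid so the two
 * may share an SMT core. */
#include <sys/prctl.h>
#include <linux/prctl.h>
#include <unistd.h>

int share_core_cookie(pid_t other)
{
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
		  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0))
		return -1;

	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, other,
		  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0))
		return -1;
	return 0;
}
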
psi.c
629 struct task_struct *task; in psi_schedule_rtpoll_work() local
645 if (likely(task)) in psi_schedule_rtpoll_work()
900 task->pid, task->comm, task_cpu(task), in psi_flags_change()
905 task->psi_flags &= ~clear; in psi_flags_change()
906 task->psi_flags |= set; in psi_flags_change()
911 int cpu = task_cpu(task); in psi_task_change()
914 if (!task->pid) in psi_task_change()
1366 struct task_struct *task; in psi_trigger_create() local
1369 if (IS_ERR(task)) { in psi_trigger_create()
1372 return ERR_CAST(task); in psi_trigger_create()
[all …]
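
psi_trigger_create() above backs userspace PSI triggers, which are set up by writing a threshold to /proc/pressure/* and polling for POLLPRI (Documentation/accounting/psi.rst). A hedged sketch:

/* Illustrative userspace sketch, not from the indexed file: request an event
 * when memory "some" stall time exceeds 150ms within any 1s window. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int watch_memory_pressure(void)
{
	const char trig[] = "some 150000 1000000";  /* usecs: threshold, window */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0)
		return -1;
	if (write(fds.fd, trig, strlen(trig) + 1) < 0)
		return -1;

	fds.events = POLLPRI;
	for (;;) {
		if (poll(&fds, 1, -1) < 0)
			return -1;
		if (fds.revents & POLLERR)
			return -1;              /* monitored file went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
}
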
/kernel/unwind/
deferred.c
150 static void process_unwind_deferred(struct task_struct *task) in process_unwind_deferred() argument
152 struct unwind_task_info *info = &task->unwind_info; in process_unwind_deferred()
194 void unwind_deferred_task_exit(struct task_struct *task) in unwind_deferred_task_exit() argument
201 process_unwind_deferred(task); in unwind_deferred_task_exit()
203 task_work_cancel(task, &info->work); in unwind_deferred_task_exit()
347 void unwind_task_init(struct task_struct *task) in unwind_task_init() argument
349 struct unwind_task_info *info = &task->unwind_info; in unwind_task_init()
356 void unwind_task_free(struct task_struct *task) in unwind_task_free() argument
358 struct unwind_task_info *info = &task->unwind_info; in unwind_task_free()
361 task_work_cancel(task, &info->work); in unwind_task_free()
/kernel/trace/rv/
rv_trace.h
135 TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
137 TP_ARGS(task, states, atoms, next),
140 __string(comm, task->comm)
149 __entry->pid = task->pid;
161 TP_PROTO(struct task_struct *task),
163 TP_ARGS(task),
166 __string(comm, task->comm)
172 __entry->pid = task->pid;
/kernel/events/
core.c
351 task = ctx->task; in event_function_call()
387 task = ctx->task; in event_function_local()
1510 WARN_ON_ONCE(ctx->task != task); in perf_lock_task_context()
3105 task = ctx->task; in perf_install_in_context()
3710 WRITE_ONCE(next_ctx->task, task); in perf_event_context_sched_out()
4921 ctx->task = get_task_struct(task); in alloc_perf_context()
8849 .task = task, in perf_event_task()
9006 .task = task, in perf_event_comm()
9104 .task = task, in perf_event_namespaces()
9731 .task = task, in perf_event_switch()
[all …]

Completed in 83 milliseconds
