| /kernel/sched/ |
| syscalls.c |
    42   return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));   in normal_prio()
    54   p->normal_prio = normal_prio(p);   in effective_prio()
    88   if (task_has_dl_policy(p) || task_has_rt_policy(p)) {   in set_user_nice()
    103  p->prio = effective_prio(p);   in set_user_nice()
    114  p->sched_class->prio_changed(rq, p, old_prio);   in set_user_nice()
    310  p->timer_slack_ns = p->default_timer_slack_ns;   in DEFINE_CLASS()
    319  p->normal_prio = normal_prio(p);   in DEFINE_CLASS()
    492  if (!is_nice_reduction(p, task_nice(p)))   in user_check_sched_setscheduler()
    880  if (!p)   in do_sched_setscheduler()
    996  if (!p)   in SYSCALL_DEFINE3()
    [all …]
|
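The set_user_nice() and do_sched_setscheduler() paths above are the kernel side of setpriority(2) and sched_setscheduler(2). A minimal userspace sketch, using only standard glibc wrappers (nothing from the listing itself), that exercises both; raising the nice value as shown needs no privilege, while lowering it or requesting a realtime policy needs CAP_SYS_NICE:

```c
/*
 * Userspace sketch: drive the code listed above through its documented
 * syscall wrappers (setpriority(2) -> set_user_nice(),
 * sched_setscheduler(2) -> do_sched_setscheduler()).
 */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	/* Raise our own nice value to 10; the kernel recomputes p->prio. */
	if (setpriority(PRIO_PROCESS, 0, 10) == -1)
		fprintf(stderr, "setpriority: %s\n", strerror(errno));

	/* Explicitly (re)select the normal policy; SCHED_OTHER requires priority 0. */
	if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1)
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));

	printf("nice=%d policy=%d\n",
	       getpriority(PRIO_PROCESS, 0), sched_getscheduler(0));
	return 0;
}
```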
| core.c |
    1448  p->sched_class->reweight_task(task_rq(p), p, &lw);   in set_load_weight()
    2064  if (!p || p == current)   in get_wchan()
    3286  task_pid_nr(p), p->comm,   in force_compatible_cpus_allowed_ptr()
    4738  p->prio = p->normal_prio = p->static_prio;   in sched_fork()
    4805  p->sched_class->task_fork(p);   in sched_cgroup_fork()
    4863  __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));   in wake_up_new_task()
    6655  for (p = donor; task_is_blocked(p); p = owner) {   in find_proxy_task()
    7449  p->dl.pi_se = &p->dl;   in rt_mutex_setprio()
    7453  p->dl.pi_se = &p->dl;   in rt_mutex_setprio()
    7458  p->dl.pi_se = &p->dl;   in rt_mutex_setprio()
    [all …]
|
| ext.c |
    2179  p->comm, p->pid);   in mark_direct_dispatch()
    2183  p->comm, p->pid);   in mark_direct_dispatch()
    2228  p->comm, p->pid, opss);   in direct_dispatch()
    2635  p->comm, p->pid, task_cpu(p), cpu);   in task_can_run_on_remote_rq()
    2648  cpu, p->comm, p->pid);   in task_can_run_on_remote_rq()
    3835  p->comm, p->pid);   in scx_init_task()
    3868  p, p->scx.weight);   in scx_enable_task()
    4023  p, p->scx.weight);   in reweight_task_scx()
    5018  check_class_changed(task_rq(p), p, old_class, p->prio);   in scx_disable_workfn()
    5227  marker, task_state_to_char(p), p->comm, p->pid,   in scx_dump_task()
    [all …]
|
| deadline.c |
    349   dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);   in dl_change_utilization()
    686   if (p->dl.dl_non_contending || p->dl.dl_throttled) {   in dl_task_offline_migration()
    1770  sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));   in inactive_task_timer()
    1771  sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));   in inactive_task_timer()
    2127  if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)   in enqueue_task_dl()
    2139  if (!p->dl.dl_throttled && !dl_server(&p->dl))   in dequeue_task_dl()
    2402  return p;   in __pick_task_dl()
    2425  if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)   in put_prev_task_dl()
    2648  return p;   in pick_next_pushable_dl_task()
    2786  if (p && dl_time_before(p->dl.deadline, dmin) &&   in pull_dl_task()
    [all …]
|
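The deadline-class hooks above back tasks admitted with sched_setattr(2) and SCHED_DEADLINE. A hedged userspace sketch: it assumes SYS_sched_setattr is available from <sys/syscall.h>, uses the struct sched_attr layout described in the sched_setattr(2) man page (glibc ships neither a wrapper nor the struct), and the 10/30/100 ms parameters are arbitrary illustration values; the call needs root or CAP_SYS_NICE:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6	/* value taken from include/uapi/linux/sched.h */
#endif

/* Layout per sched_setattr(2); an assumption if your headers differ. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms of budget ... */
		.sched_deadline	= 30 * 1000 * 1000,	/* ... due within 30 ms ... */
		.sched_period	= 100 * 1000 * 1000,	/* ... every 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1) {
		fprintf(stderr, "sched_setattr: %s\n", strerror(errno));
		return 1;
	}
	puts("now running under SCHED_DEADLINE");
	return 0;
}
```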
| core_sched.c |
    62   rq = task_rq_lock(p, &rf);   in sched_core_update_cookie()
    72   if (sched_core_enqueued(p))   in sched_core_update_cookie()
    75   old_cookie = p->core_cookie;   in sched_core_update_cookie()
    76   p->core_cookie = cookie;   in sched_core_update_cookie()
    82   sched_core_enqueue(rq, p);   in sched_core_update_cookie()
    93   if (task_on_cpu(rq, p))   in sched_core_update_cookie()
    96   task_rq_unlock(rq, p, &rf);   in sched_core_update_cookie()
    114  RB_CLEAR_NODE(&p->core_node);   in sched_core_fork()
    135  struct task_struct *task, *p;   in sched_core_share_pid() local
    247  struct task_struct *p;   in __sched_core_account_forceidle() local
    [all …]
|
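sched_core_update_cookie() above is driven from the PR_SCHED_CORE prctl. A sketch, assuming a kernel built with core scheduling; the fallback constant values are an assumption copied from my reading of include/uapi/linux/prctl.h and only matter if your libc headers predate the feature:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

/* Assumed values from include/uapi/linux/prctl.h for older userspace headers. */
#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE			62
#define PR_SCHED_CORE_GET		0
#define PR_SCHED_CORE_CREATE		1
#define PR_SCHED_CORE_SHARE_TO		2
#define PR_SCHED_CORE_SHARE_FROM	3
#endif
#ifndef PR_SCHED_CORE_SCOPE_THREAD_GROUP
#define PR_SCHED_CORE_SCOPE_THREAD_GROUP	1
#endif

int main(void)
{
	/* Give this whole thread group a core-scheduling cookie; the kernel
	 * side is sched_core_update_cookie() in the listing above. */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, getpid(),
		  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0) == -1) {
		fprintf(stderr, "PR_SCHED_CORE_CREATE: %s\n", strerror(errno));
		return 1;
	}
	puts("core-scheduling cookie created for this process");
	return 0;
}
```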
| fair.c |
    2491  .p = p,   in task_numa_migrate()
    2532  sched_setnuma(p, task_node(p));   in task_numa_migrate()
    2637  if (task_node(p) == p->numa_preferred_nid)   in numa_migrate_preferred()
    2754  task_scan_min(p), task_scan_max(p));   in update_task_scan_period()
    3562  p->numa_work.next = &p->numa_work;   in init_numa_balancing()
    3633  if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))   in update_scan_period()
    3656  p->numa_scan_period = task_scan_start(p);   in update_scan_period()
    5096  if (!p || (p->nr_cpus_allowed == 1) ||   in update_misfit_status()
    6985  if (p && &p->se == se)   in dequeue_entities()
    7952  else if (p && unlikely(task_on_rq_queued(p) || current == p))   in cpu_util()
    [all …]
|
| rt.c |
    313   return task_rq(p);   in rq_of_rt_se()
    400   plist_node_init(&p->pushable_tasks, p->prio);   in enqueue_pushable_task()
    578   return p->prio != p->normal_prio;   in rt_se_boosted()
    1446  if (!task_current(rq, p) && p->nr_cpus_allowed > 1)   in enqueue_task_rt()
    1596  if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {   in balance_rt()
    1707  return p;   in pick_task_rt()
    1728  if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)   in put_prev_task_rt()
    1749  return p;   in pick_highest_pushable_task()
    1866  return p;   in pick_next_pushable_task()
    2293  if (p && (p->prio < this_rq->rt.highest_prio.curr)) {   in pull_rt_task()
    [all …]
|
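enqueue_task_rt() and pick_task_rt() above manage tasks in the POSIX realtime classes. A small sketch that switches the calling process to SCHED_FIFO; priority 50 is an arbitrary choice in the 1..99 range, and the call needs CAP_SYS_NICE or a suitable RLIMIT_RTPRIO:

```c
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 50 is an arbitrary value in the 1..99 SCHED_FIFO range. */
	struct sched_param sp = { .sched_priority = 50 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		fprintf(stderr, "sched_setscheduler(SCHED_FIFO): %s\n", strerror(errno));
		return 1;
	}
	printf("running as SCHED_FIFO, priority %d\n", sp.sched_priority);
	/* An RT task that never blocks can monopolize its CPU - keep RT sections short. */
	return 0;
}
```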
| autogroup.c |
    78   if (!lock_task_sighand(p, &flags))   in autogroup_task_get()
    81   ag = autogroup_kref_get(p->signal->autogroup);   in autogroup_task_get()
    82   unlock_task_sighand(p, &flags);   in autogroup_task_get()
    143  if (p->flags & PF_EXITING)   in task_wants_autogroup()
    156  sched_move_task(p, true);   in sched_autogroup_exit_task()
    169  prev = p->signal->autogroup;   in autogroup_move_group()
    171  unlock_task_sighand(p, &flags);   in autogroup_move_group()
    187  for_each_thread(p, t)   in autogroup_move_group()
    190  unlock_task_sighand(p, &flags);   in autogroup_move_group()
    199  autogroup_move_group(p, ag);   in sched_autogroup_create_attach()
    [all …]
|
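The autogroup a task belongs to is visible in /proc/<pid>/autogroup when the kernel is built with CONFIG_SCHED_AUTOGROUP. A minimal reader for the current process:

```c
#include <stdio.h>

int main(void)
{
	char line[128];
	/* Present when the kernel is built with CONFIG_SCHED_AUTOGROUP. */
	FILE *f = fopen("/proc/self/autogroup", "r");

	if (!f) {
		perror("/proc/self/autogroup");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "/autogroup-42 nice 0" */
	fclose(f);
	return 0;
}
```

Writing a nice level to the same file (for example `echo 10 > /proc/self/autogroup`) adjusts the weight of the whole autogroup rather than a single task.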
| stats.h |
    142  if (task_on_cpu(task_rq(p), p))   in psi_enqueue()
    145  if (p->se.sched_delayed) {   in psi_enqueue()
    148  if (p->in_memstall)   in psi_enqueue()
    150  if (p->in_iowait)   in psi_enqueue()
    155  if (p->in_memstall)   in psi_enqueue()
    159  if (p->in_iowait)   in psi_enqueue()
    162  if (p->in_memstall)   in psi_enqueue()
    191  psi_task_change(p, p->psi_flags, 0);   in psi_dequeue()
    203  if (unlikely(p->psi_flags)) {   in psi_ttwu_dequeue()
    207  rq = __task_rq_lock(p, &rf);   in psi_ttwu_dequeue()
    [all …]
|
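psi_enqueue()/psi_dequeue() above feed the pressure-stall accounting that the kernel exports under /proc/pressure/ when CONFIG_PSI is enabled. A minimal reader for the CPU file; the memory and io files follow the same layout:

```c
#include <stdio.h>

int main(void)
{
	char line[256];
	/* Exported when CONFIG_PSI is enabled; /proc/pressure/memory and
	 * /proc/pressure/io follow the same layout. */
	FILE *f = fopen("/proc/pressure/cpu", "r");

	if (!f) {
		perror("/proc/pressure/cpu");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "some avg10=... avg60=... avg300=... total=..." */
	fclose(f);
	return 0;
}
```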
| cputime.c |
    127  p->utime += cputime;   in account_user_time()
    136  acct_account_cputime(p);   in account_user_time()
    149  p->utime += cputime;   in account_guest_time()
    151  p->gtime += cputime;   in account_guest_time()
    154  if (task_nice(p) > 0) {   in account_guest_time()
    173  p->stime += cputime;   in account_system_index_time()
    180  acct_account_cputime(p);   in account_system_index_time()
    194  account_guest_time(p, cputime);   in account_system_time()
    403  account_user_time(p, cputime);   in irqtime_account_process_tick()
    453  *ut = p->utime;   in task_cputime_adjusted()
    [all …]
|
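account_user_time() and account_system_index_time() above maintain the per-task utime/stime that userspace reads back through clock_gettime(2) and getrusage(2). A small sketch:

```c
#include <stdio.h>
#include <sys/resource.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	struct rusage ru;

	/* Burn a little CPU so the accounted values are visibly non-zero. */
	for (volatile unsigned long i = 0; i < 50000000UL; i++)
		;

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
	getrusage(RUSAGE_SELF, &ru);

	printf("total cputime : %ld.%09ld s\n", (long)ts.tv_sec, (long)ts.tv_nsec);
	printf("user/system   : %ld.%06ld s / %ld.%06ld s\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return 0;
}
```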
| sched.h |
    1327  #define task_rq(p) cpu_rq(task_cpu(p))   argument
    1578  return p->se.cfs_rq;   in task_cfs_rq()
    1799  __acquires(p->pi_lock)
    1812  __releases(p->pi_lock)   in task_rq_unlock()
    2142  set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);   in set_task_rq()
    2174  set_task_rq(p, cpu);   in __set_task_cpu()
    2183  p->wake_cpu = cpu;   in __set_task_cpu()
    2248  return rq->curr == p;   in task_current()
    2272  return p->on_cpu;   in task_on_cpu()
    2575  if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p))   in task_allowed_on_cpu()
    [all …]
|
| ext.h |
    36   void scx_pre_fork(struct task_struct *p);
    37   int scx_fork(struct task_struct *p);
    38   void scx_post_fork(struct task_struct *p);
    39   void scx_cancel_fork(struct task_struct *p);
    45   bool scx_allow_ttwu_queue(const struct task_struct *p);
    56   static inline bool task_on_scx(const struct task_struct *p)   in task_on_scx() argument
    58   return scx_enabled() && p->sched_class == &ext_sched_class;   in task_on_scx()
    69   static inline void scx_pre_fork(struct task_struct *p) {}   in scx_pre_fork() argument
    71   static inline void scx_post_fork(struct task_struct *p) {}   in scx_post_fork() argument
    72   static inline void scx_cancel_fork(struct task_struct *p) {}   in scx_cancel_fork() argument
    [all …]
|
| membarrier.c |
    271  struct task_struct *p;   in membarrier_global_expedited() local
    293  if (!p->mm)   in membarrier_global_expedited()
    365  struct task_struct *p;   in membarrier_private_expedited() local
    371  if (!p || p->mm != mm) {   in membarrier_private_expedited()
    381  struct task_struct *p;   in membarrier_private_expedited() local
    384  if (p && p->mm == mm)   in membarrier_private_expedited()
    480  struct task_struct *p;   in sync_runqueues_membarrier_state() local
    483  if (p && p->mm == mm)   in sync_runqueues_membarrier_state()
    499  struct mm_struct *mm = p->mm;   in membarrier_register_global_expedited()
    518  struct mm_struct *mm = p->mm;   in membarrier_register_private_expedited()
    [all …]
|
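membarrier_private_expedited() and the registration helpers above implement the membarrier(2) system call. A sketch using the UAPI constants from <linux/membarrier.h> and a local wrapper (glibc provides none); the three-argument form assumes a kernel recent enough to take a cpu_id argument, which older kernels do not look at:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <linux/membarrier.h>	/* MEMBARRIER_CMD_* */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	int supported = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

	if (supported == -1) {
		fprintf(stderr, "membarrier: %s\n", strerror(errno));
		return 1;
	}
	printf("supported command mask: %#x\n", supported);

	/* Register, then issue, a private expedited barrier: every other
	 * running thread of this mm passes through a full memory barrier
	 * before the second call returns (membarrier_private_expedited()). */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) == 0 &&
	    membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) == 0)
		puts("private expedited membarrier issued");
	return 0;
}
```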
| debug.c |
    737   p->comm, task_pid_nr(p),   in print_task()
    739   entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',   in print_task()
    744   (long long)(p->nvcsw + p->nivcsw),   in print_task()
    745   p->prio);   in print_task()
    753   SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));   in print_task()
    794   print_task(m, rq, p);   in print_rq()
    1136  if (p->mm)   in sched_show_numa()
    1143  task_node(p), task_numa_group_id(p));   in sched_show_numa()
    1153  SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),   in proc_sched_show_task()
    1166  nr_switches = p->nvcsw + p->nivcsw;   in proc_sched_show_task()
    [all …]
|
| /kernel/trace/ |
| trace_boot.c |
    44   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    54   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    60   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    69   if (p && *p != '\0') {   in trace_boot_set_instance_options()
    227  if (p) {   in trace_boot_hist_add_array()
    370  if (p)   in trace_boot_compose_hist_cmd()
    374  if (p)   in trace_boot_compose_hist_cmd()
    407  if (p)   in trace_boot_compose_hist_cmd()
    488  if (p && *p != '\0') {   in trace_boot_init_one_event()
    599  if (p && *p != '\0') {   in trace_boot_enable_tracer()
    [all …]
|
| trace_branch.c |
    39   const char *p;   in probe_likely_condition() local
    70   while (p >= f->data.file && *p != '/')   in probe_likely_condition()
    71   p--;   in probe_likely_condition()
    72   p++;   in probe_likely_condition()
    247  percent /= p->correct + p->incorrect;   in get_incorrect_percent()
    259  f = p->file + strlen(p->file);   in branch_stat_process_file()
    306  p->data.correct, p->constant, l, p->data.incorrect);   in annotate_branch_stat_show()
    321  ++p;   in annotated_branch_stat_next()
    326  return p;   in annotated_branch_stat_next()
    411  ++p;   in all_branch_stat_next()
    [all …]
|
| /kernel/ |
| kprobes.c |
    405   return kprobe_aggrprobe(p) && kprobe_disabled(p) &&   in kprobe_unused()
    503   if (p && kprobe_optready(p)) {   in get_optimized_kprobe()
    733   if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))   in unoptimize_kprobe()
    1020  #define __arm_kprobe(p) arch_arm_kprobe(p)   argument
    1022  #define kprobe_disarmed(p) kprobe_disabled(p)   argument
    1273  if (p->post_handler && !kprobe_gone(p))   in init_aggr_kprobe()
    1390  if (!p)   in within_kprobe_blacklist()
    1473  return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);   in kprobe_addr()
    1638  addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);   in register_kprobe()
    1755  if (p->post_handler && !kprobe_gone(p)) {   in __unregister_kprobe_top()
    [all …]
|
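register_kprobe() and kprobe_addr() above are the API a kernel module uses to plant a probe. A hedged module sketch in the style of samples/kprobes/kprobe_example.c; the probed symbol do_sys_openat2 is only an illustrative choice, any non-blacklisted symbol works. Build it out of tree against a matching kernel source tree and load it with insmod; hits show up in dmesg:

```c
#include <linux/kprobes.h>
#include <linux/module.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s+0x%x\n", p->symbol_name, p->offset);
	return 0;	/* let the probed instruction run normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_openat2",	/* illustrative probe target */
	.pre_handler	= handler_pre,
};

static int __init kp_demo_init(void)
{
	/* register_kprobe() resolves kp.addr via kprobe_addr() shown above. */
	int ret = register_kprobe(&kp);

	if (ret)
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit kp_demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_demo_init);
module_exit(kp_demo_exit);
MODULE_LICENSE("GPL");
```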
| freezer.c |
    56   bool frozen(struct task_struct *p)   in frozen() argument
    103  signal_wake_up(p, 0);   in fake_signal_wake_up()
    104  unlock_task_sighand(p, &flags);   in fake_signal_wake_up()
    117  if (task_is_runnable(p))   in __set_task_frozen()
    120  if (p != current && task_curr(p))   in __set_task_frozen()
    141  p->saved_state = p->__state;   in __set_task_frozen()
    168  if (!freezing(p) || frozen(p) || __freeze_task(p)) {   in freeze_task()
    173  if (!(p->flags & PF_KTHREAD))   in freeze_task()
    174  fake_signal_wake_up(p);   in freeze_task()
    176  wake_up_state(p, TASK_NORMAL);   in freeze_task()
    [all …]
|
| exit.c |
    259   pidfs_exit(p);   in release_task()
    310   p = leader;   in release_task()
    359   (p->exit_state && thread_group_empty(p)) ||   in will_become_orphaned_pgrp()
    364   task_session(p->real_parent) == task_session(p))   in will_become_orphaned_pgrp()
    685   p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {   in reparent_leader()
    686   if (do_notify_parent(p, p->exit_signal)) {   in reparent_leader()
    1169  ? p->signal->group_exit_code : p->exit_code;   in wait_task_zombie()
    1181  state = (ptrace_reparented(p) && thread_group_leader(p)) ?   in wait_task_zombie()
    1248  ? p->signal->group_exit_code : p->exit_code;   in wait_task_zombie()
    1258  if (do_notify_parent(p, p->exit_signal))   in wait_task_zombie()
    [all …]
|
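wait_task_zombie() above is where a parent's wait*() call collects a child's exit_code once the child is sitting in EXIT_ZOMBIE. A minimal userspace illustration:

```c
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == -1) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(42);	/* child: stays EXIT_ZOMBIE until the parent reaps it */

	int status;
	/* waitpid() ends up in wait_task_zombie(), which hands back exit_code. */
	if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
		printf("child %d exited with %d\n", (int)pid, WEXITSTATUS(status));
	return 0;
}
```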
| fork.c |
    1897  memset(&p->rv, 0, sizeof(p->rv));   in rv_task_fork()
    2005  if (!p)   in copy_process()
    2022  strscpy_pad(p->comm, args->name, sizeof(p->comm));   in copy_process()
    2070  p->utime = p->stime = p->gtime = 0;   in copy_process()
    2072  p->utimescaled = p->stimescaled = 0;   in copy_process()
    2106  p->mempolicy = mpol_dup(p->mempolicy);   in copy_process()
    2118  memset(&p->irqtrace, 0, sizeof(p->irqtrace));   in copy_process()
    2249  p->group_leader = p;   in copy_process()
    2250  p->tgid = p->pid;   in copy_process()
    2428  return p;   in copy_process()
    [all …]
|
| resource.c |
    64   while (!p->sibling && p->parent) {   in next_resource()
    65   p = p->parent;   in next_resource()
    100  return p;   in r_start()
    126  for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)   in r_show()
    195  tmp = *p;   in __request_resource()
    215  tmp = *p;   in __release_resource()
    246  tmp = p;   in __release_child_resources()
    247  p = p->sibling;   in __release_child_resources()
    370  if (p) {   in find_next_iomem_res()
    566  for (p = parent->child; p ; p = p->sibling) {   in __region_intersects()
    [all …]
|
| /kernel/cgroup/ |
| pids.c |
    130  struct pids_cgroup *p;   in pids_uncharge() local
    132  for (p = pids; parent_pids(p); p = parent_pids(p))   in pids_uncharge()
    133  pids_cancel(p, num);   in pids_uncharge()
    147  struct pids_cgroup *p;   in pids_charge() local
    149  for (p = pids; parent_pids(p); p = parent_pids(p)) {   in pids_charge()
    152  pids_update_watermark(p, new);   in pids_charge()
    168  struct pids_cgroup *p, *q;   in pids_try_charge() local
    170  for (p = pids; parent_pids(p); p = parent_pids(p)) {   in pids_try_charge()
    180  *fail = p;   in pids_try_charge()
    195  pids_cancel(p, num);   in pids_try_charge()
    [all …]
|
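pids_try_charge() above enforces the pids.max limit of the cgroup pids controller on every fork/clone in the group. A hedged sketch, assuming cgroup v2 is mounted at /sys/fs/cgroup, that the caller is allowed to create a child group there, and using a hypothetical group name pids-demo:

```c
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* Hypothetical child group; path and name are assumptions. */
	const char *cg = "/sys/fs/cgroup/pids-demo";
	char path[256], buf[64];
	FILE *f;

	if (mkdir(cg, 0755) == -1 && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	/* pids.max is the limit pids_try_charge() checks on each new task. */
	snprintf(path, sizeof(path), "%s/pids.max", cg);
	f = fopen(path, "w");
	if (f) {
		fputs("64\n", f);
		fclose(f);
	}

	snprintf(path, sizeof(path), "%s/pids.current", cg);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("pids.current: %s", buf);
		fclose(f);
	}
	return 0;
}
```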
| /kernel/debug/kdb/ |
| kdb_bt.c |
    26   if (!addr && kdb_task_has_cpu(p)) {   in kdb_show_stack()
    33   show_stack(p, addr, KERN_EMERG);   in kdb_show_stack()
    84   if (!kdb_task_state(p, mask))   in kdb_bt1()
    87   kdb_ps1(p);   in kdb_bt1()
    88   kdb_show_stack(p, NULL);   in kdb_bt1()
    139  struct task_struct *g, *p;   in kdb_bt() local
    147  p = curr_task(cpu);   in kdb_bt()
    152  for_each_process_thread(g, p) {   in kdb_bt()
    155  if (task_curr(p))   in kdb_bt()
    161  struct task_struct *p;   in kdb_bt() local
    [all …]
|
| /kernel/bpf/ |
| cgroup_iter.c |
    65   if (p->visited_all)   in cgroup_iter_seq_start()
    75   p->terminate = false;   in cgroup_iter_seq_start()
    76   p->visited_all = false;   in cgroup_iter_seq_start()
    82   return p->start_css;   in cgroup_iter_seq_start()
    97   p->visited_all = true;   in cgroup_iter_seq_stop()
    107  if (p->terminate)   in cgroup_iter_seq_next()
    142  p->terminate = true;   in __cgroup_iter_seq_show()
    173  css_get(p->start_css);   in BTF_ID_LIST_GLOBAL_SINGLE()
    174  p->terminate = false;   in BTF_ID_LIST_GLOBAL_SINGLE()
    175  p->visited_all = false;   in BTF_ID_LIST_GLOBAL_SINGLE()
    [all …]
|
| /kernel/power/ |
| process.c |
    32   struct task_struct *g, *p;   in try_to_freeze_tasks() local
    53   for_each_process_thread(g, p) {   in try_to_freeze_tasks()
    54   if (p == current || !freeze_task(p))   in try_to_freeze_tasks()
    101  if (p != current && freezing(p) && !frozen(p))   in try_to_freeze_tasks()
    102  sched_show_task(p);   in try_to_freeze_tasks()
    181  struct task_struct *g, *p;   in thaw_processes() local
    200  WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));   in thaw_processes()
    201  __thaw_task(p);   in thaw_processes()
    217  struct task_struct *g, *p;   in thaw_kernel_threads() local
    226  if (p->flags & PF_KTHREAD)   in thaw_kernel_threads()
    [all …]
|
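try_to_freeze_tasks() above runs when a suspend is requested by writing a state such as "mem" to /sys/power/state; with PM debugging enabled, setting /sys/power/pm_test to "freezer" confines the test run to freezing and thawing tasks. A read-only sketch that only shows what the running kernel offers (it does not trigger a suspend):

```c
#include <stdio.h>

static void dump(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-22s %s", path, buf);
	fclose(f);
}

int main(void)
{
	/* Writing one of the listed states (e.g. "mem") to /sys/power/state is
	 * what ultimately calls try_to_freeze_tasks(); /sys/power/pm_test is
	 * only present with PM debugging configured in. */
	dump("/sys/power/state");
	dump("/sys/power/pm_test");
	return 0;
}
```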