/linux-6.3-rc2/arch/x86/um/ptrace_32.c
  199: int err, n, cpu = task_cpu(child);  in get_fpregs()
  216: int n, cpu = task_cpu(child);  in set_fpregs()
  229: int err, n, cpu = task_cpu(child);  in get_fpxregs()
  245: int n, cpu = task_cpu(child);  in set_fpxregs()
/linux-6.3-rc2/arch/ia64/include/asm/switch_to.h
   62: (task_cpu(current) != \
   64: task_thread_info(current)->last_cpu = task_cpu(current); \
/linux-6.3-rc2/Documentation/translations/zh_CN/scheduler/sched-capacity.rst
  302: task_util(p) < capacity(task_cpu(p))
  358: then the task can become CPU-bound, i.e. ``task_util(p) > capacity(task_cpu(p))``; CPU capacity
  374: task_uclamp_min(p) <= capacity(task_cpu(cpu))
  387: task_bandwidth(p) < capacity(task_cpu(p))
/linux-6.3-rc2/kernel/sched/stop_task.c
   15: return task_cpu(p); /* stop tasks as never migrate */  in select_task_rq_stop()
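This hit is the stop class's entire CPU-selection logic: per-CPU stop tasks are pinned, so the hook just echoes task_cpu(). A minimal reconstruction of the surrounding function, assuming the modern three-argument select_task_rq() hook signature (the idle-class hit further down, idle.c:390, is the same pattern):

    static int
    select_task_rq_stop(struct task_struct *p, int cpu, int flags)
    {
            /* Stop tasks are pinned to their CPU; always report it. */
            return task_cpu(p); /* stop tasks as never migrate */
    }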
/linux-6.3-rc2/kernel/sched/core.c
 2153: return cpu_curr(task_cpu(p)) == p;  in task_curr()
 2335: WARN_ON_ONCE(task_cpu(p) != new_cpu);  in move_queued_task()
 3190: if (task_cpu(p) != new_cpu) {  in set_task_cpu()
 3444: cpu = task_cpu(p);  in kick_process()
 4261: if (task_cpu(p) != cpu) {  in try_to_wake_up()
 4272: cpu = task_cpu(p);  in try_to_wake_up()
 4280: ttwu_stat(p, task_cpu(p), wake_flags);  in try_to_wake_up()
 4792: p->recent_used_cpu = task_cpu(p);  in wake_up_new_task()
 9233: int curr_cpu = task_cpu(p);  in migrate_task_to()
 9689: if (task_cpu(p) != cpu)  in dump_rq_tasks()
 [all …]
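Among the core.c hits, line 2153 is the whole body of task_curr(): a task is "current" exactly when the CPU it is recorded on is running it right now. Reconstructed from the snippet above (read it as a sketch, though the body is the one line shown):

    /* Is this task currently executing on its CPU? */
    inline int task_curr(const struct task_struct *p)
    {
            return cpu_curr(task_cpu(p)) == p;
    }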
/linux-6.3-rc2/kernel/sched/deadline.c
  428: struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in task_non_contending()
  433: __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in task_non_contending()
 1423: struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in inactive_task_timer()
 1432: __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in inactive_task_timer()
 2136: int cpu = task_cpu(task);  in find_later_rq()
 2281: WARN_ON_ONCE(rq->cpu != task_cpu(p));  in pick_next_pushable_dl_task()
 2513: __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in set_cpus_allowed_dl()
 2840: int cpus, err = -1, cpu = task_cpu(p);  in sched_dl_overflow()
/linux-6.3-rc2/kernel/sched/psi.c
  876: task->pid, task->comm, task_cpu(task),  in psi_flags_change()
  887: int cpu = task_cpu(task);  in psi_task_change()
  908: int cpu = task_cpu(prev);  in psi_task_switch()
  984: int cpu = task_cpu(task);  in psi_account_irqtime()
/linux-6.3-rc2/kernel/sched/idle.c
  390: return task_cpu(p); /* IDLE tasks as never migrated */  in select_task_rq_idle()
/linux-6.3-rc2/kernel/sched/cpudeadline.c
  137: (cpu == task_cpu(p) && cap == max_cap)) {  in cpudl_find()
/linux-6.3-rc2/kernel/sched/cpuacct.c
  336: unsigned int cpu = task_cpu(tsk);  in cpuacct_charge()
/linux-6.3-rc2/kernel/trace/trace_sched_wakeup.c
  393: entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
  421: entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()
  567: wakeup_cpu = task_cpu(p);  in probe_wakeup()
/linux-6.3-rc2/include/linux/kdb.h
  193: unsigned int cpu = task_cpu(p);  in kdb_process_cpu()
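kdb reads task_cpu() but clamps the result before using it to index per-CPU state, since the debugger may be inspecting a corrupted task_struct. A sketch of the surrounding helper; the num_possible_cpus() clamp is recalled from the kernel source rather than shown in the hit, so treat it as an assumption:

    static inline unsigned int kdb_process_cpu(const struct task_struct *p)
    {
            unsigned int cpu = task_cpu(p);

            /* A trashed task_struct could report a bogus CPU; fall back to 0. */
            if (cpu > num_possible_cpus())
                    cpu = 0;
            return cpu;
    }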
/linux-6.3-rc2/include/linux/sched.h
 2237: static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() (definition)
 2246: static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() (definition)
 2290: return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));  in owner_on_cpu()
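The definition hits at 2237 and 2246 are the CONFIG_SMP and !CONFIG_SMP variants of task_cpu() itself, which every other hit in this listing ultimately calls. A sketch of the pair, assuming the READ_ONCE()-of-thread_info form used by recent kernels:

    #ifdef CONFIG_SMP
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
            /* The CPU the task last ran (or is queued) on; it can change
             * concurrently under migration, hence READ_ONCE(). */
            return READ_ONCE(task_thread_info(p)->cpu);
    }
    #else
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
            return 0;       /* UP: there is only CPU 0 */
    }
    #endif

The owner_on_cpu() hit at 2290 is the usual optimistic-spinning gate built on top of it: keep spinning only while the lock owner is actually running and its (virtual) CPU has not been preempted.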
/linux-6.3-rc2/include/linux/sched/topology.h
  280: return cpu_to_node(task_cpu(p));  in task_node()
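task_node() is a one-line composition over task_cpu(), and the hit at 280 is its whole body: a task's NUMA node is the node of the CPU it last ran on. The full helper, reconstructed:

    static inline int task_node(const struct task_struct *p)
    {
            return cpu_to_node(task_cpu(p));
    }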
/linux-6.3-rc2/kernel/rcu/tasks.h
  876: cpu = task_cpu(t);  in check_holdout_task()
 1380: int cpu = task_cpu(t);  in trc_inspect_reader()
 1460: cpu = task_cpu(t);  in trc_wait_for_one_reader()
 1585: if (task_curr(t) && cpu_online(task_cpu(t)))  in trc_check_slow_task()
 1604: cpu = task_cpu(t);  in show_stalled_task_trace()
/linux-6.3-rc2/kernel/rcu/tree_stall.h
  420: cpu = task_cpu(rcuc);  in rcu_is_rcuc_kthread_starving()
  528: cpu = gpk ? task_cpu(gpk) : -1;  in rcu_check_gp_kthread_starvation()
  571: cpu = task_cpu(gpk);  in rcu_check_gp_kthread_expired_fqs_timer()
/linux-6.3-rc2/kernel/rcu/tree_nocb.h
 1601: rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,  in show_rcu_nocb_gp_state()
 1646: rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,  in show_rcu_nocb_state()
/linux-6.3-rc2/Documentation/scheduler/sched-capacity.rst
  341: task_util(p) < capacity(task_cpu(p))
  404: then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
  423: task_uclamp_min(p) <= capacity(task_cpu(cpu))
  437: task_bandwidth(p) < capacity(task_cpu(p))
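These four documentation hits state the capacity-fitness criteria in the document's pseudo-notation; task_util() is a real scheduler helper, but capacity() and task_bandwidth() here are notation, not kernel APIs. A hypothetical rendering of the basic utilization check, only to make the inequality concrete:

    /* Hypothetical helper mirroring the doc's
     * ``task_util(p) < capacity(task_cpu(p))``; capacity() stands in for
     * the per-CPU capacity value and is not a real kernel function. */
    static inline bool task_fits_its_cpu(struct task_struct *p)
    {
            return task_util(p) < capacity(task_cpu(p));
    }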
/linux-6.3-rc2/include/trace/events/sched.h
  158: __entry->target_cpu = task_cpu(p);
  292: __entry->orig_cpu = task_cpu(p);
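These two hits carry no "in function" context because they sit inside TRACE_EVENT() fast-assign blocks rather than C functions. A sketch of the pattern for the wakeup template, assuming the usual comm/pid/prio/target_cpu field set:

    TP_fast_assign(
            memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
            __entry->pid        = p->pid;
            __entry->prio       = p->prio;
            __entry->target_cpu = task_cpu(p);      /* the hit at line 158 */
    ),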
/linux-6.3-rc2/io_uring/fdinfo.c
  146: seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);  in __io_uring_show_fdinfo()
/linux-6.3-rc2/kernel/stop_machine.c
   58: struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));  in print_stop_info()
/linux-6.3-rc2/arch/parisc/kernel/traps.c
  147: level, task_cpu(current), cr30, cr31);  in show_regs()
/linux-6.3-rc2/arch/mips/kernel/process.c
  849: cpumask_set_cpu(task_cpu(t), &process_cpus);  in mips_set_process_fp_mode()
/linux-6.3-rc2/fs/proc/array.c
  618: seq_put_decimal_ll(m, " ", task_cpu(task));  in do_task_stat()
/linux-6.3-rc2/arch/powerpc/kernel/process.c
 2118: unsigned long cpu = task_cpu(p);  in valid_irq_stack()
 2139: unsigned long cpu = task_cpu(p);  in valid_emergency_stack()