Lines matching refs: p (references to the struct task_struct pointer p in kernel/sched/fair.c; the number at the start of each entry is that reference's source line in the file, and the trailing "in function()" names the enclosing function)

1067 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
1068 static unsigned long task_h_load(struct task_struct *p);
1117 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
1119 struct sched_entity *se = &p->se; in post_init_entity_util_avg()
1125 if (p->sched_class != &fair_sched_class) { in post_init_entity_util_avg()
1266 struct task_struct *p = NULL; in update_stats_wait_start_fair() local
1274 p = task_of(se); in update_stats_wait_start_fair()
1276 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
1283 struct task_struct *p = NULL; in update_stats_wait_end_fair() local
1300 p = task_of(se); in update_stats_wait_end_fair()
1302 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
1474 static struct numa_group *deref_task_numa_group(struct task_struct *p) in deref_task_numa_group() argument
1476 return rcu_dereference_check(p->numa_group, p == current || in deref_task_numa_group()
1477 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
1480 static struct numa_group *deref_curr_numa_group(struct task_struct *p) in deref_curr_numa_group() argument
1482 return rcu_dereference_protected(p->numa_group, p == current); in deref_curr_numa_group()
1488 static unsigned int task_nr_scan_windows(struct task_struct *p) in task_nr_scan_windows() argument
1499 rss = get_mm_rss(p->mm); in task_nr_scan_windows()
1510 static unsigned int task_scan_min(struct task_struct *p) in task_scan_min() argument
1520 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); in task_scan_min()
1524 static unsigned int task_scan_start(struct task_struct *p) in task_scan_start() argument
1526 unsigned long smin = task_scan_min(p); in task_scan_start()
1532 ng = rcu_dereference(p->numa_group); in task_scan_start()
1546 static unsigned int task_scan_max(struct task_struct *p) in task_scan_max() argument
1548 unsigned long smin = task_scan_min(p); in task_scan_max()
1553 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); in task_scan_max()
1556 ng = deref_curr_numa_group(p); in task_scan_max()
1572 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1574 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1575 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1578 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1580 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1581 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
1593 pid_t task_numa_group_id(struct task_struct *p) in task_numa_group_id() argument
1599 ng = rcu_dereference(p->numa_group); in task_numa_group_id()
1618 static inline unsigned long task_faults(struct task_struct *p, int nid) in task_faults() argument
1620 if (!p->numa_faults) in task_faults()
1623 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + in task_faults()
1624 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; in task_faults()
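task_faults() above (1618-1624) returns the sum of the two hint-fault buckets, shared and private, that p->numa_faults holds for one node. The task_faults_idx() layout itself is not visible in this listing, so the flat indexing sketched below is an assumption, and every demo_* name is an illustrative stand-in rather than a kernel symbol:

    /* Assumed flat faults[] layout for illustration: for each statistic class
     * and each node there are two buckets (shared, private). Not taken from
     * the listing above. */
    enum demo_faults_stats { DEMO_MEM, DEMO_CPU, DEMO_MEMBUF, DEMO_CPUBUF };
    #define DEMO_FAULT_TYPES 2  /* shared + private hint faults */

    static int demo_faults_idx(enum demo_faults_stats s, int nid, int priv, int nr_node_ids)
    {
        return DEMO_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
    }

    static unsigned long demo_task_faults(const unsigned long *faults, int nid, int nr_node_ids)
    {
        if (!faults)
            return 0;
        return faults[demo_faults_idx(DEMO_MEM, nid, 0, nr_node_ids)] +
               faults[demo_faults_idx(DEMO_MEM, nid, 1, nr_node_ids)];
    }
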
1627 static inline unsigned long group_faults(struct task_struct *p, int nid) in group_faults() argument
1629 struct numa_group *ng = deref_task_numa_group(p); in group_faults()
1681 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, in score_nearby_nodes() argument
1723 faults = task_faults(p, node); in score_nearby_nodes()
1725 faults = group_faults(p, node); in score_nearby_nodes()
1752 static inline unsigned long task_weight(struct task_struct *p, int nid, in task_weight() argument
1757 if (!p->numa_faults) in task_weight()
1760 total_faults = p->total_numa_faults; in task_weight()
1765 faults = task_faults(p, nid); in task_weight()
1766 faults += score_nearby_nodes(p, nid, dist, true); in task_weight()
1771 static inline unsigned long group_weight(struct task_struct *p, int nid, in group_weight() argument
1774 struct numa_group *ng = deref_task_numa_group(p); in group_weight()
1785 faults = group_faults(p, nid); in group_weight()
1786 faults += score_nearby_nodes(p, nid, dist, false); in group_weight()
1905 bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, in should_numa_migrate_memory() argument
1908 struct numa_group *ng = deref_curr_numa_group(p); in should_numa_migrate_memory()
1961 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && in should_numa_migrate_memory()
1962 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) in should_numa_migrate_memory()
1987 if (cpupid_match_pid(p, last_cpupid)) in should_numa_migrate_memory()
2010 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > in should_numa_migrate_memory()
2011 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; in should_numa_migrate_memory()
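The comparison quoted at 2010-2011 is the memory-migration hysteresis test, written with cross-multiplication so it stays in integer arithmetic: the group's CPU-faults-per-memory-fault ratio on the destination node must beat the same ratio on the source node by more than 4/3 before the folio is moved. A standalone restatement, with demo_* names as illustrative stand-ins:

    #include <stdbool.h>

    /* Migrate towards dst only if cpu_dst/mem_dst > (4/3) * cpu_src/mem_src,
     * expressed without division, exactly as on lines 2010-2011 above. */
    static bool demo_prefer_dst_node(unsigned long cpu_faults_dst, unsigned long mem_faults_dst,
                                     unsigned long cpu_faults_src, unsigned long mem_faults_src)
    {
        return cpu_faults_dst * mem_faults_src * 3 >
               cpu_faults_src * mem_faults_dst * 4;
    }

For example, 60 CPU faults against 40 memory faults on dst (ratio 1.5) versus 50 against 50 on src (ratio 1.0) gives 60*50*3 = 9000 > 50*40*4 = 8000, so migration is allowed; with only 45 CPU faults on dst (ratio 1.125, under the 4/3 threshold) it is refused.
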
2046 struct task_struct *p; member
2134 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
2154 struct task_struct *p, long imp) in task_numa_assign() argument
2166 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
2192 if (p) in task_numa_assign()
2193 get_task_struct(p); in task_numa_assign()
2195 env->best_task = p; in task_numa_assign()
2244 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
2267 if (cur == env->p) { in task_numa_compare()
2375 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
2471 load = task_h_load(env->p); in task_numa_find_cpu()
2479 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
2488 static int task_numa_migrate(struct task_struct *p) in task_numa_migrate() argument
2491 .p = p, in task_numa_migrate()
2493 .src_cpu = task_cpu(p), in task_numa_migrate()
2494 .src_nid = task_node(p), in task_numa_migrate()
2532 sched_setnuma(p, task_node(p)); in task_numa_migrate()
2536 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2538 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2539 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2541 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2542 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2555 ng = deref_curr_numa_group(p); in task_numa_migrate()
2558 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2564 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2565 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2569 taskimp = task_weight(p, nid, dist) - taskweight; in task_numa_migrate()
2570 groupimp = group_weight(p, nid, dist) - groupweight; in task_numa_migrate()
2595 if (nid != p->numa_preferred_nid) in task_numa_migrate()
2596 sched_setnuma(p, nid); in task_numa_migrate()
2601 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2607 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2610 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2614 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2618 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2624 static void numa_migrate_preferred(struct task_struct *p) in numa_migrate_preferred() argument
2629 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) in numa_migrate_preferred()
2633 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); in numa_migrate_preferred()
2634 p->numa_migrate_retry = jiffies + interval; in numa_migrate_preferred()
2637 if (task_node(p) == p->numa_preferred_nid) in numa_migrate_preferred()
2641 task_numa_migrate(p); in numa_migrate_preferred()
2687 static void update_task_scan_period(struct task_struct *p, in update_task_scan_period() argument
2694 unsigned long remote = p->numa_faults_locality[0]; in update_task_scan_period()
2695 unsigned long local = p->numa_faults_locality[1]; in update_task_scan_period()
2704 if (local + shared == 0 || p->numa_faults_locality[2]) { in update_task_scan_period()
2705 p->numa_scan_period = min(p->numa_scan_period_max, in update_task_scan_period()
2706 p->numa_scan_period << 1); in update_task_scan_period()
2708 p->mm->numa_next_scan = jiffies + in update_task_scan_period()
2709 msecs_to_jiffies(p->numa_scan_period); in update_task_scan_period()
2720 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); in update_task_scan_period()
2753 p->numa_scan_period = clamp(p->numa_scan_period + diff, in update_task_scan_period()
2754 task_scan_min(p), task_scan_max(p)); in update_task_scan_period()
2755 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in update_task_scan_period()
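The update_task_scan_period() fragments above (2694-2755) show its two branches: when the window recorded no local or shared faults, or any failed migrations (numa_faults_locality[2]), the scan period is doubled up to numa_scan_period_max and the mm-wide next scan is pushed out; otherwise a computed adjustment is applied and the result is clamped between task_scan_min() and task_scan_max(). A sketch of that backoff-and-clamp shape, with the adjustment passed in because its derivation is not part of this listing and all demo_* names illustrative:

    /* Shape of the scan-period update only; "diff" stands in for the
     * adjustment the kernel derives from the fault mix (not shown above). */
    static unsigned int demo_update_scan_period(unsigned int period, unsigned int period_max,
                                                unsigned long local, unsigned long shared,
                                                unsigned long failed, int diff,
                                                unsigned int smin, unsigned int smax)
    {
        if (local + shared == 0 || failed) {
            /* Back off: double the period, bounded by the per-task maximum. */
            unsigned int doubled = period << 1;
            return doubled < period_max ? doubled : period_max;
        }

        /* Apply the adjustment and clamp to [task_scan_min, task_scan_max]. */
        int adjusted = (int)period + diff;
        if (adjusted < (int)smin)
            return smin;
        if (adjusted > (int)smax)
            return smax;
        return (unsigned int)adjusted;
    }
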
2765 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) in numa_get_avg_runtime() argument
2769 now = p->se.exec_start; in numa_get_avg_runtime()
2770 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2772 if (p->last_task_numa_placement) { in numa_get_avg_runtime()
2773 delta = runtime - p->last_sum_exec_runtime; in numa_get_avg_runtime()
2774 *period = now - p->last_task_numa_placement; in numa_get_avg_runtime()
2780 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2784 p->last_sum_exec_runtime = runtime; in numa_get_avg_runtime()
2785 p->last_task_numa_placement = now; in numa_get_avg_runtime()
2795 static int preferred_group_nid(struct task_struct *p, int nid) in preferred_group_nid() argument
2816 score = group_weight(p, node, dist); in preferred_group_nid()
2852 faults += group_faults(p, b); in preferred_group_nid()
2878 static void task_numa_placement(struct task_struct *p) in task_numa_placement() argument
2893 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
2894 if (p->numa_scan_seq == seq) in task_numa_placement()
2896 p->numa_scan_seq = seq; in task_numa_placement()
2897 p->numa_scan_period_max = task_scan_max(p); in task_numa_placement()
2899 total_faults = p->numa_faults_locality[0] + in task_numa_placement()
2900 p->numa_faults_locality[1]; in task_numa_placement()
2901 runtime = numa_get_avg_runtime(p, &period); in task_numa_placement()
2904 ng = deref_curr_numa_group(p); in task_numa_placement()
2926 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; in task_numa_placement()
2927 fault_types[priv] += p->numa_faults[membuf_idx]; in task_numa_placement()
2928 p->numa_faults[membuf_idx] = 0; in task_numa_placement()
2938 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / in task_numa_placement()
2940 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; in task_numa_placement()
2941 p->numa_faults[cpubuf_idx] = 0; in task_numa_placement()
2943 p->numa_faults[mem_idx] += diff; in task_numa_placement()
2944 p->numa_faults[cpu_idx] += f_diff; in task_numa_placement()
2945 faults += p->numa_faults[mem_idx]; in task_numa_placement()
2946 p->total_numa_faults += diff; in task_numa_placement()
2979 max_nid = preferred_group_nid(p, max_nid); in task_numa_placement()
2984 if (max_nid != p->numa_preferred_nid) in task_numa_placement()
2985 sched_setnuma(p, max_nid); in task_numa_placement()
2988 update_task_scan_period(p, fault_types[0], fault_types[1]); in task_numa_placement()
3002 static void task_numa_group(struct task_struct *p, int cpupid, int flags, in task_numa_group() argument
3011 if (unlikely(!deref_curr_numa_group(p))) { in task_numa_group()
3024 grp->gid = p->pid; in task_numa_group()
3027 grp->faults[i] = p->numa_faults[i]; in task_numa_group()
3029 grp->total_faults = p->total_numa_faults; in task_numa_group()
3032 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
3045 my_grp = deref_curr_numa_group(p); in task_numa_group()
3085 my_grp->faults[i] -= p->numa_faults[i]; in task_numa_group()
3086 grp->faults[i] += p->numa_faults[i]; in task_numa_group()
3088 my_grp->total_faults -= p->total_numa_faults; in task_numa_group()
3089 grp->total_faults += p->total_numa_faults; in task_numa_group()
3097 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
3114 void task_numa_free(struct task_struct *p, bool final) in task_numa_free() argument
3117 struct numa_group *grp = rcu_dereference_raw(p->numa_group); in task_numa_free()
3118 unsigned long *numa_faults = p->numa_faults; in task_numa_free()
3128 grp->faults[i] -= p->numa_faults[i]; in task_numa_free()
3129 grp->total_faults -= p->total_numa_faults; in task_numa_free()
3133 RCU_INIT_POINTER(p->numa_group, NULL); in task_numa_free()
3138 p->numa_faults = NULL; in task_numa_free()
3141 p->total_numa_faults = 0; in task_numa_free()
3152 struct task_struct *p = current; in task_numa_fault() local
3163 if (!p->mm) in task_numa_fault()
3176 if (unlikely(!p->numa_faults)) { in task_numa_fault()
3177 int size = sizeof(*p->numa_faults) * in task_numa_fault()
3180 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); in task_numa_fault()
3181 if (!p->numa_faults) in task_numa_fault()
3184 p->total_numa_faults = 0; in task_numa_fault()
3185 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in task_numa_fault()
3195 priv = cpupid_match_pid(p, last_cpupid); in task_numa_fault()
3197 task_numa_group(p, last_cpupid, flags, &priv); in task_numa_fault()
3206 ng = deref_curr_numa_group(p); in task_numa_fault()
3216 if (time_after(jiffies, p->numa_migrate_retry)) { in task_numa_fault()
3217 task_numa_placement(p); in task_numa_fault()
3218 numa_migrate_preferred(p); in task_numa_fault()
3222 p->numa_pages_migrated += pages; in task_numa_fault()
3224 p->numa_faults_locality[2] += pages; in task_numa_fault()
3226 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
3227 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
3228 p->numa_faults_locality[local] += pages; in task_numa_fault()
3231 static void reset_ptenuma_scan(struct task_struct *p) in reset_ptenuma_scan() argument
3241 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
3242 p->mm->numa_scan_offset = 0; in reset_ptenuma_scan()
3291 struct task_struct *p = current; in task_numa_work() local
3292 struct mm_struct *mm = p->mm; in task_numa_work()
3293 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3302 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work)); in task_numa_work()
3313 if (p->flags & PF_EXITING) in task_numa_work()
3337 if (p->numa_scan_period == 0) { in task_numa_work()
3338 p->numa_scan_period_max = task_scan_max(p); in task_numa_work()
3339 p->numa_scan_period = task_scan_start(p); in task_numa_work()
3342 next_scan = now + msecs_to_jiffies(p->numa_scan_period); in task_numa_work()
3350 p->node_stamp += 2 * TICK_NSEC; in task_numa_work()
3374 reset_ptenuma_scan(p); in task_numa_work()
3530 reset_ptenuma_scan(p); in task_numa_work()
3539 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3540 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3541 p->node_stamp += 32 * diff; in task_numa_work()
3545 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
3548 struct mm_struct *mm = p->mm; in init_numa_balancing()
3557 p->node_stamp = 0; in init_numa_balancing()
3558 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; in init_numa_balancing()
3559 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in init_numa_balancing()
3560 p->numa_migrate_retry = 0; in init_numa_balancing()
3562 p->numa_work.next = &p->numa_work; in init_numa_balancing()
3563 p->numa_faults = NULL; in init_numa_balancing()
3564 p->numa_pages_migrated = 0; in init_numa_balancing()
3565 p->total_numa_faults = 0; in init_numa_balancing()
3566 RCU_INIT_POINTER(p->numa_group, NULL); in init_numa_balancing()
3567 p->last_task_numa_placement = 0; in init_numa_balancing()
3568 p->last_sum_exec_runtime = 0; in init_numa_balancing()
3570 init_task_work(&p->numa_work, task_numa_work); in init_numa_balancing()
3574 p->numa_preferred_nid = NUMA_NO_NODE; in init_numa_balancing()
3588 p->node_stamp = delay; in init_numa_balancing()
3625 static void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3627 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
3633 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) in update_scan_period()
3644 if (p->numa_scan_seq) { in update_scan_period()
3650 if (dst_nid == p->numa_preferred_nid || in update_scan_period()
3651 (p->numa_preferred_nid != NUMA_NO_NODE && in update_scan_period()
3652 src_nid != p->numa_preferred_nid)) in update_scan_period()
3656 p->numa_scan_period = task_scan_start(p); in update_scan_period()
3665 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
3669 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
3673 static inline void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3823 static void reweight_task_fair(struct rq *rq, struct task_struct *p, in reweight_task_fair() argument
3826 struct sched_entity *se = &p->se; in reweight_task_fair()
4811 static inline unsigned long task_util(struct task_struct *p) in task_util() argument
4813 return READ_ONCE(p->se.avg.util_avg); in task_util()
4816 static inline unsigned long task_runnable(struct task_struct *p) in task_runnable() argument
4818 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4821 static inline unsigned long _task_util_est(struct task_struct *p) in _task_util_est() argument
4823 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4826 static inline unsigned long task_util_est(struct task_struct *p) in task_util_est() argument
4828 return max(task_util(p), _task_util_est(p)); in task_util_est()
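task_util_est() above (4811-4828) sizes a task by the larger of its instantaneous PELT utilization and its stored estimate, after masking the UTIL_AVG_UNCHANGED flag bit out of the latter. A minimal sketch; the flag's value and the demo_* names are illustrative stand-ins, not taken from the kernel headers:

    #define DEMO_UTIL_AVG_UNCHANGED 0x1UL   /* illustrative flag bit, not the kernel's value */

    struct demo_sched_avg {
        unsigned long util_avg;   /* instantaneous PELT utilization */
        unsigned long util_est;   /* stored estimate with the flag folded in */
    };

    static unsigned long demo_task_util_est(const struct demo_sched_avg *avg)
    {
        unsigned long est = avg->util_est & ~DEMO_UTIL_AVG_UNCHANGED;   /* strip the flag */
        return avg->util_avg > est ? avg->util_avg : est;               /* max(util, est) */
    }
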
4832 struct task_struct *p) in util_est_enqueue() argument
4841 enqueued += _task_util_est(p); in util_est_enqueue()
4848 struct task_struct *p) in util_est_dequeue() argument
4857 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); in util_est_dequeue()
4866 struct task_struct *p, in util_est_update() argument
4882 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
4892 dequeued = task_util(p); in util_est_update()
4915 if ((dequeued + UTIL_EST_MARGIN) < task_runnable(p)) in util_est_update()
4940 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
4942 trace_sched_util_est_se_tp(&p->se); in util_est_update()
5073 static inline int task_fits_cpu(struct task_struct *p, int cpu) in task_fits_cpu() argument
5075 unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); in task_fits_cpu()
5076 unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); in task_fits_cpu()
5077 unsigned long util = task_util_est(p); in task_fits_cpu()
5085 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
5096 if (!p || (p->nr_cpus_allowed == 1) || in update_misfit_status()
5097 (arch_scale_cpu_capacity(cpu) == p->max_allowed_capacity) || in update_misfit_status()
5098 task_fits_cpu(p, cpu)) { in update_misfit_status()
5108 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
5111 void __setparam_fair(struct task_struct *p, const struct sched_attr *attr) in __setparam_fair() argument
5113 struct sched_entity *se = &p->se; in __setparam_fair()
5115 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setparam_fair()
6595 bool cfs_task_bw_constrained(struct task_struct *p) in cfs_task_bw_constrained() argument
6597 struct cfs_rq *cfs_rq = task_cfs_rq(p); in cfs_task_bw_constrained()
6611 static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) in sched_fair_update_stop_tick() argument
6630 if (cfs_task_bw_constrained(p)) in sched_fair_update_stop_tick()
6672 bool cfs_task_bw_constrained(struct task_struct *p) in cfs_task_bw_constrained() argument
6680 static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {} in sched_fair_update_stop_tick() argument
6688 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6690 struct sched_entity *se = &p->se; in hrtick_start_fair()
6692 WARN_ON_ONCE(task_rq(p) != rq); in hrtick_start_fair()
6700 if (task_current_donor(rq, p)) in hrtick_start_fair()
6724 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6824 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
6827 struct sched_entity *se = &p->se; in enqueue_task_fair()
6828 int h_nr_idle = task_has_idle_policy(p); in enqueue_task_fair()
6840 if (!p->se.sched_delayed || (flags & ENQUEUE_DELAYED)) in enqueue_task_fair()
6841 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
6853 if (p->in_iowait) in enqueue_task_fair()
6966 struct task_struct *p = NULL; in dequeue_entities() local
6974 p = task_of(se); in dequeue_entities()
6976 h_nr_idle = task_has_idle_policy(p); in dequeue_entities()
6985 if (p && &p->se == se) in dequeue_entities()
7051 if (p && task_delayed) { in dequeue_entities()
7053 WARN_ON_ONCE(p->on_rq != 1); in dequeue_entities()
7063 __block_task(rq, p); in dequeue_entities()
7074 static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
7076 if (!p->se.sched_delayed) in dequeue_task_fair()
7077 util_est_dequeue(&rq->cfs, p); in dequeue_task_fair()
7079 util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP); in dequeue_task_fair()
7080 if (dequeue_entities(rq, &p->se, flags) < 0) in dequeue_task_fair()
7132 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) in cpu_load_without() argument
7138 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7145 lsub_positive(&load, task_h_load(p)); in cpu_load_without()
7155 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) in cpu_runnable_without() argument
7161 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7168 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
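Both *_without() helpers above remove the waking task's own contribution with lsub_positive(), a subtraction that saturates at zero so the unsigned running sums cannot underflow when the tracked value lags behind. A one-function sketch of the idiom (the kernel's lsub_positive() is a macro; the demo_ name is a stand-in):

    /* Saturating subtraction as used by cpu_load_without() and
     * cpu_runnable_without() above: never drive the sum below zero. */
    static unsigned long demo_lsub_positive(unsigned long value, unsigned long sub)
    {
        return value > sub ? value - sub : 0;
    }
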
7178 static void record_wakee(struct task_struct *p) in record_wakee() argument
7189 if (current->last_wakee != p) { in record_wakee()
7190 current->last_wakee = p; in record_wakee()
7212 static int wake_wide(struct task_struct *p) in wake_wide() argument
7215 unsigned int slave = p->wakee_flips; in wake_wide()
7269 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
7286 task_load = task_h_load(p); in wake_affine_weight()
7311 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
7320 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
7322 schedstat_inc(p->stats.nr_wakeups_affine_attempts); in wake_affine()
7327 schedstat_inc(p->stats.nr_wakeups_affine); in wake_affine()
7332 sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
7338 sched_balance_find_dst_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in sched_balance_find_dst_group_cpu() argument
7352 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in sched_balance_find_dst_group_cpu()
7355 if (!sched_core_cookie_match(rq, p)) in sched_balance_find_dst_group_cpu()
7394 static inline int sched_balance_find_dst_cpu(struct sched_domain *sd, struct task_struct *p, in sched_balance_find_dst_cpu() argument
7399 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in sched_balance_find_dst_cpu()
7407 sync_entity_load_avg(&p->se); in sched_balance_find_dst_cpu()
7419 group = sched_balance_find_dst_group(sd, p, cpu); in sched_balance_find_dst_cpu()
7425 new_cpu = sched_balance_find_dst_group_cpu(group, p, cpu); in sched_balance_find_dst_cpu()
7447 static inline int __select_idle_cpu(int cpu, struct task_struct *p) in __select_idle_cpu() argument
7450 sched_cpu_cookie_match(cpu_rq(cpu), p)) in __select_idle_cpu()
7514 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) in select_idle_core() argument
7545 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
7549 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) { in select_idle_smt()
7576 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle… in select_idle_core() argument
7578 return __select_idle_cpu(core, p); in select_idle_core()
7581 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
7593 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int … in select_idle_cpu() argument
7599 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
7621 i = select_idle_core(p, cpu, cpus, &idle_cpu); in select_idle_cpu()
7627 idle_cpu = __select_idle_cpu(cpu, p); in select_idle_cpu()
7638 i = select_idle_core(p, cpu, cpus, &idle_cpu); in select_idle_cpu()
7645 idle_cpu = __select_idle_cpu(cpu, p); in select_idle_cpu()
7663 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_capacity() argument
7671 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
7673 task_util = task_util_est(p); in select_idle_capacity()
7674 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_capacity()
7675 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_capacity()
7728 static int select_idle_sibling(struct task_struct *p, int prev, int target) in select_idle_sibling() argument
7740 sync_entity_load_avg(&p->se); in select_idle_sibling()
7741 task_util = task_util_est(p); in select_idle_sibling()
7742 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_sibling()
7743 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_sibling()
7786 recent_used_cpu = p->recent_used_cpu; in select_idle_sibling()
7787 p->recent_used_cpu = prev; in select_idle_sibling()
7792 cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && in select_idle_sibling()
7818 i = select_idle_capacity(p, sd, target); in select_idle_sibling()
7831 i = select_idle_smt(p, sd, prev); in select_idle_sibling()
7837 i = select_idle_cpu(p, sd, has_idle_core, target); in select_idle_sibling()
7897 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) in cpu_util() argument
7914 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7915 lsub_positive(&util, task_util(p)); in cpu_util()
7916 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7917 util += task_util(p); in cpu_util()
7951 util_est += _task_util_est(p); in cpu_util()
7952 else if (p && unlikely(task_on_rq_queued(p) || current == p)) in cpu_util()
7953 lsub_positive(&util_est, _task_util_est(p)); in cpu_util()
7984 static unsigned long cpu_util_without(int cpu, struct task_struct *p) in cpu_util_without() argument
7987 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7988 p = NULL; in cpu_util_without()
7990 return cpu_util(cpu, p, -1, 0); in cpu_util_without()
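The cpu_util() fragments above (7897-7953) implement the "as if p ran on dst_cpu" view: p's utilization is subtracted from the CPU it is currently accounted on when some other CPU is the candidate, added when this CPU is the candidate, and the util_est terms get the matching adjustment; cpu_util_without() is just the p == NULL fast path when p is not accounted on that CPU at all. A reduced sketch of the utilization adjustment only, ignoring boost and util_est, with demo_* names as stand-ins:

    /* Estimate cpu's utilization as if task p were running on dst_cpu,
     * mirroring the two conditions on lines 7914-7917 above. */
    static unsigned long demo_cpu_util(unsigned long cpu_util_avg, unsigned long task_util,
                                       int cpu, int task_cpu, int dst_cpu)
    {
        unsigned long util = cpu_util_avg;

        if (task_cpu == cpu && dst_cpu != cpu)          /* p would leave this CPU */
            util = util > task_util ? util - task_util : 0;
        else if (task_cpu != cpu && dst_cpu == cpu)     /* p would arrive on this CPU */
            util += task_util;

        return util;
    }
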
8115 struct task_struct *p, int prev_cpu) in eenv_task_busy_time() argument
8123 busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap); in eenv_task_busy_time()
8151 struct task_struct *p) in eenv_pd_busy_time() argument
8157 unsigned long util = cpu_util(cpu, p, -1, 0); in eenv_pd_busy_time()
8174 struct task_struct *p, int dst_cpu) in eenv_pd_max_util() argument
8180 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; in eenv_pd_max_util()
8181 unsigned long util = cpu_util(cpu, p, dst_cpu, 1); in eenv_pd_max_util()
8195 min = max(min, uclamp_eff_value(p, UCLAMP_MIN)); in eenv_pd_max_util()
8202 max = uclamp_eff_value(p, UCLAMP_MAX); in eenv_pd_max_util()
8204 max = max(max, uclamp_eff_value(p, UCLAMP_MAX)); in eenv_pd_max_util()
8221 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) in compute_energy() argument
8223 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); in compute_energy()
8232 trace_sched_compute_energy_tp(p, dst_cpu, energy, max_util, busy_time); in compute_energy()
8276 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) in find_energy_efficient_cpu() argument
8280 unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0; in find_energy_efficient_cpu()
8281 unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024; in find_energy_efficient_cpu()
8308 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
8309 if (!task_util_est(p) && p_util_min == 0) in find_energy_efficient_cpu()
8312 eenv_task_busy_time(&eenv, p, prev_cpu); in find_energy_efficient_cpu()
8343 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in find_energy_efficient_cpu()
8346 util = cpu_util(cpu, p, cpu, 0); in find_energy_efficient_cpu()
8397 eenv_pd_busy_time(&eenv, cpus, p); in find_energy_efficient_cpu()
8399 base_energy = compute_energy(&eenv, pd, cpus, p, -1); in find_energy_efficient_cpu()
8403 prev_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
8427 cur_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
8474 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_fair() argument
8487 lockdep_assert_held(&p->pi_lock); in select_task_rq_fair()
8489 record_wakee(p); in select_task_rq_fair()
8492 cpumask_test_cpu(cpu, p->cpus_ptr)) in select_task_rq_fair()
8496 new_cpu = find_energy_efficient_cpu(p, prev_cpu); in select_task_rq_fair()
8502 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); in select_task_rq_fair()
8514 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); in select_task_rq_fair()
8533 new_cpu = sched_balance_find_dst_cpu(sd, p, cpu, prev_cpu, sd_flag); in select_task_rq_fair()
8536 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); in select_task_rq_fair()
8548 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) in migrate_task_rq_fair() argument
8550 struct sched_entity *se = &p->se; in migrate_task_rq_fair()
8552 if (!task_on_rq_migrating(p)) { in migrate_task_rq_fair()
8571 update_scan_period(p, new_cpu); in migrate_task_rq_fair()
8574 static void task_dead_fair(struct task_struct *p) in task_dead_fair() argument
8576 struct sched_entity *se = &p->se; in task_dead_fair()
8582 rq = task_rq_lock(p, &rf); in task_dead_fair()
8587 task_rq_unlock(rq, p, &rf); in task_dead_fair()
8596 static void set_task_max_allowed_capacity(struct task_struct *p) in set_task_max_allowed_capacity() argument
8608 if (!cpumask_intersects(p->cpus_ptr, cpumask)) in set_task_max_allowed_capacity()
8611 p->max_allowed_capacity = entry->capacity; in set_task_max_allowed_capacity()
8617 static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context *ctx) in set_cpus_allowed_fair() argument
8619 set_cpus_allowed_common(p, ctx); in set_cpus_allowed_fair()
8620 set_task_max_allowed_capacity(p); in set_cpus_allowed_fair()
8646 static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup_fair() argument
8649 struct sched_entity *se = &donor->se, *pse = &p->se; in check_preempt_wakeup_fair()
8711 if (unlikely(!normal_policy(p->policy))) in check_preempt_wakeup_fair()
8767 static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
8768 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
8774 struct task_struct *p; in pick_next_task_fair() local
8778 p = pick_task_fair(rq); in pick_next_task_fair()
8779 if (!p) in pick_next_task_fair()
8781 se = &p->se; in pick_next_task_fair()
8787 __put_prev_set_next_dl_server(rq, prev, p); in pick_next_task_fair()
8800 if (prev != p) { in pick_next_task_fair()
8821 __set_next_task_fair(rq, p, true); in pick_next_task_fair()
8824 return p; in pick_next_task_fair()
8828 put_prev_set_next_task(rq, prev, p); in pick_next_task_fair()
8829 return p; in pick_next_task_fair()
8927 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) in yield_to_task_fair() argument
8929 struct sched_entity *se = &p->se; in yield_to_task_fair()
9151 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
9157 if (p->sched_class != &fair_sched_class) in task_hot()
9160 if (unlikely(task_has_idle_policy(p))) in task_hot()
9171 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
9181 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
9187 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9198 static long migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
9200 struct numa_group *numa_group = rcu_dereference(p->numa_group); in migrate_degrades_locality()
9207 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
9217 if (src_nid == p->numa_preferred_nid) { in migrate_degrades_locality()
9225 if (dst_nid == p->numa_preferred_nid) in migrate_degrades_locality()
9234 src_weight = group_weight(p, src_nid, dist); in migrate_degrades_locality()
9235 dst_weight = group_weight(p, dst_nid, dist); in migrate_degrades_locality()
9237 src_weight = task_weight(p, src_nid, dist); in migrate_degrades_locality()
9238 dst_weight = task_weight(p, dst_nid, dist); in migrate_degrades_locality()
9245 static inline long migrate_degrades_locality(struct task_struct *p, in migrate_degrades_locality() argument
9260 static inline int task_is_ineligible_on_dst_cpu(struct task_struct *p, int dest_cpu) in task_is_ineligible_on_dst_cpu() argument
9265 dst_cfs_rq = task_group(p)->cfs_rq[dest_cpu]; in task_is_ineligible_on_dst_cpu()
9270 !entity_eligible(task_cfs_rq(p), &p->se)) in task_is_ineligible_on_dst_cpu()
9280 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
9285 if (p->sched_task_hot) in can_migrate_task()
9286 p->sched_task_hot = 0; in can_migrate_task()
9297 if ((p->se.sched_delayed) && (env->migration_type != migrate_load)) in can_migrate_task()
9300 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
9310 task_is_ineligible_on_dst_cpu(p, env->dst_cpu)) in can_migrate_task()
9314 if (kthread_is_per_cpu(p)) in can_migrate_task()
9317 if (task_is_blocked(p)) in can_migrate_task()
9320 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
9323 schedstat_inc(p->stats.nr_failed_migrations_affine); in can_migrate_task()
9342 cpu = cpumask_first_and_and(env->dst_grpmask, env->cpus, p->cpus_ptr); in can_migrate_task()
9355 if (task_on_cpu(env->src_rq, p) || in can_migrate_task()
9356 task_current_donor(env->src_rq, p)) { in can_migrate_task()
9357 schedstat_inc(p->stats.nr_failed_migrations_running); in can_migrate_task()
9371 degrades = migrate_degrades_locality(p, env); in can_migrate_task()
9373 hot = task_hot(p, env); in can_migrate_task()
9379 p->sched_task_hot = 1; in can_migrate_task()
9383 schedstat_inc(p->stats.nr_failed_migrations_hot); in can_migrate_task()
9390 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
9394 if (p->sched_task_hot) { in detach_task()
9395 p->sched_task_hot = 0; in detach_task()
9397 schedstat_inc(p->stats.nr_forced_migrations); in detach_task()
9400 WARN_ON(task_current(env->src_rq, p)); in detach_task()
9401 WARN_ON(task_current_donor(env->src_rq, p)); in detach_task()
9403 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
9404 set_task_cpu(p, env->dst_cpu); in detach_task()
9415 struct task_struct *p; in detach_one_task() local
9419 list_for_each_entry_reverse(p, in detach_one_task()
9421 if (!can_migrate_task(p, env)) in detach_one_task()
9424 detach_task(p, env); in detach_one_task()
9433 return p; in detach_one_task()
9448 struct task_struct *p; in detach_tasks() local
9485 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
9487 if (!can_migrate_task(p, env)) in detach_tasks()
9499 load = max_t(unsigned long, task_h_load(p), 1); in detach_tasks()
9518 util = task_util_est(p); in detach_tasks()
9532 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
9539 detach_task(p, env); in detach_tasks()
9540 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9563 if (p->sched_task_hot) in detach_tasks()
9564 schedstat_inc(p->stats.nr_failed_migrations_hot); in detach_tasks()
9566 list_move(&p->se.group_node, tasks); in detach_tasks()
9582 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
9586 WARN_ON_ONCE(task_rq(p) != rq); in attach_task()
9587 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
9588 wakeup_preempt(rq, p, 0); in attach_task()
9595 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
9601 attach_task(rq, p); in attach_one_task()
9612 struct task_struct *p; in attach_tasks() local
9619 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9620 list_del_init(&p->se.group_node); in attach_tasks()
9622 attach_task(env->dst_rq, p); in attach_tasks()
9773 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
9775 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load()
9778 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9794 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
9796 return p->se.avg.load_avg; in task_h_load()
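The two task_h_load() variants above give a task's load contribution: with group scheduling it is the task's own load_avg scaled by the cfs_rq's hierarchical h_load share, and otherwise it is p->se.avg.load_avg directly. The divisor of the div64_ul() on line 9778 falls on a line this listing omits (it does not reference p); treating it as the cfs_rq's load average plus one is an assumption here, and demo_task_h_load is an illustrative stand-in:

    /* Hierarchical task load: scale the task's load_avg by the group
     * cfs_rq's h_load share. The "+ 1" divisor is an assumption about the
     * argument elided from line 9778 above. */
    static unsigned long demo_task_h_load(unsigned long task_load_avg,
                                          unsigned long cfs_rq_h_load,
                                          unsigned long cfs_rq_load_avg)
    {
        return (task_load_avg * cfs_rq_h_load) / (cfs_rq_load_avg + 1);
    }
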
10530 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) in task_running_on_cpu() argument
10533 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
10536 if (task_on_rq_queued(p)) in task_running_on_cpu()
10549 static int idle_cpu_without(int cpu, struct task_struct *p) in idle_cpu_without() argument
10553 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
10578 struct task_struct *p) in update_sg_wakeup_stats() argument
10592 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
10593 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
10594 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
10595 local = task_running_on_cpu(i, p); in update_sg_wakeup_stats()
10604 if (!nr_running && idle_cpu_without(i, p)) in update_sg_wakeup_stats()
10610 task_fits_cpu(p, i)) in update_sg_wakeup_stats()
10690 sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in sched_balance_find_dst_group() argument
10706 p->cpus_ptr)) in sched_balance_find_dst_group()
10710 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in sched_balance_find_dst_group()
10723 update_sg_wakeup_stats(sd, group, sgs, p); in sched_balance_find_dst_group()
10809 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) in sched_balance_find_dst_group()
10813 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) in sched_balance_find_dst_group()
10825 if (p->nr_cpus_allowed != NR_CPUS) { in sched_balance_find_dst_group()
10828 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr); in sched_balance_find_dst_group()
11968 struct task_struct *p = NULL; in active_load_balance_cpu_stop() local
12017 p = detach_one_task(&env); in active_load_balance_cpu_stop()
12018 if (p) { in active_load_balance_cpu_stop()
12031 if (p) in active_load_balance_cpu_stop()
12032 attach_one_task(target_rq, p); in active_load_balance_cpu_stop()
12940 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) in task_vruntime_update() argument
12942 struct sched_entity *se = &p->se; in task_vruntime_update()
12944 if (p->sched_class != &fair_sched_class) in task_vruntime_update()
12998 static int task_is_throttled_fair(struct task_struct *p, int cpu) in task_is_throttled_fair() argument
13003 cfs_rq = task_group(p)->cfs_rq[cpu]; in task_is_throttled_fair()
13045 static void task_fork_fair(struct task_struct *p) in task_fork_fair() argument
13047 set_task_max_allowed_capacity(p); in task_fork_fair()
13055 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
13057 if (!task_on_rq_queued(p)) in prio_changed_fair()
13068 if (task_current_donor(rq, p)) { in prio_changed_fair()
13069 if (p->prio > oldprio) in prio_changed_fair()
13072 wakeup_preempt(rq, p, 0); in prio_changed_fair()
13140 static void detach_task_cfs_rq(struct task_struct *p) in detach_task_cfs_rq() argument
13142 struct sched_entity *se = &p->se; in detach_task_cfs_rq()
13147 static void attach_task_cfs_rq(struct task_struct *p) in attach_task_cfs_rq() argument
13149 struct sched_entity *se = &p->se; in attach_task_cfs_rq()
13154 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
13156 detach_task_cfs_rq(p); in switched_from_fair()
13159 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
13161 WARN_ON_ONCE(p->se.sched_delayed); in switched_to_fair()
13163 attach_task_cfs_rq(p); in switched_to_fair()
13165 set_task_max_allowed_capacity(p); in switched_to_fair()
13167 if (task_on_rq_queued(p)) { in switched_to_fair()
13173 if (task_current_donor(rq, p)) in switched_to_fair()
13176 wakeup_preempt(rq, p, 0); in switched_to_fair()
13180 static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in __set_next_task_fair() argument
13182 struct sched_entity *se = &p->se; in __set_next_task_fair()
13184 if (task_on_rq_queued(p)) { in __set_next_task_fair()
13197 hrtick_start_fair(rq, p); in __set_next_task_fair()
13199 update_misfit_status(p, rq); in __set_next_task_fair()
13200 sched_fair_update_stop_tick(rq, p); in __set_next_task_fair()
13209 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
13211 struct sched_entity *se = &p->se; in set_next_task_fair()
13221 __set_next_task_fair(rq, p, first); in set_next_task_fair()
13232 static void task_change_group_fair(struct task_struct *p) in task_change_group_fair() argument
13238 if (READ_ONCE(p->__state) == TASK_NEW) in task_change_group_fair()
13241 detach_task_cfs_rq(p); in task_change_group_fair()
13244 p->se.avg.last_update_time = 0; in task_change_group_fair()
13245 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
13246 attach_task_cfs_rq(p); in task_change_group_fair()
13586 void show_numa_stats(struct task_struct *p, struct seq_file *m) in show_numa_stats() argument
13593 ng = rcu_dereference(p->numa_group); in show_numa_stats()
13595 if (p->numa_faults) { in show_numa_stats()
13596 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; in show_numa_stats()
13597 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; in show_numa_stats()