Lines matching refs:p (references to the task_struct pointer p)
221 static inline int task_has_idle_policy(struct task_struct *p) in task_has_idle_policy() argument
223 return idle_policy(p->policy); in task_has_idle_policy()
226 static inline int task_has_rt_policy(struct task_struct *p) in task_has_rt_policy() argument
228 return rt_policy(p->policy); in task_has_rt_policy()
231 static inline int task_has_dl_policy(struct task_struct *p) in task_has_dl_policy() argument
233 return dl_policy(p->policy); in task_has_dl_policy()
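The three policy predicates above (lines 221-233) simply classify p->policy. A minimal sketch of how a caller might branch on them, assuming the sched.h context; the helper name below is hypothetical and not part of the file:

    static const char *policy_class_name(struct task_struct *p)
    {
            if (task_has_dl_policy(p))
                    return "deadline";
            if (task_has_rt_policy(p))
                    return "realtime";
            if (task_has_idle_policy(p))
                    return "idle";
            return "fair";          /* SCHED_NORMAL/SCHED_BATCH or other */
    }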
354 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
355 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
356 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
358 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
391 struct task_struct *p);
562 extern bool cfs_task_bw_constrained(struct task_struct *p);
597 static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; } in cfs_task_bw_constrained() argument
1318 static inline bool is_migration_disabled(struct task_struct *p) in is_migration_disabled() argument
1320 return p->migration_disabled; in is_migration_disabled()
1327 #define task_rq(p) cpu_rq(task_cpu(p)) argument
1381 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
1389 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1395 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1398 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1418 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1422 struct task_struct *p, in sched_group_cookie_match() argument
1431 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { in sched_group_cookie_match()
1432 if (sched_core_cookie_match(cpu_rq(cpu), p)) in sched_group_cookie_match()
1438 static inline bool sched_core_enqueued(struct task_struct *p) in sched_core_enqueued() argument
1440 return !RB_EMPTY_NODE(&p->core_node); in sched_core_enqueued()
1443 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1444 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1471 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1476 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1482 struct task_struct *p, in sched_group_cookie_match() argument
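Lines 1389-1482 are the core-scheduling ("cookie") helpers and their stubs for kernels without core scheduling: a task may only share a core with tasks carrying the same cookie. A purely illustrative sketch of how a CPU-selection path could use them (the function below is not from sched.h and parallels the sched_group_cookie_match() loop listed above):

    static int first_cookie_compatible_cpu(struct sched_group *group, struct task_struct *p)
    {
            int cpu;

            /* Only CPUs the task may run on and whose core accepts its cookie. */
            for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
                    if (sched_core_cookie_match(cpu_rq(cpu), p))
                            return cpu;
            }
            return -1;
    }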
1576 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq() argument
1578 return p->se.cfs_rq; in task_cfs_rq()
1597 static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p) in task_cfs_rq() argument
1599 return &task_rq(p)->cfs; in task_cfs_rq()
1604 const struct task_struct *p = task_of(se); in cfs_rq_of() local
1605 struct rq *rq = task_rq(p); in cfs_rq_of()
1794 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1798 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1799 __acquires(p->pi_lock)
1810 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1812 __releases(p->pi_lock) in task_rq_unlock()
1816 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_unlock()
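Lines 1794-1816 are the task/runqueue locking helpers. A minimal sketch of the canonical usage pattern, assuming the sched.h context (the inspection function itself is hypothetical):

    static void inspect_task_state(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);      /* takes p->pi_lock, then the rq lock */
            /* ... p cannot change runqueue while both locks are held ... */
            task_rq_unlock(rq, p, &rf);
    }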
1934 extern void sched_setnuma(struct task_struct *p, int node);
1935 extern int migrate_task_to(struct task_struct *p, int cpu);
1936 extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1938 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1943 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
1969 #define rcu_dereference_check_sched_domain(p) \ argument
1970 rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex))
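The macro at lines 1969-1970 lets sched_domain pointers be dereferenced either under RCU or with sched_domains_mutex held. A hedged sketch of the usual traversal, modelled on the kernel's for_each_domain() loop (the wrapper function is illustrative only):

    static void walk_domains(int cpu)
    {
            struct sched_domain *sd;

            rcu_read_lock();
            for (sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); sd; sd = sd->parent) {
                    /* inspect each level of this CPU's sched_domain hierarchy */
            }
            rcu_read_unlock();
    }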
2107 static inline const struct cpumask *task_user_cpus(struct task_struct *p) in task_user_cpus() argument
2109 if (!p->user_cpus_ptr) in task_user_cpus()
2111 return p->user_cpus_ptr; in task_user_cpus()
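task_user_cpus() (lines 2107-2111) returns the affinity mask the user last requested, falling back to a default mask when none was set. An illustrative use, clamping a new mask to what the user asked for; the helper below is hypothetical:

    static void clamp_to_user_mask(struct task_struct *p, struct cpumask *new_mask)
    {
            /* Never allow more CPUs than the user-requested affinity. */
            cpumask_and(new_mask, new_mask, task_user_cpus(p));
    }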
2129 static inline struct task_group *task_group(struct task_struct *p) in task_group() argument
2131 return p->sched_task_group; in task_group()
2135 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) in set_task_rq() argument
2138 struct task_group *tg = task_group(p); in set_task_rq()
2142 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); in set_task_rq()
2143 p->se.cfs_rq = tg->cfs_rq[cpu]; in set_task_rq()
2144 p->se.parent = tg->se[cpu]; in set_task_rq()
2145 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; in set_task_rq()
2156 p->rt.rt_rq = tg->rt_rq[cpu]; in set_task_rq()
2157 p->rt.parent = tg->rt_se[cpu]; in set_task_rq()
2163 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } in set_task_rq() argument
2165 static inline struct task_group *task_group(struct task_struct *p) in task_group() argument
2172 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) in __set_task_cpu() argument
2174 set_task_rq(p, cpu); in __set_task_cpu()
2182 WRITE_ONCE(task_thread_info(p)->cpu, cpu); in __set_task_cpu()
2183 p->wake_cpu = cpu; in __set_task_cpu()
2246 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2248 return rq->curr == p; in task_current()
2257 static inline int task_current_donor(struct rq *rq, struct task_struct *p) in task_current_donor() argument
2259 return rq->donor == p; in task_current_donor()
2262 static inline bool task_is_blocked(struct task_struct *p) in task_is_blocked() argument
2267 return !!p->blocked_on; in task_is_blocked()
2270 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) in task_on_cpu() argument
2272 return p->on_cpu; in task_on_cpu()
2275 static inline int task_on_rq_queued(struct task_struct *p) in task_on_rq_queued() argument
2277 return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED; in task_on_rq_queued()
2280 static inline int task_on_rq_migrating(struct task_struct *p) in task_on_rq_migrating() argument
2282 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; in task_on_rq_migrating()
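Lines 2246-2282 are small state predicates over rq->curr/rq->donor and p->on_cpu/p->on_rq. An illustrative combination, e.g. when deciding whether a task could in principle be taken off its runqueue; this is a sketch, not a helper from sched.h:

    static bool task_looks_movable(struct rq *rq, struct task_struct *p)
    {
            /* Queued on the rq but not currently executing on a CPU. */
            return task_on_rq_queued(p) && !task_on_cpu(rq, p);
    }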
2375 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2376 bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2378 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2380 void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
2395 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
2396 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2398 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
2400 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
2404 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
2409 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2411 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2412 void (*task_fork)(struct task_struct *p);
2413 void (*task_dead)(struct task_struct *p);
2434 void (*task_change_group)(struct task_struct *p);
2438 int (*task_is_throttled)(struct task_struct *p, int cpu);
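Lines 2375-2438 are the struct sched_class callbacks that take a task pointer. A skeletal, purely illustrative sketch of how a scheduling class wires a few of them up; the my_* names are hypothetical, and a real class must also implement hooks not shown in this listing (e.g. picking the next task):

    static void my_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
    {
            /* add p to this class's runqueue structure */
    }

    static bool my_dequeue_task(struct rq *rq, struct task_struct *p, int flags)
    {
            /* remove p; return value follows the dequeue_task prototype above */
            return true;
    }

    static void my_task_tick(struct rq *rq, struct task_struct *p, int queued)
    {
            /* periodic accounting / preemption decisions for p */
    }

    const struct sched_class my_sched_class = {
            .enqueue_task   = my_enqueue_task,
            .dequeue_task   = my_dequeue_task,
            .task_tick      = my_task_tick,
            /* ... remaining hooks omitted ... */
    };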
2565 extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
2566 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
2568 static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) in task_allowed_on_cpu() argument
2571 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in task_allowed_on_cpu()
2575 if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p)) in task_allowed_on_cpu()
2593 struct task_struct *p = rq->donor; in get_push_task() local
2600 if (p->nr_cpus_allowed == 1) in get_push_task()
2603 if (p->migration_disabled) in get_push_task()
2607 return get_task_struct(p); in get_push_task()
2670 extern void post_init_entity_util_avg(struct task_struct *p);
2724 static inline void __block_task(struct rq *rq, struct task_struct *p) in __block_task() argument
2726 if (p->sched_contributes_to_load) in __block_task()
2729 if (p->in_iowait) { in __block_task()
2734 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in __block_task()
2764 smp_store_release(&p->on_rq, 0); in __block_task()
2767 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2768 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2770 extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
3062 extern void show_numa_stats(struct task_struct *p, struct seq_file *m);
3243 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) in dl_task_fits_capacity() argument
3247 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); in dl_task_fits_capacity()
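The fixed-point check at line 3247 compares CPU capacity with the task's deadline density. A worked example of the arithmetic, assuming (as elsewhere in the deadline code) that dl_density is dl_runtime/dl_deadline scaled by 2^BW_SHIFT and that capacity is on the SCHED_CAPACITY_SCALE (1024) scale; the numbers below are made up:

    /*
     * dl_runtime = 10 ms, dl_deadline = 40 ms
     *   dl_density                                        ~ 0.25 << BW_SHIFT
     *   dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT)   ~ 0.25 * 1024 = 256
     * so the task "fits" any CPU whose capacity is at least 256 out of 1024.
     */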
3271 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
3362 uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_value() argument
3432 extern void __setparam_fair(struct task_struct *p, const struct sched_attr *attr);
3487 static inline bool is_per_cpu_kthread(struct task_struct *p) in is_per_cpu_kthread() argument
3489 if (!(p->flags & PF_KTHREAD)) in is_per_cpu_kthread()
3492 if (p->nr_cpus_allowed != 1) in is_per_cpu_kthread()
3807 bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu) in task_is_pushable() argument
3809 if (!task_on_cpu(rq, p) && in task_is_pushable()
3810 cpumask_test_cpu(cpu, &p->cpus_mask)) in task_is_pushable()
3826 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
3828 struct task_struct *pi_task = rt_mutex_get_top_task(p); in rt_effective_prio()
3835 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
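Lines 3826-3835 show the two variants of rt_effective_prio(): with priority inheritance compiled in it consults rt_mutex_get_top_task(), otherwise it is a pass-through. A hedged sketch of the intended PI semantics (lower prio value means more urgent); the function name is illustrative:

    static int effective_prio_sketch(struct task_struct *p, int prio)
    {
            struct task_struct *pi_task = rt_mutex_get_top_task(p);

            /* A boosting waiter can only make the task more urgent, never less. */
            return pi_task ? min(prio, pi_task->prio) : prio;
    }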
3842 extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bo…
3843 extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3845 extern void set_load_weight(struct task_struct *p, bool update_load);
3846 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
3847 extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
3849 extern void check_class_changing(struct rq *rq, struct task_struct *p,
3851 extern void check_class_changed(struct rq *rq, struct task_struct *p,
3864 struct task_struct *p; member
3870 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,