Lines matching references to p (a struct task_struct pointer), apparently taken from kernel/sched/core.c. Each entry lists the source line number, the matching code, and the enclosing function; "argument" or "local" marks the line on which p is declared within that function.
189 static inline int __task_prio(const struct task_struct *p) in __task_prio() argument
191 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
194 if (p->dl_server) in __task_prio()
197 if (rt_or_dl_prio(p->prio)) in __task_prio()
198 return p->prio; /* [-1, 99] */ in __task_prio()
200 if (p->sched_class == &idle_sched_class) in __task_prio()
203 if (task_on_scx(p)) in __task_prio()
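The __task_prio() hits above are half of core scheduling's cross-class comparison: every task is folded into a single integer where a smaller value means a more important task (the stop class "trumps deadline", deadline sits below RT, RT below fair and idle), so prio_less() can compare tasks from different scheduling classes. A minimal sketch of that folding idea follows; example_class, example_fold_prio() and the bucket constants are hypothetical, not the kernel's values:

    /* Hypothetical sketch: fold (class, priority) into one comparable key,
     * where a smaller key means a more important task. Ties between equal
     * keys are broken by per-class rules in the real code. */
    enum example_class { EX_STOP, EX_DEADLINE, EX_RT, EX_FAIR, EX_IDLE };

    static int example_fold_prio(enum example_class cls, int rt_prio)
    {
            switch (cls) {
            case EX_STOP:     return -2;        /* "trumps deadline" */
            case EX_DEADLINE: return -1;
            case EX_RT:       return rt_prio;   /* 0..99, lower is better */
            case EX_FAIR:     return 120;       /* illustrative bucket */
            case EX_IDLE:     return 140;       /* illustrative bucket */
            }
            return 140;
    }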
284 const struct task_struct *p = __node_2_sc(node); in rb_sched_core_cmp() local
287 if (cookie < p->core_cookie) in rb_sched_core_cmp()
290 if (cookie > p->core_cookie) in rb_sched_core_cmp()
296 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
298 if (p->se.sched_delayed) in sched_core_enqueue()
303 if (!p->core_cookie) in sched_core_enqueue()
306 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
309 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
311 if (p->se.sched_delayed) in sched_core_dequeue()
316 if (sched_core_enqueued(p)) { in sched_core_dequeue()
317 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
318 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
331 static int sched_task_is_throttled(struct task_struct *p, int cpu) in sched_task_is_throttled() argument
333 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
334 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
339 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) in sched_core_next() argument
341 struct rb_node *node = &p->core_node; in sched_core_next()
342 int cpu = task_cpu(p); in sched_core_next()
349 p = __node_2_sc(node); in sched_core_next()
350 if (p->core_cookie != cookie) in sched_core_next()
353 } while (sched_task_is_throttled(p, cpu)); in sched_core_next()
355 return p; in sched_core_next()
364 struct task_struct *p; in sched_core_find() local
371 p = __node_2_sc(node); in sched_core_find()
372 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
373 return p; in sched_core_find()
375 return sched_core_next(p, cookie); in sched_core_find()
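sched_core_enqueue()/sched_core_dequeue() above keep cookied tasks in rq->core_tree, and sched_core_find()/sched_core_next() walk the tasks sharing one cookie, skipping throttled ones. The tree itself is driven by the generic rb_add()/rb_erase()/rb_find() helpers plus a cookie comparison. A self-contained sketch of that pattern; struct item, node_to_item(), item_less() and item_cmp() are hypothetical names:

    #include <linux/rbtree.h>

    struct item {                           /* hypothetical node type */
            unsigned long   cookie;
            struct rb_node  node;
    };

    #define node_to_item(n) rb_entry((n), struct item, node)

    static bool item_less(struct rb_node *a, const struct rb_node *b)
    {
            return node_to_item(a)->cookie < node_to_item(b)->cookie;
    }

    static int item_cmp(const void *key, const struct rb_node *n)
    {
            unsigned long cookie = (unsigned long)key;

            if (cookie < node_to_item(n)->cookie)
                    return -1;
            if (cookie > node_to_item(n)->cookie)
                    return 1;
            return 0;
    }

    /* Usage, mirroring the enqueue/find/dequeue lines above:
     *   rb_add(&it->node, &tree, item_less);
     *   struct rb_node *n = rb_find((void *)cookie, &tree, item_cmp);
     *   rb_erase(&it->node, &tree);
     */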
516 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } in sched_core_enqueue() argument
518 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } in sched_core_dequeue() argument
703 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock() argument
708 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
711 rq = task_rq(p); in __task_rq_lock()
713 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
719 while (unlikely(task_on_rq_migrating(p))) in __task_rq_lock()
727 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock() argument
728 __acquires(p->pi_lock) in task_rq_lock()
734 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
735 rq = task_rq(p); in task_rq_lock()
754 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
759 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
761 while (unlikely(task_on_rq_migrating(p))) in task_rq_lock()
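__task_rq_lock()/task_rq_lock() above implement the retry loop that pins a task to its runqueue: take p->pi_lock (task_rq_lock() only), take the rq lock, and only proceed once the rq still matches task_rq(p) and the task is not mid-migration; otherwise drop the lock and spin while task_on_rq_migrating(p). The usual calling pattern, as seen further down in this listing (uclamp_update_active(), sched_setnuma(), task_sched_runtime()), is roughly:

    struct rq_flags rf;
    struct rq *rq;

    rq = task_rq_lock(p, &rf);      /* p->pi_lock + rq lock, IRQs disabled */

    /* here p cannot switch runqueues or complete a migration */

    task_rq_unlock(rq, p, &rf);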
974 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
976 struct thread_info *ti = task_thread_info(p); in set_nr_if_polling()
996 static inline bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
1318 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p) in __need_bw_check() argument
1323 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1326 if (!task_on_rq_queued(p)) in __need_bw_check()
1430 void set_load_weight(struct task_struct *p, bool update_load) in set_load_weight() argument
1432 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1435 if (task_has_idle_policy(p)) { in set_load_weight()
1447 if (update_load && p->sched_class->reweight_task) in set_load_weight()
1448 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
1450 p->se.load = lw; in set_load_weight()
1560 static void __uclamp_update_util_min_rt_default(struct task_struct *p) in __uclamp_update_util_min_rt_default() argument
1565 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1567 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1577 static void uclamp_update_util_min_rt_default(struct task_struct *p) in uclamp_update_util_min_rt_default() argument
1579 if (!rt_task(p)) in uclamp_update_util_min_rt_default()
1583 guard(task_rq_lock)(p); in uclamp_update_util_min_rt_default()
1584 __uclamp_update_util_min_rt_default(p); in uclamp_update_util_min_rt_default()
1588 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_tg_restrict() argument
1591 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1599 if (task_group_is_autogroup(task_group(p))) in uclamp_tg_restrict()
1601 if (task_group(p) == &root_task_group) in uclamp_tg_restrict()
1604 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1605 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1623 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_get() argument
1625 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); in uclamp_eff_get()
1635 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_value() argument
1640 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1641 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1643 uc_eff = uclamp_eff_get(p, clamp_id); in uclamp_eff_value()
1658 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1662 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1668 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1696 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1700 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1762 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) in uclamp_rq_inc() argument
1775 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1779 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED)) in uclamp_rq_inc()
1783 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1790 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1803 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1806 if (p->se.sched_delayed) in uclamp_rq_dec()
1810 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1813 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_reinc_id() argument
1816 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1819 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1820 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1831 uclamp_update_active(struct task_struct *p) in uclamp_update_active() argument
1845 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1854 uclamp_rq_reinc_id(rq, p, clamp_id); in uclamp_update_active()
1856 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1864 struct task_struct *p; in uclamp_update_active_tasks() local
1867 while ((p = css_task_iter_next(&it))) in uclamp_update_active_tasks()
1868 uclamp_update_active(p); in uclamp_update_active_tasks()
1895 struct task_struct *g, *p; in uclamp_sync_util_min_rt_default() local
1915 for_each_process_thread(g, p) in uclamp_sync_util_min_rt_default()
1916 uclamp_update_util_min_rt_default(p); in uclamp_sync_util_min_rt_default()
1982 static void uclamp_fork(struct task_struct *p) in uclamp_fork() argument
1991 p->uclamp[clamp_id].active = false; in uclamp_fork()
1993 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1997 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2002 static void uclamp_post_fork(struct task_struct *p) in uclamp_post_fork() argument
2004 uclamp_update_util_min_rt_default(p); in uclamp_post_fork()
2047 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { } in uclamp_rq_inc() argument
2048 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
2049 static inline void uclamp_fork(struct task_struct *p) { } in uclamp_fork() argument
2050 static inline void uclamp_post_fork(struct task_struct *p) { } in uclamp_post_fork() argument
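The uclamp hits above compute a task's effective utilization clamp: uclamp_tg_restrict() narrows the per-task request (p->uclamp_req) to its task group's [min, max] window, and uclamp_eff_get()/uclamp_eff_value() then apply the system-wide default as a final cap. Roughly, as a hedged sketch; effective_uclamp() and its parameters are hypothetical names:

    /* Sketch of the composition done by uclamp_eff_get():
     * task request -> clamp into the cgroup window -> cap by the system default. */
    static unsigned long effective_uclamp(unsigned long task_req,
                                          unsigned long tg_min, unsigned long tg_max,
                                          unsigned long sys_default)
    {
            unsigned long v = clamp(task_req, tg_min, tg_max);

            return min(v, sys_default);
    }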
2054 bool sched_task_on_rq(struct task_struct *p) in sched_task_on_rq() argument
2056 return task_on_rq_queued(p); in sched_task_on_rq()
2059 unsigned long get_wchan(struct task_struct *p) in get_wchan() argument
2064 if (!p || p == current) in get_wchan()
2068 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2069 state = READ_ONCE(p->__state); in get_wchan()
2071 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2072 ip = __get_wchan(p); in get_wchan()
2073 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2078 void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
2088 uclamp_rq_inc(rq, p, flags); in enqueue_task()
2090 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2092 psi_enqueue(p, flags); in enqueue_task()
2095 sched_info_enqueue(rq, p); in enqueue_task()
2098 sched_core_enqueue(rq, p); in enqueue_task()
2104 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
2107 sched_core_dequeue(rq, p, flags); in dequeue_task()
2113 sched_info_dequeue(rq, p); in dequeue_task()
2115 psi_dequeue(p, flags); in dequeue_task()
2121 uclamp_rq_dec(rq, p); in dequeue_task()
2122 return p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2125 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
2127 if (task_on_rq_migrating(p)) in activate_task()
2130 sched_mm_cid_migrate_to(rq, p); in activate_task()
2132 enqueue_task(rq, p, flags); in activate_task()
2134 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); in activate_task()
2135 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in activate_task()
2138 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
2142 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); in deactivate_task()
2143 ASSERT_EXCLUSIVE_WRITER(p->on_rq); in deactivate_task()
2150 dequeue_task(rq, p, flags); in deactivate_task()
2153 static void block_task(struct rq *rq, struct task_struct *p, int flags) in block_task() argument
2155 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags)) in block_task()
2156 __block_task(rq, p); in block_task()
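enqueue_task()/dequeue_task() above call their bookkeeping hooks in mirrored order, and activate_task()/deactivate_task()/block_task() wrap them with the p->on_rq state transitions. Summarized as a comment-only sketch of what the lines above already show:

    /* enqueue_task(): uclamp_rq_inc() -> class->enqueue_task()
     *                 -> psi_enqueue() -> sched_info_enqueue()
     *                 -> sched_core_enqueue()
     *
     * dequeue_task(): sched_core_dequeue() -> sched_info_dequeue()
     *                 -> psi_dequeue() -> uclamp_rq_dec()
     *                 -> class->dequeue_task()
     *
     * activate_task():   enqueue + WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED)
     * deactivate_task(): WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING) + dequeue
     * block_task():      dequeue_task(..., DEQUEUE_SLEEP | flags) + __block_task()
     */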
2165 inline int task_curr(const struct task_struct *p) in task_curr() argument
2167 return cpu_curr(task_cpu(p)) == p; in task_curr()
2174 void check_class_changing(struct rq *rq, struct task_struct *p, in check_class_changing() argument
2177 if (prev_class != p->sched_class && p->sched_class->switching_to) in check_class_changing()
2178 p->sched_class->switching_to(rq, p); in check_class_changing()
2188 void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
2192 if (prev_class != p->sched_class) { in check_class_changed()
2194 prev_class->switched_from(rq, p); in check_class_changed()
2196 p->sched_class->switched_to(rq, p); in check_class_changed()
2197 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2198 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2201 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt() argument
2205 if (p->sched_class == donor->sched_class) in wakeup_preempt()
2206 donor->sched_class->wakeup_preempt(rq, p, flags); in wakeup_preempt()
2207 else if (sched_class_above(p->sched_class, donor->sched_class)) in wakeup_preempt()
2219 int __task_state_match(struct task_struct *p, unsigned int state) in __task_state_match() argument
2221 if (READ_ONCE(p->__state) & state) in __task_state_match()
2224 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2231 int task_state_match(struct task_struct *p, unsigned int state) in task_state_match() argument
2237 guard(raw_spinlock_irq)(&p->pi_lock); in task_state_match()
2238 return __task_state_match(p, state); in task_state_match()
2257 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) in wait_task_inactive() argument
2271 rq = task_rq(p); in wait_task_inactive()
2284 while (task_on_cpu(rq, p)) { in wait_task_inactive()
2285 if (!task_state_match(p, match_state)) in wait_task_inactive()
2295 rq = task_rq_lock(p, &rf); in wait_task_inactive()
2300 if (p->se.sched_delayed) in wait_task_inactive()
2301 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in wait_task_inactive()
2302 trace_sched_wait_task(p); in wait_task_inactive()
2303 running = task_on_cpu(rq, p); in wait_task_inactive()
2304 queued = task_on_rq_queued(p); in wait_task_inactive()
2306 if ((match = __task_state_match(p, match_state))) { in wait_task_inactive()
2313 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2315 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
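wait_task_inactive() above waits until the task is off every CPU and, if it still matches match_state, fully inactive; it returns 0 when the task left that state first, otherwise a non-zero context-switch cookie (p->nvcsw with the MSB set, per the ncsw line above). A hedged usage sketch; the state value here is just an example:

    /* Wait for @p to be completely off-CPU while still uninterruptible;
     * a zero return means it changed state before going inactive. */
    if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE))
            return -EAGAIN;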
2363 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2365 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) in migrate_disable_switch() argument
2372 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2375 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2381 __do_set_cpus_allowed(p, &ac); in migrate_disable_switch()
2386 struct task_struct *p = current; in migrate_disable() local
2388 if (p->migration_disabled) { in migrate_disable()
2393 WARN_ON_ONCE((s16)p->migration_disabled < 0); in migrate_disable()
2395 p->migration_disabled++; in migrate_disable()
2401 p->migration_disabled = 1; in migrate_disable()
2407 struct task_struct *p = current; in migrate_enable() local
2409 .new_mask = &p->cpus_mask, in migrate_enable()
2418 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) in migrate_enable()
2422 if (p->migration_disabled > 1) { in migrate_enable()
2423 p->migration_disabled--; in migrate_enable()
2432 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2433 __set_cpus_allowed_ptr(p, &ac); in migrate_enable()
2440 p->migration_disabled = 0; in migrate_enable()
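migrate_disable()/migrate_enable() above pin the current task to its CPU without disabling preemption, using the nestable p->migration_disabled counter and temporarily narrowing p->cpus_ptr. A common usage pattern, sketched:

    int cpu;

    migrate_disable();
    cpu = smp_processor_id();       /* stays valid until migrate_enable() */
    /* ... work on per-CPU data for @cpu; preemption remains enabled ... */
    migrate_enable();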
2454 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) in is_cpu_allowed() argument
2457 if (!task_allowed_on_cpu(p, cpu)) in is_cpu_allowed()
2461 if (is_migration_disabled(p)) in is_cpu_allowed()
2465 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2469 if (kthread_is_per_cpu(p)) in is_cpu_allowed()
2500 struct task_struct *p, int new_cpu) in move_queued_task() argument
2504 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
2505 set_task_cpu(p, new_cpu); in move_queued_task()
2511 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2512 activate_task(rq, p, 0); in move_queued_task()
2513 wakeup_preempt(rq, p, 0); in move_queued_task()
2546 struct task_struct *p, int dest_cpu) in __migrate_task() argument
2549 if (!is_cpu_allowed(p, dest_cpu)) in __migrate_task()
2552 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2566 struct task_struct *p = arg->task; in migration_cpu_stop() local
2583 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2590 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2597 if (task_rq(p) == rq) { in migration_cpu_stop()
2598 if (is_migration_disabled(p)) in migration_cpu_stop()
2602 p->migration_pending = NULL; in migration_cpu_stop()
2605 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2609 if (task_on_rq_queued(p)) { in migration_cpu_stop()
2611 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2613 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2638 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2639 p->migration_pending = NULL; in migration_cpu_stop()
2651 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2652 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2660 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2671 struct task_struct *p = arg; in push_cpu_stop() local
2673 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2676 if (task_rq(p) != rq) in push_cpu_stop()
2679 if (is_migration_disabled(p)) { in push_cpu_stop()
2680 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2684 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2686 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2687 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2693 if (task_rq(p) == rq) { in push_cpu_stop()
2694 move_queued_task_locked(rq, lowest_rq, p); in push_cpu_stop()
2703 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2705 put_task_struct(p); in push_cpu_stop()
2713 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) in set_cpus_allowed_common() argument
2716 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2720 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2721 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2727 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2731 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) in __do_set_cpus_allowed() argument
2733 struct rq *rq = task_rq(p); in __do_set_cpus_allowed()
2749 WARN_ON_ONCE(!p->on_cpu); in __do_set_cpus_allowed()
2751 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2753 queued = task_on_rq_queued(p); in __do_set_cpus_allowed()
2754 running = task_current_donor(rq, p); in __do_set_cpus_allowed()
2762 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2765 put_prev_task(rq, p); in __do_set_cpus_allowed()
2767 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2768 mm_set_cpus_allowed(p->mm, ctx->new_mask); in __do_set_cpus_allowed()
2771 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2773 set_next_task(rq, p); in __do_set_cpus_allowed()
2780 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed() argument
2792 __do_set_cpus_allowed(p, &ac); in do_set_cpus_allowed()
2845 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) in clear_user_cpus_ptr() argument
2849 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2854 void release_user_cpus_ptr(struct task_struct *p) in release_user_cpus_ptr() argument
2856 kfree(clear_user_cpus_ptr(p)); in release_user_cpus_ptr()
2935 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2938 __releases(p->pi_lock) in affine_move_task()
2950 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) || in affine_move_task()
2951 (task_current_donor(rq, p) && !task_current(rq, p))) { in affine_move_task()
2955 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2957 push_task = get_task_struct(p); in affine_move_task()
2964 pending = p->migration_pending; in affine_move_task()
2966 p->migration_pending = NULL; in affine_move_task()
2971 task_rq_unlock(rq, p, rf); in affine_move_task()
2974 p, &rq->push_work); in affine_move_task()
2986 if (!p->migration_pending) { in affine_move_task()
2991 .task = p, in affine_move_task()
2996 p->migration_pending = &my_pending; in affine_move_task()
2998 pending = p->migration_pending; in affine_move_task()
3011 pending = p->migration_pending; in affine_move_task()
3025 task_rq_unlock(rq, p, rf); in affine_move_task()
3029 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3040 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3043 task_rq_unlock(rq, p, rf); in affine_move_task()
3054 if (!is_migration_disabled(p)) { in affine_move_task()
3055 if (task_on_rq_queued(p)) in affine_move_task()
3056 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
3059 p->migration_pending = NULL; in affine_move_task()
3063 task_rq_unlock(rq, p, rf); in affine_move_task()
3089 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, in __set_cpus_allowed_ptr_locked() argument
3094 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3096 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); in __set_cpus_allowed_ptr_locked()
3098 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3104 if (kthread || is_migration_disabled(p)) { in __set_cpus_allowed_ptr_locked()
3127 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3133 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3135 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3139 if (WARN_ON_ONCE(p == current && in __set_cpus_allowed_ptr_locked()
3140 is_migration_disabled(p) && in __set_cpus_allowed_ptr_locked()
3141 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3158 __do_set_cpus_allowed(p, ctx); in __set_cpus_allowed_ptr_locked()
3160 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3163 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
3177 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx) in __set_cpus_allowed_ptr() argument
3182 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
3187 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3189 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3192 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); in __set_cpus_allowed_ptr()
3195 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr() argument
3202 return __set_cpus_allowed_ptr(p, &ac); in set_cpus_allowed_ptr()
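__set_cpus_allowed_ptr()/set_cpus_allowed_ptr() above change a task's CPU affinity under task_rq_lock() and, via affine_move_task(), migrate it off a now-disallowed CPU. A hedged caller-side sketch; the mask contents and local variables are arbitrary:

    cpumask_var_t mask;
    int ret;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

    cpumask_set_cpu(0, mask);
    cpumask_set_cpu(1, mask);
    ret = set_cpus_allowed_ptr(p, mask);    /* restrict @p to CPUs 0-1 */
    free_cpumask_var(mask);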
3215 static int restrict_cpus_allowed_ptr(struct task_struct *p, in restrict_cpus_allowed_ptr() argument
3227 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3234 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { in restrict_cpus_allowed_ptr()
3239 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { in restrict_cpus_allowed_ptr()
3244 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); in restrict_cpus_allowed_ptr()
3247 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3257 void force_compatible_cpus_allowed_ptr(struct task_struct *p) in force_compatible_cpus_allowed_ptr() argument
3260 const struct cpumask *override_mask = task_cpu_possible_mask(p); in force_compatible_cpus_allowed_ptr()
3273 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) in force_compatible_cpus_allowed_ptr()
3280 cpuset_cpus_allowed(p, new_mask); in force_compatible_cpus_allowed_ptr()
3286 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3290 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); in force_compatible_cpus_allowed_ptr()
3303 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) in relax_compatible_cpus_allowed_ptr() argument
3306 .new_mask = task_user_cpus(p), in relax_compatible_cpus_allowed_ptr()
3315 ret = __sched_setaffinity(p, &ac); in relax_compatible_cpus_allowed_ptr()
3321 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
3323 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3329 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3337 p->sched_class == &fair_sched_class && in set_task_cpu()
3338 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3351 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3352 lockdep_is_held(__rq_lockp(task_rq(p))))); in set_task_cpu()
3359 WARN_ON_ONCE(is_migration_disabled(p)); in set_task_cpu()
3361 trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
3363 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3364 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3365 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3366 p->se.nr_migrations++; in set_task_cpu()
3367 rseq_migrate(p); in set_task_cpu()
3368 sched_mm_cid_migrate_from(p); in set_task_cpu()
3369 perf_event_task_migrate(p); in set_task_cpu()
3372 __set_task_cpu(p, new_cpu); in set_task_cpu()
3377 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
3379 if (task_on_rq_queued(p)) { in __migrate_swap_task()
3383 src_rq = task_rq(p); in __migrate_swap_task()
3389 move_queued_task_locked(src_rq, dst_rq, p); in __migrate_swap_task()
3390 wakeup_preempt(dst_rq, p, 0); in __migrate_swap_task()
3401 p->wake_cpu = cpu; in __migrate_swap_task()
3445 int migrate_swap(struct task_struct *cur, struct task_struct *p, in migrate_swap() argument
3454 .dst_task = p, in migrate_swap()
3474 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
3495 void kick_process(struct task_struct *p) in kick_process() argument
3498 int cpu = task_cpu(p); in kick_process()
3500 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
3527 static int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
3544 if (is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3551 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3552 if (!is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3561 if (cpuset_cpus_allowed_fallback(p)) { in select_fallback_rq()
3573 do_set_cpus_allowed(p, task_cpu_fallback_mask(p)); in select_fallback_rq()
3589 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3591 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3602 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags) in select_task_rq() argument
3604 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3606 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { in select_task_rq()
3607 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); in select_task_rq()
3610 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3623 if (unlikely(!is_cpu_allowed(p, cpu))) in select_task_rq()
3624 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
3675 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
3686 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3690 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3702 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3705 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3708 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3714 static inline void ttwu_do_wakeup(struct task_struct *p) in ttwu_do_wakeup() argument
3716 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3717 trace_sched_wakeup(p); in ttwu_do_wakeup()
3721 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
3728 if (p->sched_contributes_to_load) in ttwu_do_activate()
3736 if (p->in_iowait) { in ttwu_do_activate()
3737 delayacct_blkio_end(p); in ttwu_do_activate()
3738 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3741 activate_task(rq, p, en_flags); in ttwu_do_activate()
3742 wakeup_preempt(rq, p, wake_flags); in ttwu_do_activate()
3744 ttwu_do_wakeup(p); in ttwu_do_activate()
3746 if (p->sched_class->task_woken) { in ttwu_do_activate()
3752 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3794 static int ttwu_runnable(struct task_struct *p, int wake_flags) in ttwu_runnable() argument
3800 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3801 if (task_on_rq_queued(p)) { in ttwu_runnable()
3803 if (p->se.sched_delayed) in ttwu_runnable()
3804 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED); in ttwu_runnable()
3805 if (!task_on_cpu(rq, p)) { in ttwu_runnable()
3810 wakeup_preempt(rq, p, wake_flags); in ttwu_runnable()
3812 ttwu_do_wakeup(p); in ttwu_runnable()
3824 struct task_struct *p, *t; in sched_ttwu_pending() local
3833 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { in sched_ttwu_pending()
3834 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3835 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3837 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
3838 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
3840 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3879 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in __ttwu_queue_wakelist() argument
3883 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3887 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3934 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) in ttwu_queue_cond() argument
3937 if (!scx_allow_ttwu_queue(p)) in ttwu_queue_cond()
3941 if (p->sched_class == &stop_sched_class) in ttwu_queue_cond()
3953 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3983 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
3985 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { in ttwu_queue_wakelist()
3987 __ttwu_queue_wakelist(p, cpu, wake_flags); in ttwu_queue_wakelist()
3994 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue() argument
3999 if (ttwu_queue_wakelist(p, cpu, wake_flags)) in ttwu_queue()
4004 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
4027 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) in ttwu_state_match() argument
4036 *success = !!(match = __task_state_match(p, state)); in ttwu_state_match()
4053 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4178 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
4185 if (p == current) { in try_to_wake_up()
4201 WARN_ON_ONCE(p->se.sched_delayed); in try_to_wake_up()
4202 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4205 trace_sched_waking(p); in try_to_wake_up()
4206 ttwu_do_wakeup(p); in try_to_wake_up()
4216 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4218 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4221 trace_sched_waking(p); in try_to_wake_up()
4246 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4280 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4301 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4302 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) in try_to_wake_up()
4314 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4316 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); in try_to_wake_up()
4317 if (task_cpu(p) != cpu) { in try_to_wake_up()
4318 if (p->in_iowait) { in try_to_wake_up()
4319 delayacct_blkio_end(p); in try_to_wake_up()
4320 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4324 psi_ttwu_dequeue(p); in try_to_wake_up()
4325 set_task_cpu(p, cpu); in try_to_wake_up()
4328 ttwu_queue(p, cpu, wake_flags); in try_to_wake_up()
4332 ttwu_stat(p, task_cpu(p), wake_flags); in try_to_wake_up()
4337 static bool __task_needs_rq_lock(struct task_struct *p) in __task_needs_rq_lock() argument
4339 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4356 if (p->on_rq) in __task_needs_rq_lock()
4364 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4384 int task_call_func(struct task_struct *p, task_call_f func, void *arg) in task_call_func() argument
4390 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4392 if (__task_needs_rq_lock(p)) in task_call_func()
4393 rq = __task_rq_lock(p, &rf); in task_call_func()
4405 ret = func(p, arg); in task_call_func()
4410 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
4457 int wake_up_process(struct task_struct *p) in wake_up_process() argument
4459 return try_to_wake_up(p, TASK_NORMAL, 0); in wake_up_process()
4463 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
4465 return try_to_wake_up(p, state, 0); in wake_up_state()
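try_to_wake_up() and its wrappers wake_up_process()/wake_up_state() above are the waker side of the sleep/wake handshake; the usual contract is that the waker publishes its condition before the wakeup and the sleeper sets its state before checking the condition. A sketch of that pairing; condition and sleeper_task are placeholder names:

    /* sleeper */
    set_current_state(TASK_INTERRUPTIBLE);
    if (!READ_ONCE(condition))
            schedule();
    __set_current_state(TASK_RUNNING);

    /* waker */
    WRITE_ONCE(condition, true);
    wake_up_process(sleeper_task);  /* orders the store above before the
                                     * sleeper's state is examined */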
4475 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) in __sched_fork() argument
4477 p->on_rq = 0; in __sched_fork()
4479 p->se.on_rq = 0; in __sched_fork()
4480 p->se.exec_start = 0; in __sched_fork()
4481 p->se.sum_exec_runtime = 0; in __sched_fork()
4482 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4483 p->se.nr_migrations = 0; in __sched_fork()
4484 p->se.vruntime = 0; in __sched_fork()
4485 p->se.vlag = 0; in __sched_fork()
4486 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4489 WARN_ON_ONCE(p->se.sched_delayed); in __sched_fork()
4492 p->se.cfs_rq = NULL; in __sched_fork()
4497 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4500 init_dl_entity(&p->dl); in __sched_fork()
4502 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4503 p->rt.timeout = 0; in __sched_fork()
4504 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4505 p->rt.on_rq = 0; in __sched_fork()
4506 p->rt.on_list = 0; in __sched_fork()
4509 init_scx_entity(&p->scx); in __sched_fork()
4513 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4517 p->capture_control = NULL; in __sched_fork()
4519 init_numa_balancing(clone_flags, p); in __sched_fork()
4520 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4521 p->migration_pending = NULL; in __sched_fork()
4522 init_sched_mm_cid(p); in __sched_fork()
4710 int sched_fork(unsigned long clone_flags, struct task_struct *p) in sched_fork() argument
4712 __sched_fork(clone_flags, p); in sched_fork()
4718 p->__state = TASK_NEW; in sched_fork()
4723 p->prio = current->normal_prio; in sched_fork()
4725 uclamp_fork(p); in sched_fork()
4730 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4731 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in sched_fork()
4732 p->policy = SCHED_NORMAL; in sched_fork()
4733 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4734 p->rt_priority = 0; in sched_fork()
4735 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4736 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4738 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4739 set_load_weight(p, false); in sched_fork()
4740 p->se.custom_slice = 0; in sched_fork()
4741 p->se.slice = sysctl_sched_base_slice; in sched_fork()
4747 p->sched_reset_on_fork = 0; in sched_fork()
4750 if (dl_prio(p->prio)) in sched_fork()
4753 scx_pre_fork(p); in sched_fork()
4755 if (rt_prio(p->prio)) { in sched_fork()
4756 p->sched_class = &rt_sched_class; in sched_fork()
4758 } else if (task_should_scx(p->policy)) { in sched_fork()
4759 p->sched_class = &ext_sched_class; in sched_fork()
4762 p->sched_class = &fair_sched_class; in sched_fork()
4765 init_entity_runnable_average(&p->se); in sched_fork()
4770 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4772 p->on_cpu = 0; in sched_fork()
4773 init_task_preempt_count(p); in sched_fork()
4774 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4775 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4780 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) in sched_cgroup_fork() argument
4788 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4794 tg = autogroup_task_group(p, tg); in sched_cgroup_fork()
4795 p->sched_task_group = tg; in sched_cgroup_fork()
4798 rseq_migrate(p); in sched_cgroup_fork()
4803 __set_task_cpu(p, smp_processor_id()); in sched_cgroup_fork()
4804 if (p->sched_class->task_fork) in sched_cgroup_fork()
4805 p->sched_class->task_fork(p); in sched_cgroup_fork()
4806 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4808 return scx_fork(p); in sched_cgroup_fork()
4811 void sched_cancel_fork(struct task_struct *p) in sched_cancel_fork() argument
4813 scx_cancel_fork(p); in sched_cancel_fork()
4816 void sched_post_fork(struct task_struct *p) in sched_post_fork() argument
4818 uclamp_post_fork(p); in sched_post_fork()
4819 scx_post_fork(p); in sched_post_fork()
4845 void wake_up_new_task(struct task_struct *p) in wake_up_new_task() argument
4851 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4852 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4861 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4862 rseq_migrate(p); in wake_up_new_task()
4863 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags)); in wake_up_new_task()
4864 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4866 post_init_entity_util_avg(p); in wake_up_new_task()
4868 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL); in wake_up_new_task()
4869 trace_sched_wakeup_new(p); in wake_up_new_task()
4870 wakeup_preempt(rq, p, wake_flags); in wake_up_new_task()
4871 if (p->sched_class->task_woken) { in wake_up_new_task()
4877 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4880 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
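sched_fork(), sched_cgroup_fork(), sched_post_fork() and wake_up_new_task() above are called from the fork path in a fixed order; roughly (a sketch from memory of kernel/fork.c, not verbatim):

    /* copy_process():
     *     sched_fork(clone_flags, p);      p->__state = TASK_NEW, class chosen
     *     ...
     *     sched_cgroup_fork(p, args);      bind to task group, class->task_fork()
     *     ...
     *     sched_post_fork(p);              uclamp_post_fork() + scx_post_fork()
     *     (sched_cancel_fork(p) on the error path)
     *
     * kernel_clone():
     *     wake_up_new_task(p);             first activation + wakeup_preempt()
     */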
5472 struct task_struct *p = current; in sched_exec() local
5476 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5477 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5484 arg = (struct migration_arg){ p, dest_cpu }; in sched_exec()
5486 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); in sched_exec()
5501 static inline void prefetch_curr_exec_start(struct task_struct *p) in prefetch_curr_exec_start() argument
5504 struct sched_entity *curr = p->se.cfs_rq->curr; in prefetch_curr_exec_start()
5506 struct sched_entity *curr = task_rq(p)->cfs.curr; in prefetch_curr_exec_start()
5517 unsigned long long task_sched_runtime(struct task_struct *p) in task_sched_runtime() argument
5535 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5536 return p->se.sum_exec_runtime; in task_sched_runtime()
5539 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5545 if (task_current_donor(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
5546 prefetch_curr_exec_start(p); in task_sched_runtime()
5548 p->sched_class->update_curr(rq); in task_sched_runtime()
5550 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5551 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5874 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) in get_preempt_disable_ip() argument
5877 return p->preempt_disable_ip; in get_preempt_disable_ip()
5985 struct task_struct *p; in __pick_next_task() local
6001 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
6002 if (unlikely(p == RETRY_TASK)) in __pick_next_task()
6006 if (!p) { in __pick_next_task()
6007 p = pick_task_idle(rq); in __pick_next_task()
6008 put_prev_set_next_task(rq, prev, p); in __pick_next_task()
6011 return p; in __pick_next_task()
6019 p = class->pick_next_task(rq, prev); in __pick_next_task()
6020 if (p) in __pick_next_task()
6021 return p; in __pick_next_task()
6023 p = class->pick_task(rq); in __pick_next_task()
6024 if (p) { in __pick_next_task()
6025 put_prev_set_next_task(rq, prev, p); in __pick_next_task()
6026 return p; in __pick_next_task()
6056 struct task_struct *p; in pick_task() local
6061 p = class->pick_task(rq); in pick_task()
6062 if (p) in pick_task()
6063 return p; in pick_task()
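__pick_next_task()/pick_task() above walk the scheduling classes from highest to lowest and return the first runnable task a class offers (with a fast path for the common all-fair case). The generic shape of that walk, sketched; example_pick() is a hypothetical wrapper around the real for_each_class() iterator:

    static struct task_struct *example_pick(struct rq *rq)
    {
            const struct sched_class *class;
            struct task_struct *p;

            for_each_class(class) {         /* stop, deadline, rt, fair, ..., idle */
                    p = class->pick_task(rq);
                    if (p)
                            return p;
            }

            BUG();  /* the idle class always has a runnable task */
    }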
6069 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6076 struct task_struct *next, *p, *max = NULL; in pick_next_task() local
6192 rq_i->core_pick = p = pick_task(rq_i); in pick_next_task()
6195 if (!max || prio_less(max, p, fi_before)) in pick_next_task()
6196 max = p; in pick_next_task()
6207 p = rq_i->core_pick; in pick_next_task()
6209 if (!cookie_equals(p, cookie)) { in pick_next_task()
6210 p = NULL; in pick_next_task()
6212 p = sched_core_find(rq_i, cookie); in pick_next_task()
6213 if (!p) in pick_next_task()
6214 p = idle_sched_class.pick_task(rq_i); in pick_next_task()
6217 rq_i->core_pick = p; in pick_next_task()
6220 if (p == rq_i->idle) { in pick_next_task()
6306 struct task_struct *p; in try_steal_cookie() local
6320 p = sched_core_find(src, cookie); in try_steal_cookie()
6321 if (!p) in try_steal_cookie()
6325 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6328 if (!is_cpu_allowed(p, this)) in try_steal_cookie()
6331 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6339 if (sched_task_is_throttled(p, this)) in try_steal_cookie()
6342 move_queued_task_locked(src, dst, p); in try_steal_cookie()
6349 p = sched_core_next(p, cookie); in try_steal_cookie()
6350 } while (p); in try_steal_cookie()
6544 static bool try_to_block_task(struct rq *rq, struct task_struct *p, in try_to_block_task() argument
6550 if (signal_pending_state(task_state, p)) { in try_to_block_task()
6551 WRITE_ONCE(p->__state, TASK_RUNNING); in try_to_block_task()
6566 p->sched_contributes_to_load = in try_to_block_task()
6585 block_task(rq, p, flags); in try_to_block_task()
6651 struct task_struct *p; in find_proxy_task() local
6655 for (p = donor; task_is_blocked(p); p = owner) { in find_proxy_task()
6656 mutex = p->blocked_on; in find_proxy_task()
6667 if (mutex != __get_task_blocked_on(p)) { in find_proxy_task()
6679 __clear_task_blocked_on(p, mutex); in find_proxy_task()
6680 return p; in find_proxy_task()
6716 if (owner == p) { in find_proxy_task()
7359 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) in rt_mutex_setprio() argument
7368 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7373 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7376 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7388 p->pi_top_task = pi_task; in rt_mutex_setprio()
7393 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7408 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7409 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7410 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7414 trace_sched_pi_setprio(p, pi_task); in rt_mutex_setprio()
7415 oldprio = p->prio; in rt_mutex_setprio()
7420 prev_class = p->sched_class; in rt_mutex_setprio()
7421 next_class = __setscheduler_class(p->policy, prio); in rt_mutex_setprio()
7423 if (prev_class != next_class && p->se.sched_delayed) in rt_mutex_setprio()
7424 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK); in rt_mutex_setprio()
7426 queued = task_on_rq_queued(p); in rt_mutex_setprio()
7427 running = task_current_donor(rq, p); in rt_mutex_setprio()
7429 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
7431 put_prev_task(rq, p); in rt_mutex_setprio()
7443 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7445 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7446 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7449 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7453 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7458 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7460 p->rt.timeout = 0; in rt_mutex_setprio()
7463 p->sched_class = next_class; in rt_mutex_setprio()
7464 p->prio = prio; in rt_mutex_setprio()
7466 check_class_changing(rq, p, prev_class); in rt_mutex_setprio()
7469 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
7471 set_next_task(rq, p); in rt_mutex_setprio()
7473 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
7908 void sched_show_task(struct task_struct *p) in sched_show_task() argument
7913 if (!try_get_task_stack(p)) in sched_show_task()
7916 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
7918 if (task_is_running(p)) in sched_show_task()
7920 free = stack_not_used(p); in sched_show_task()
7923 if (pid_alive(p)) in sched_show_task()
7924 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
7927 free, task_pid_nr(p), task_tgid_nr(p), in sched_show_task()
7928 ppid, p->flags, read_task_thread_flags(p)); in sched_show_task()
7930 print_worker_info(KERN_INFO, p); in sched_show_task()
7931 print_stop_info(KERN_INFO, p); in sched_show_task()
7932 print_scx_info(KERN_INFO, p); in sched_show_task()
7933 show_stack(p, NULL, KERN_INFO); in sched_show_task()
7934 put_task_stack(p); in sched_show_task()
7939 state_filter_match(unsigned long state_filter, struct task_struct *p) in state_filter_match() argument
7941 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
7964 struct task_struct *g, *p; in show_state_filter() local
7967 for_each_process_thread(g, p) { in show_state_filter()
7977 if (state_filter_match(state_filter, p)) in show_state_filter()
7978 sched_show_task(p); in show_state_filter()
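sched_show_task() and show_state_filter() above back the task-state dumps used by hung-task detection and SysRq. A hedged usage sketch:

    /* Dump every task sleeping uninterruptibly (roughly what SysRq-W does): */
    show_state_filter(TASK_UNINTERRUPTIBLE);

    /* Or dump a single task's state and stack: */
    sched_show_task(p);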
8073 int task_can_attach(struct task_struct *p) in task_can_attach() argument
8086 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
8096 int migrate_task_to(struct task_struct *p, int target_cpu) in migrate_task_to() argument
8098 struct migration_arg arg = { p, target_cpu }; in migrate_task_to()
8099 int curr_cpu = task_cpu(p); in migrate_task_to()
8104 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
8109 trace_sched_move_numa(p, curr_cpu, target_cpu); in migrate_task_to()
8117 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
8123 rq = task_rq_lock(p, &rf); in sched_setnuma()
8124 queued = task_on_rq_queued(p); in sched_setnuma()
8125 running = task_current_donor(rq, p); in sched_setnuma()
8128 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
8130 put_prev_task(rq, p); in sched_setnuma()
8132 p->numa_preferred_nid = nid; in sched_setnuma()
8135 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
8137 set_next_task(rq, p); in sched_setnuma()
8138 task_rq_unlock(rq, p, &rf); in sched_setnuma()
8171 struct task_struct *p = arg; in __balance_push_cpu_stop() local
8176 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8181 if (task_rq(p) == rq && task_on_rq_queued(p)) { in __balance_push_cpu_stop()
8182 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
8183 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
8187 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
8189 put_task_struct(p); in __balance_push_cpu_stop()
8580 struct task_struct *g, *p; in dump_rq_tasks() local
8586 for_each_process_thread(g, p) { in dump_rq_tasks()
8587 if (task_cpu(p) != cpu) in dump_rq_tasks()
8590 if (!task_on_rq_queued(p)) in dump_rq_tasks()
8593 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
9027 struct task_struct *g, *p; in normalize_rt_tasks() local
9033 for_each_process_thread(g, p) { in normalize_rt_tasks()
9037 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
9040 p->se.exec_start = 0; in normalize_rt_tasks()
9041 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
9042 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
9043 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
9045 if (!rt_or_dl_task(p)) { in normalize_rt_tasks()
9050 if (task_nice(p) < 0) in normalize_rt_tasks()
9051 set_user_nice(p, 0); in normalize_rt_tasks()
9055 __sched_setscheduler(p, &attr, false, false); in normalize_rt_tasks()
10930 void sched_deq_and_put_task(struct task_struct *p, int queue_flags, in sched_deq_and_put_task() argument
10933 struct rq *rq = task_rq(p); in sched_deq_and_put_task()
10938 .p = p, in sched_deq_and_put_task()
10940 .queued = task_on_rq_queued(p), in sched_deq_and_put_task()
10941 .running = task_current(rq, p), in sched_deq_and_put_task()
10946 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK); in sched_deq_and_put_task()
10948 put_prev_task(rq, p); in sched_deq_and_put_task()
10953 struct rq *rq = task_rq(ctx->p); in sched_enq_and_set_task()
10958 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); in sched_enq_and_set_task()
10960 set_next_task(rq, ctx->p); in sched_enq_and_set_task()
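sched_deq_and_put_task()/sched_enq_and_set_task() at the end package the recurring "change" pattern that also appears above in __do_set_cpus_allowed(), rt_mutex_setprio() and sched_setnuma(): dequeue and put_prev while the task is queued/running, mutate its scheduling attributes, then re-enqueue and set_next. As a comment-level sketch (locking and clock updates omitted):

    /* queued  = task_on_rq_queued(p);
     * running = task_current(rq, p);
     * if (queued)
     *         dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
     * if (running)
     *         put_prev_task(rq, p);
     *
     * ... change p->prio, p->sched_class, affinity, NUMA node, ...
     *
     * if (queued)
     *         enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
     * if (running)
     *         set_next_task(rq, p);
     */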