Lines matching references to `p` in kernel/sched/ext.c:

284 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
299 void (*enqueue)(struct task_struct *p, u64 enq_flags);
315 void (*dequeue)(struct task_struct *p, u64 deq_flags);
348 void (*tick)(struct task_struct *p);
375 void (*runnable)(struct task_struct *p, u64 enq_flags);
392 void (*running)(struct task_struct *p);
412 void (*stopping)(struct task_struct *p, bool runnable);
432 void (*quiescent)(struct task_struct *p, u64 deq_flags);
474 void (*set_weight)(struct task_struct *p, u32 weight);
483 void (*set_cpumask)(struct task_struct *p,
543 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
553 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
562 void (*enable)(struct task_struct *p);
572 void (*disable)(struct task_struct *p);
602 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
641 s32 (*cgroup_prep_move)(struct task_struct *p,
652 void (*cgroup_move)(struct task_struct *p,
664 void (*cgroup_cancel_move)(struct task_struct *p,
1235 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p) in find_global_dsq() argument
1239 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; in find_global_dsq()
1398 struct task_struct *p) in scx_kf_allowed_on_arg_tasks() argument
1403 if (unlikely((p != current->scx.kf_tasks[0] && in scx_kf_allowed_on_arg_tasks()
1404 p != current->scx.kf_tasks[1]))) { in scx_kf_allowed_on_arg_tasks()
1450 #define nldsq_for_each_task(p, dsq) \ argument
1451 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1452 (p) = nldsq_next_task((dsq), (p), false))
1616 struct task_struct *p; in scx_task_iter_next_locked() local
1620 while ((p = scx_task_iter_next(iter))) { in scx_task_iter_next_locked()
1646 if (p->sched_class != &idle_sched_class) in scx_task_iter_next_locked()
1649 if (!p) in scx_task_iter_next_locked()
1652 iter->rq = task_rq_lock(p, &iter->rf); in scx_task_iter_next_locked()
1653 iter->locked = p; in scx_task_iter_next_locked()
1655 return p; in scx_task_iter_next_locked()
1736 static void wait_ops_state(struct task_struct *p, unsigned long opss) in wait_ops_state() argument
1740 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss); in wait_ops_state()
1875 static void touch_core_sched(struct rq *rq, struct task_struct *p) in touch_core_sched() argument
1888 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); in touch_core_sched()
1902 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) in touch_core_sched_dispatch() argument
1908 touch_core_sched(rq, p); in touch_core_sched_dispatch()
1945 static void refill_task_slice_dfl(struct task_struct *p) in refill_task_slice_dfl() argument
1947 p->scx.slice = SCX_SLICE_DFL; in refill_task_slice_dfl()
1952 struct task_struct *p, u64 enq_flags) in dispatch_enqueue() argument
1956 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in dispatch_enqueue()
1957 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || in dispatch_enqueue()
1958 !RB_EMPTY_NODE(&p->scx.dsq_priq)); in dispatch_enqueue()
1966 dsq = find_global_dsq(p); in dispatch_enqueue()
1997 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; in dispatch_enqueue()
1998 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); in dispatch_enqueue()
2004 rbp = rb_prev(&p->scx.dsq_priq); in dispatch_enqueue()
2009 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); in dispatch_enqueue()
2011 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2020 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2022 list_add_tail(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
2027 p->scx.dsq_seq = dsq->seq; in dispatch_enqueue()
2030 p->scx.dsq = dsq; in dispatch_enqueue()
2038 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; in dispatch_enqueue()
2039 p->scx.ddsp_enq_flags = 0; in dispatch_enqueue()
2046 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_enqueue()
2052 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && in dispatch_enqueue()
2066 static void task_unlink_from_dsq(struct task_struct *p, in task_unlink_from_dsq() argument
2069 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); in task_unlink_from_dsq()
2071 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { in task_unlink_from_dsq()
2072 rb_erase(&p->scx.dsq_priq, &dsq->priq); in task_unlink_from_dsq()
2073 RB_CLEAR_NODE(&p->scx.dsq_priq); in task_unlink_from_dsq()
2074 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; in task_unlink_from_dsq()
2077 list_del_init(&p->scx.dsq_list.node); in task_unlink_from_dsq()
2081 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) in dispatch_dequeue() argument
2083 struct scx_dispatch_q *dsq = p->scx.dsq; in dispatch_dequeue()
2091 if (unlikely(!list_empty(&p->scx.dsq_list.node))) in dispatch_dequeue()
2092 list_del_init(&p->scx.dsq_list.node); in dispatch_dequeue()
2100 if (p->scx.holding_cpu >= 0) in dispatch_dequeue()
2101 p->scx.holding_cpu = -1; in dispatch_dequeue()
2113 if (p->scx.holding_cpu < 0) { in dispatch_dequeue()
2115 task_unlink_from_dsq(p, dsq); in dispatch_dequeue()
2123 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); in dispatch_dequeue()
2124 p->scx.holding_cpu = -1; in dispatch_dequeue()
2126 p->scx.dsq = NULL; in dispatch_dequeue()
2134 struct task_struct *p) in find_dsq_for_dispatch() argument
2145 return find_global_dsq(p); in find_dsq_for_dispatch()
2151 dsq = find_global_dsq(p); in find_dsq_for_dispatch()
2157 dsq_id, p->comm, p->pid); in find_dsq_for_dispatch()
2158 return find_global_dsq(p); in find_dsq_for_dispatch()
2165 struct task_struct *p, u64 dsq_id, in mark_direct_dispatch() argument
2176 if (unlikely(p != ddsp_task)) { in mark_direct_dispatch()
2179 p->comm, p->pid); in mark_direct_dispatch()
2183 p->comm, p->pid); in mark_direct_dispatch()
2187 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); in mark_direct_dispatch()
2188 WARN_ON_ONCE(p->scx.ddsp_enq_flags); in mark_direct_dispatch()
2190 p->scx.ddsp_dsq_id = dsq_id; in mark_direct_dispatch()
2191 p->scx.ddsp_enq_flags = enq_flags; in mark_direct_dispatch()
2194 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, in direct_dispatch() argument
2197 struct rq *rq = task_rq(p); in direct_dispatch()
2199 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in direct_dispatch()
2201 touch_core_sched_dispatch(rq, p); in direct_dispatch()
2203 p->scx.ddsp_enq_flags |= enq_flags; in direct_dispatch()
2214 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; in direct_dispatch()
2224 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
2228 p->comm, p->pid, opss); in direct_dispatch()
2229 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
2233 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in direct_dispatch()
2234 list_add_tail(&p->scx.dsq_list.node, in direct_dispatch()
2240 dispatch_enqueue(sch, dsq, p, in direct_dispatch()
2241 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); in direct_dispatch()
2256 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, in do_enqueue_task() argument
2263 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); in do_enqueue_task()
2282 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2287 unlikely(p->flags & PF_EXITING)) { in do_enqueue_task()
2294 is_migration_disabled(p)) { in do_enqueue_task()
2305 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in do_enqueue_task()
2306 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); in do_enqueue_task()
2310 *ddsp_taskp = p; in do_enqueue_task()
2312 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); in do_enqueue_task()
2315 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2322 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); in do_enqueue_task()
2326 direct_dispatch(sch, p, enq_flags); in do_enqueue_task()
2335 touch_core_sched(rq, p); in do_enqueue_task()
2336 refill_task_slice_dfl(p); in do_enqueue_task()
2338 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); in do_enqueue_task()
2342 touch_core_sched(rq, p); /* see the comment in local: */ in do_enqueue_task()
2343 refill_task_slice_dfl(p); in do_enqueue_task()
2344 dispatch_enqueue(sch, find_global_dsq(p), p, enq_flags); in do_enqueue_task()
2347 static bool task_runnable(const struct task_struct *p) in task_runnable() argument
2349 return !list_empty(&p->scx.runnable_node); in task_runnable()
2352 static void set_task_runnable(struct rq *rq, struct task_struct *p) in set_task_runnable() argument
2356 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { in set_task_runnable()
2357 p->scx.runnable_at = jiffies; in set_task_runnable()
2358 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; in set_task_runnable()
2365 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); in set_task_runnable()
2368 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at) in clr_task_runnable() argument
2370 list_del_init(&p->scx.runnable_node); in clr_task_runnable()
2372 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in clr_task_runnable()
2375 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) in enqueue_task_scx() argument
2378 int sticky_cpu = p->scx.sticky_cpu; in enqueue_task_scx()
2386 p->scx.sticky_cpu = -1; in enqueue_task_scx()
2394 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) in enqueue_task_scx()
2397 if (p->scx.flags & SCX_TASK_QUEUED) { in enqueue_task_scx()
2398 WARN_ON_ONCE(!task_runnable(p)); in enqueue_task_scx()
2402 set_task_runnable(rq, p); in enqueue_task_scx()
2403 p->scx.flags |= SCX_TASK_QUEUED; in enqueue_task_scx()
2407 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p)) in enqueue_task_scx()
2408 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags); in enqueue_task_scx()
2411 touch_core_sched(rq, p); in enqueue_task_scx()
2413 do_enqueue_task(rq, p, enq_flags, sticky_cpu); in enqueue_task_scx()
2418 unlikely(cpu_of(rq) != p->scx.selected_cpu)) in enqueue_task_scx()
2422 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags) in ops_dequeue() argument
2428 clr_task_runnable(p, false); in ops_dequeue()
2431 opss = atomic_long_read_acquire(&p->scx.ops_state); in ops_dequeue()
2445 p, deq_flags); in ops_dequeue()
2447 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in ops_dequeue()
2465 wait_ops_state(p, SCX_OPSS_DISPATCHING); in ops_dequeue()
2466 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in ops_dequeue()
2471 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) in dequeue_task_scx() argument
2475 if (!(p->scx.flags & SCX_TASK_QUEUED)) { in dequeue_task_scx()
2476 WARN_ON_ONCE(task_runnable(p)); in dequeue_task_scx()
2480 ops_dequeue(rq, p, deq_flags); in dequeue_task_scx()
2494 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) { in dequeue_task_scx()
2496 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false); in dequeue_task_scx()
2499 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p)) in dequeue_task_scx()
2500 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags); in dequeue_task_scx()
2503 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2505 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2507 p->scx.flags &= ~SCX_TASK_QUEUED; in dequeue_task_scx()
2511 dispatch_dequeue(rq, p); in dequeue_task_scx()
2518 struct task_struct *p = rq->curr; in yield_task_scx() local
2521 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL); in yield_task_scx()
2523 p->scx.slice = 0; in yield_task_scx()
2538 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, in move_local_task_to_local_dsq() argument
2548 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in move_local_task_to_local_dsq()
2551 list_add(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2553 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2556 p->scx.dsq = dst_dsq; in move_local_task_to_local_dsq()
2568 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, in move_remote_task_to_local_dsq() argument
2574 deactivate_task(src_rq, p, 0); in move_remote_task_to_local_dsq()
2575 set_task_cpu(p, cpu_of(dst_rq)); in move_remote_task_to_local_dsq()
2576 p->scx.sticky_cpu = cpu_of(dst_rq); in move_remote_task_to_local_dsq()
2586 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); in move_remote_task_to_local_dsq()
2589 activate_task(dst_rq, p, 0); in move_remote_task_to_local_dsq()
2613 struct task_struct *p, struct rq *rq, in task_can_run_on_remote_rq() argument
2618 WARN_ON_ONCE(task_cpu(p) == cpu); in task_can_run_on_remote_rq()
2632 if (unlikely(is_migration_disabled(p))) { in task_can_run_on_remote_rq()
2635 p->comm, p->pid, task_cpu(p), cpu); in task_can_run_on_remote_rq()
2645 if (!task_allowed_on_cpu(p, cpu)) { in task_can_run_on_remote_rq()
2648 cpu, p->comm, p->pid); in task_can_run_on_remote_rq()
2691 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, in unlink_dsq_and_lock_src_rq() argument
2699 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in unlink_dsq_and_lock_src_rq()
2700 task_unlink_from_dsq(p, dsq); in unlink_dsq_and_lock_src_rq()
2701 p->scx.holding_cpu = cpu; in unlink_dsq_and_lock_src_rq()
2707 return likely(p->scx.holding_cpu == cpu) && in unlink_dsq_and_lock_src_rq()
2708 !WARN_ON_ONCE(src_rq != task_rq(p)); in unlink_dsq_and_lock_src_rq()
2711 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, in consume_remote_task() argument
2716 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { in consume_remote_task()
2717 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); in consume_remote_task()
2743 struct task_struct *p, u64 enq_flags, in move_task_between_dsqs() argument
2747 struct rq *src_rq = task_rq(p), *dst_rq; in move_task_between_dsqs()
2756 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in move_task_between_dsqs()
2757 dst_dsq = find_global_dsq(p); in move_task_between_dsqs()
2772 task_unlink_from_dsq(p, src_dsq); in move_task_between_dsqs()
2773 move_local_task_to_local_dsq(p, enq_flags, in move_task_between_dsqs()
2778 move_remote_task_to_local_dsq(p, enq_flags, in move_task_between_dsqs()
2786 task_unlink_from_dsq(p, src_dsq); in move_task_between_dsqs()
2787 p->scx.dsq = NULL; in move_task_between_dsqs()
2790 dispatch_enqueue(sch, dst_dsq, p, enq_flags); in move_task_between_dsqs()
2828 struct task_struct *p; in consume_dispatch_q() local
2848 nldsq_for_each_task(p, dsq) { in consume_dispatch_q()
2849 struct rq *task_rq = task_rq(p); in consume_dispatch_q()
2852 task_unlink_from_dsq(p, dsq); in consume_dispatch_q()
2853 move_local_task_to_local_dsq(p, 0, dsq, rq); in consume_dispatch_q()
2858 if (task_can_run_on_remote_rq(sch, p, rq, false)) { in consume_dispatch_q()
2859 if (likely(consume_remote_task(rq, p, dsq, task_rq))) in consume_dispatch_q()
2893 struct task_struct *p, u64 enq_flags) in dispatch_to_local_dsq() argument
2895 struct rq *src_rq = task_rq(p); in dispatch_to_local_dsq()
2906 dispatch_enqueue(sch, dst_dsq, p, in dispatch_to_local_dsq()
2912 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { in dispatch_to_local_dsq()
2913 dispatch_enqueue(sch, find_global_dsq(p), p, in dispatch_to_local_dsq()
2929 p->scx.holding_cpu = raw_smp_processor_id(); in dispatch_to_local_dsq()
2932 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_to_local_dsq()
2942 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && in dispatch_to_local_dsq()
2943 !WARN_ON_ONCE(src_rq != task_rq(p))) { in dispatch_to_local_dsq()
2950 p->scx.holding_cpu = -1; in dispatch_to_local_dsq()
2951 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, in dispatch_to_local_dsq()
2954 move_remote_task_to_local_dsq(p, enq_flags, in dispatch_to_local_dsq()
2961 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class)) in dispatch_to_local_dsq()
2992 struct task_struct *p, in finish_dispatch() argument
2999 touch_core_sched_dispatch(rq, p); in finish_dispatch()
3005 opss = atomic_long_read(&p->scx.ops_state); in finish_dispatch()
3028 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in finish_dispatch()
3039 wait_ops_state(p, opss); in finish_dispatch()
3043 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); in finish_dispatch()
3045 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); in finish_dispatch()
3048 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); in finish_dispatch()
3050 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); in finish_dispatch()
3221 struct task_struct *p; in process_ddsp_deferred_locals() local
3232 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, in process_ddsp_deferred_locals()
3237 list_del_init(&p->scx.dsq_list.node); in process_ddsp_deferred_locals()
3239 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); in process_ddsp_deferred_locals()
3241 dispatch_to_local_dsq(sch, rq, dsq, p, in process_ddsp_deferred_locals()
3242 p->scx.ddsp_enq_flags); in process_ddsp_deferred_locals()
3246 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) in set_next_task_scx() argument
3250 if (p->scx.flags & SCX_TASK_QUEUED) { in set_next_task_scx()
3255 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC); in set_next_task_scx()
3256 dispatch_dequeue(rq, p); in set_next_task_scx()
3259 p->se.exec_start = rq_clock_task(rq); in set_next_task_scx()
3262 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) in set_next_task_scx()
3263 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); in set_next_task_scx()
3265 clr_task_runnable(p, true); in set_next_task_scx()
3271 if ((p->scx.slice == SCX_SLICE_INF) != in set_next_task_scx()
3273 if (p->scx.slice == SCX_SLICE_INF) in set_next_task_scx()
3348 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, in put_prev_task_scx() argument
3355 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) in put_prev_task_scx()
3356 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); in put_prev_task_scx()
3358 if (p->scx.flags & SCX_TASK_QUEUED) { in put_prev_task_scx()
3359 set_task_runnable(rq, p); in put_prev_task_scx()
3367 if (p->scx.slice && !scx_rq_bypassing(rq)) { in put_prev_task_scx()
3368 dispatch_enqueue(sch, &rq->scx.local_dsq, p, in put_prev_task_scx()
3381 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); in put_prev_task_scx()
3383 do_enqueue_task(rq, p, 0, -1); in put_prev_task_scx()
3401 struct task_struct *p; in pick_task_scx() local
3443 p = prev; in pick_task_scx()
3444 if (!p->scx.slice) in pick_task_scx()
3445 refill_task_slice_dfl(p); in pick_task_scx()
3447 p = first_local_task(rq); in pick_task_scx()
3448 if (!p) { in pick_task_scx()
3454 if (unlikely(!p->scx.slice)) { in pick_task_scx()
3459 p->comm, p->pid, __func__); in pick_task_scx()
3462 refill_task_slice_dfl(p); in pick_task_scx()
3466 return p; in pick_task_scx()
3509 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_scx() argument
3527 rq_bypass = scx_rq_bypassing(task_rq(p)); in select_task_rq_scx()
3534 *ddsp_taskp = p; in select_task_rq_scx()
3538 select_cpu, NULL, p, prev_cpu, in select_task_rq_scx()
3540 p->scx.selected_cpu = cpu; in select_task_rq_scx()
3549 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0); in select_task_rq_scx()
3551 refill_task_slice_dfl(p); in select_task_rq_scx()
3552 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; in select_task_rq_scx()
3556 p->scx.selected_cpu = cpu; in select_task_rq_scx()
3564 static void task_woken_scx(struct rq *rq, struct task_struct *p) in task_woken_scx() argument
3569 static void set_cpus_allowed_scx(struct task_struct *p, in set_cpus_allowed_scx() argument
3574 set_cpus_allowed_common(p, ac); in set_cpus_allowed_scx()
3586 p, (struct cpumask *)p->cpus_ptr); in set_cpus_allowed_scx()
3642 struct task_struct *p; in check_rq_for_timeouts() local
3651 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { in check_rq_for_timeouts()
3652 unsigned long last_runnable = p->scx.runnable_at; in check_rq_for_timeouts()
3660 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000); in check_rq_for_timeouts()
3754 static enum scx_task_state scx_get_task_state(const struct task_struct *p) in scx_get_task_state() argument
3756 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; in scx_get_task_state()
3759 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) in scx_set_task_state() argument
3761 enum scx_task_state prev_state = scx_get_task_state(p); in scx_set_task_state()
3784 prev_state, state, p->comm, p->pid); in scx_set_task_state()
3786 p->scx.flags &= ~SCX_TASK_STATE_MASK; in scx_set_task_state()
3787 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; in scx_set_task_state()
3790 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork) in scx_init_task() argument
3795 p->scx.disallow = false; in scx_init_task()
3804 p, &args); in scx_init_task()
3811 scx_set_task_state(p, SCX_TASK_INIT); in scx_init_task()
3813 if (p->scx.disallow) { in scx_init_task()
3818 rq = task_rq_lock(p, &rf); in scx_init_task()
3827 if (p->policy == SCHED_EXT) { in scx_init_task()
3828 p->policy = SCHED_NORMAL; in scx_init_task()
3832 task_rq_unlock(rq, p, &rf); in scx_init_task()
3833 } else if (p->policy == SCHED_EXT) { in scx_init_task()
3835 p->comm, p->pid); in scx_init_task()
3839 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in scx_init_task()
3843 static void scx_enable_task(struct task_struct *p) in scx_enable_task() argument
3846 struct rq *rq = task_rq(p); in scx_enable_task()
3855 if (task_has_idle_policy(p)) in scx_enable_task()
3858 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; in scx_enable_task()
3860 p->scx.weight = sched_weight_to_cgroup(weight); in scx_enable_task()
3863 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); in scx_enable_task()
3864 scx_set_task_state(p, SCX_TASK_ENABLED); in scx_enable_task()
3868 p, p->scx.weight); in scx_enable_task()
3871 static void scx_disable_task(struct task_struct *p) in scx_disable_task() argument
3874 struct rq *rq = task_rq(p); in scx_disable_task()
3877 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); in scx_disable_task()
3880 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); in scx_disable_task()
3881 scx_set_task_state(p, SCX_TASK_READY); in scx_disable_task()
3884 static void scx_exit_task(struct task_struct *p) in scx_exit_task() argument
3891 lockdep_assert_rq_held(task_rq(p)); in scx_exit_task()
3893 switch (scx_get_task_state(p)) { in scx_exit_task()
3902 scx_disable_task(p); in scx_exit_task()
3910 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), in scx_exit_task()
3911 p, &args); in scx_exit_task()
3912 scx_set_task_state(p, SCX_TASK_NONE); in scx_exit_task()
3928 void scx_pre_fork(struct task_struct *p) in scx_pre_fork() argument
3939 int scx_fork(struct task_struct *p) in scx_fork() argument
3944 return scx_init_task(p, task_group(p), true); in scx_fork()
3949 void scx_post_fork(struct task_struct *p) in scx_post_fork() argument
3952 scx_set_task_state(p, SCX_TASK_READY); in scx_post_fork()
3959 if (p->sched_class == &ext_sched_class) { in scx_post_fork()
3963 rq = task_rq_lock(p, &rf); in scx_post_fork()
3964 scx_enable_task(p); in scx_post_fork()
3965 task_rq_unlock(rq, p, &rf); in scx_post_fork()
3970 list_add_tail(&p->scx.tasks_node, &scx_tasks); in scx_post_fork()
3976 void scx_cancel_fork(struct task_struct *p) in scx_cancel_fork() argument
3982 rq = task_rq_lock(p, &rf); in scx_cancel_fork()
3983 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); in scx_cancel_fork()
3984 scx_exit_task(p); in scx_cancel_fork()
3985 task_rq_unlock(rq, p, &rf); in scx_cancel_fork()
3991 void sched_ext_free(struct task_struct *p) in sched_ext_free() argument
3996 list_del_init(&p->scx.tasks_node); in sched_ext_free()
4003 if (scx_get_task_state(p) != SCX_TASK_NONE) { in sched_ext_free()
4007 rq = task_rq_lock(p, &rf); in sched_ext_free()
4008 scx_exit_task(p); in sched_ext_free()
4009 task_rq_unlock(rq, p, &rf); in sched_ext_free()
4013 static void reweight_task_scx(struct rq *rq, struct task_struct *p, in reweight_task_scx() argument
4018 lockdep_assert_rq_held(task_rq(p)); in reweight_task_scx()
4020 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); in reweight_task_scx()
4023 p, p->scx.weight); in reweight_task_scx()
4026 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_scx() argument
4030 static void switching_to_scx(struct rq *rq, struct task_struct *p) in switching_to_scx() argument
4034 scx_enable_task(p); in switching_to_scx()
4042 p, (struct cpumask *)p->cpus_ptr); in switching_to_scx()
4045 static void switched_from_scx(struct rq *rq, struct task_struct *p) in switched_from_scx() argument
4047 scx_disable_task(p); in switched_from_scx()
4050 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {} in wakeup_preempt_scx() argument
4051 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} in switched_to_scx() argument
4053 int scx_check_setscheduler(struct task_struct *p, int policy) in scx_check_setscheduler() argument
4055 lockdep_assert_rq_held(task_rq(p)); in scx_check_setscheduler()
4058 if (scx_enabled() && READ_ONCE(p->scx.disallow) && in scx_check_setscheduler()
4059 p->policy != policy && policy == SCHED_EXT) in scx_check_setscheduler()
4068 struct task_struct *p = rq->curr; in scx_can_stop_tick() local
4073 if (p->sched_class != &ext_sched_class) in scx_can_stop_tick()
4150 struct task_struct *p; in scx_cgroup_can_attach() local
4159 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_can_attach()
4160 struct cgroup *from = tg_cgrp(task_group(p)); in scx_cgroup_can_attach()
4163 WARN_ON_ONCE(p->scx.cgrp_moving_from); in scx_cgroup_can_attach()
4176 p, from, css->cgroup); in scx_cgroup_can_attach()
4181 p->scx.cgrp_moving_from = from; in scx_cgroup_can_attach()
4187 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_can_attach()
4189 p->scx.cgrp_moving_from) in scx_cgroup_can_attach()
4191 p, p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_can_attach()
4192 p->scx.cgrp_moving_from = NULL; in scx_cgroup_can_attach()
4199 void scx_cgroup_move_task(struct task_struct *p) in scx_cgroup_move_task() argument
4211 !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) in scx_cgroup_move_task()
4213 p, p->scx.cgrp_moving_from, in scx_cgroup_move_task()
4214 tg_cgrp(task_group(p))); in scx_cgroup_move_task()
4215 p->scx.cgrp_moving_from = NULL; in scx_cgroup_move_task()
4227 struct task_struct *p; in scx_cgroup_cancel_attach() local
4232 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_cancel_attach()
4234 p->scx.cgrp_moving_from) in scx_cgroup_cancel_attach()
4236 p, p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_cancel_attach()
4237 p->scx.cgrp_moving_from = NULL; in scx_cgroup_cancel_attach()
4672 bool scx_allow_ttwu_queue(const struct task_struct *p) in scx_allow_ttwu_queue() argument
4676 p->sched_class != &ext_sched_class; in scx_allow_ttwu_queue()
4839 struct task_struct *p, *n; in scx_bypass() local
4868 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, in scx_bypass()
4873 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_bypass()
4945 struct task_struct *p; in scx_disable_workfn() local
5002 while ((p = scx_task_iter_next_locked(&sti))) { in scx_disable_workfn()
5003 const struct sched_class *old_class = p->sched_class; in scx_disable_workfn()
5005 __setscheduler_class(p->policy, p->prio); in scx_disable_workfn()
5008 if (old_class != new_class && p->se.sched_delayed) in scx_disable_workfn()
5009 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in scx_disable_workfn()
5011 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_disable_workfn()
5013 p->sched_class = new_class; in scx_disable_workfn()
5014 check_class_changing(task_rq(p), p, old_class); in scx_disable_workfn()
5018 check_class_changed(task_rq(p), p, old_class, p->prio); in scx_disable_workfn()
5019 scx_exit_task(p); in scx_disable_workfn()
5213 struct task_struct *p, char marker) in scx_dump_task() argument
5218 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); in scx_dump_task()
5221 if (p->scx.dsq) in scx_dump_task()
5223 (unsigned long long)p->scx.dsq->id); in scx_dump_task()
5227 marker, task_state_to_char(p), p->comm, p->pid, in scx_dump_task()
5228 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); in scx_dump_task()
5230 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, in scx_dump_task()
5231 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, in scx_dump_task()
5234 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); in scx_dump_task()
5236 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); in scx_dump_task()
5237 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); in scx_dump_task()
5241 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); in scx_dump_task()
5246 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); in scx_dump_task()
5300 struct task_struct *p; in scx_dump_state() local
5371 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) in scx_dump_state()
5372 scx_dump_task(&s, &dctx, p, ' '); in scx_dump_state()
5567 struct task_struct *p; in scx_enable() local
5702 while ((p = scx_task_iter_next_locked(&sti))) { in scx_enable()
5708 if (!tryget_task_struct(p)) in scx_enable()
5713 ret = scx_init_task(p, task_group(p), false); in scx_enable()
5715 put_task_struct(p); in scx_enable()
5719 ret, p->comm, p->pid); in scx_enable()
5723 scx_set_task_state(p, SCX_TASK_READY); in scx_enable()
5725 put_task_struct(p); in scx_enable()
5746 while ((p = scx_task_iter_next_locked(&sti))) { in scx_enable()
5747 const struct sched_class *old_class = p->sched_class; in scx_enable()
5749 __setscheduler_class(p->policy, p->prio); in scx_enable()
5752 if (old_class != new_class && p->se.sched_delayed) in scx_enable()
5753 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in scx_enable()
5755 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_enable()
5757 p->scx.slice = SCX_SLICE_DFL; in scx_enable()
5758 p->sched_class = new_class; in scx_enable()
5759 check_class_changing(task_rq(p), p, old_class); in scx_enable()
5763 check_class_changed(task_rq(p), p, old_class, p->prio); in scx_enable()
5975 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return … in sched_ext_ops__select_cpu() argument
5976 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {} in sched_ext_ops__enqueue() argument
5977 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {} in sched_ext_ops__dequeue() argument
5979 static void sched_ext_ops__tick(struct task_struct *p) {} in sched_ext_ops__tick() argument
5980 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {} in sched_ext_ops__runnable() argument
5981 static void sched_ext_ops__running(struct task_struct *p) {} in sched_ext_ops__running() argument
5982 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {} in sched_ext_ops__stopping() argument
5983 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {} in sched_ext_ops__quiescent() argument
5986 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {} in sched_ext_ops__set_weight() argument
5987 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {} in sched_ext_ops__set_cpumask() argument
5991 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { retur… in sched_ext_ops__init_task() argument
5992 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {} in sched_ext_ops__exit_task() argument
5993 static void sched_ext_ops__enable(struct task_struct *p) {} in sched_ext_ops__enable() argument
5994 static void sched_ext_ops__disable(struct task_struct *p) {} in sched_ext_ops__disable() argument
5998 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgrou… in sched_ext_ops__cgroup_prep_move() argument
5999 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *t… in sched_ext_ops__cgroup_move() argument
6000 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cg… in sched_ext_ops__cgroup_cancel_move() argument
6010 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {} in sched_ext_ops__dump_task() argument
6218 void print_scx_info(const char *log_lvl, struct task_struct *p) in print_scx_info() argument
6234 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || in print_scx_info()
6241 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, in print_scx_info()
6327 static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags) in scx_dsq_insert_preamble() argument
6334 if (unlikely(!p)) { in scx_dsq_insert_preamble()
6347 static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id, in scx_dsq_insert_commit() argument
6355 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags); in scx_dsq_insert_commit()
6365 .task = p, in scx_dsq_insert_commit()
6366 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, in scx_dsq_insert_commit()
6409 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, in scx_bpf_dsq_insert() argument
6412 if (!scx_dsq_insert_preamble(p, enq_flags)) in scx_bpf_dsq_insert()
6416 p->scx.slice = slice; in scx_bpf_dsq_insert()
6418 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dsq_insert()
6420 scx_dsq_insert_commit(p, dsq_id, enq_flags); in scx_bpf_dsq_insert()
6444 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, in scx_bpf_dsq_insert_vtime() argument
6447 if (!scx_dsq_insert_preamble(p, enq_flags)) in scx_bpf_dsq_insert_vtime()
6451 p->scx.slice = slice; in scx_bpf_dsq_insert_vtime()
6453 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dsq_insert_vtime()
6455 p->scx.dsq_vtime = vtime; in scx_bpf_dsq_insert_vtime()
6457 scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_bpf_dsq_insert_vtime()
6473 struct task_struct *p, u64 dsq_id, u64 enq_flags) in scx_dsq_move() argument
6490 src_rq = task_rq(p); in scx_dsq_move()
6519 if (unlikely(p->scx.dsq != src_dsq || in scx_dsq_move()
6520 u32_before(kit->cursor.priv, p->scx.dsq_seq) || in scx_dsq_move()
6521 p->scx.holding_cpu >= 0) || in scx_dsq_move()
6522 WARN_ON_ONCE(src_rq != task_rq(p))) { in scx_dsq_move()
6528 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); in scx_dsq_move()
6536 p->scx.dsq_vtime = kit->vtime; in scx_dsq_move()
6538 p->scx.slice = kit->slice; in scx_dsq_move()
6541 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); in scx_dsq_move()
6699 struct task_struct *p, u64 dsq_id, in scx_bpf_dsq_move() argument
6703 p, dsq_id, enq_flags); in scx_bpf_dsq_move()
6724 struct task_struct *p, u64 dsq_id, in scx_bpf_dsq_move_vtime() argument
6728 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_bpf_dsq_move_vtime()
6762 struct task_struct *p, *n; in scx_bpf_reenqueue_local() local
6775 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, in scx_bpf_reenqueue_local()
6792 if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1) in scx_bpf_reenqueue_local()
6795 dispatch_dequeue(rq, p); in scx_bpf_reenqueue_local()
6796 list_add_tail(&p->scx.dsq_list.node, &tasks); in scx_bpf_reenqueue_local()
6799 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { in scx_bpf_reenqueue_local()
6800 list_del_init(&p->scx.dsq_list.node); in scx_bpf_reenqueue_local()
6801 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); in scx_bpf_reenqueue_local()
7062 struct task_struct *p; in bpf_iter_scx_dsq_next() local
7071 p = NULL; in bpf_iter_scx_dsq_next()
7073 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); in bpf_iter_scx_dsq_next()
7081 p = nldsq_next_task(kit->dsq, p, rev); in bpf_iter_scx_dsq_next()
7082 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); in bpf_iter_scx_dsq_next()
7084 if (p) { in bpf_iter_scx_dsq_next()
7086 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
7088 list_move(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
7095 return p; in bpf_iter_scx_dsq_next()
7403 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) in scx_bpf_task_running() argument
7405 return task_rq(p)->curr == p; in scx_bpf_task_running()
7412 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) in scx_bpf_task_cpu() argument
7414 return task_cpu(p); in scx_bpf_task_cpu()
7441 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) in scx_bpf_task_cgroup() argument
7443 struct task_group *tg = p->sched_task_group; in scx_bpf_task_cgroup()
7446 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p)) in scx_bpf_task_cgroup()