Searched refs:wake_flags (Results 1 – 10 of 10) sorted by relevance

/kernel/sched/
ext_idle.c
451 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, in scx_select_cpu_dfl() argument
519 if (wake_flags & SCX_WAKE_SYNC) { in scx_select_cpu_dfl()
859 static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags, in select_cpu_from_kfunc() argument
908 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, in select_cpu_from_kfunc()
947 u64 wake_flags, bool *is_idle) in scx_bpf_select_cpu_dfl() argument
951 cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0); in scx_bpf_select_cpu_dfl()
981 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags, in scx_bpf_select_cpu_and() argument
984 return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags); in scx_bpf_select_cpu_and()
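Note: the ext_idle.c hits above are the default idle-CPU selection path for sched_ext. A minimal sketch of how a BPF scheduler could forward wake_flags to that path through the scx_bpf_select_cpu_dfl() kfunc; the callback and ops names here are made up, and the ops definition details vary between kernel versions:

	#include <scx/common.bpf.h>

	char _license[] SEC("license") = "GPL";

	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		bool is_idle = false;

		/* Forward wake_flags unchanged; the default helper applies the
		 * SCX_WAKE_SYNC handling seen at ext_idle.c line 519 and reports
		 * whether the CPU it picked was idle. */
		return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	}

	SEC(".struct_ops.link")
	struct sched_ext_ops example_ops = {
		.select_cpu	= (void *)example_select_cpu,
		.name		= "example",
	};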
wait.c
93 int nr_exclusive, int wake_flags, void *key) in __wake_up_common() argument
108 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
119 int nr_exclusive, int wake_flags, void *key) in __wake_up_common_lock() argument
125 remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, in __wake_up_common_lock()
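__wake_up_common() passes wake_flags to each waiter's callback (curr->func above), so a custom wake function sees the same flags the waker supplied. A hedged sketch with made-up names (my_wake_function, my_add_waiter), delegating the actual wakeup to default_wake_function():

	#include <linux/wait.h>
	#include <linux/sched.h>

	static int my_wake_function(struct wait_queue_entry *wq_entry,
				    unsigned int mode, int wake_flags, void *key)
	{
		/* wake_flags is whatever the waker passed down, e.g. WF_SYNC
		 * for synchronous wakeups; delegate the actual wakeup. */
		return default_wake_function(wq_entry, mode, wake_flags, key);
	}

	static void my_add_waiter(struct wait_queue_head *wq_head,
				  struct wait_queue_entry *wq_entry)
	{
		init_waitqueue_func_entry(wq_entry, my_wake_function);
		wq_entry->private = current;	/* task to be woken */
		add_wait_queue(wq_head, wq_entry);
	}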
swait.c
22 void swake_up_locked(struct swait_queue_head *q, int wake_flags) in swake_up_locked() argument
30 try_to_wake_up(curr->task, TASK_NORMAL, wake_flags); in swake_up_locked()
completion.c
21 static void complete_with_flags(struct completion *x, int wake_flags) in complete_with_flags() argument
29 swake_up_locked(&x->wait, wake_flags); in complete_with_flags()
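complete_with_flags() is a static helper; it is reached through the public completion API, which is how a wake_flags value arrives at swake_up_locked(). Minimal usage sketch (the completion and function names are illustrative):

	#include <linux/completion.h>

	static DECLARE_COMPLETION(setup_done);

	static void producer(void)
	{
		/* ... finish setup ... */
		complete(&setup_done);		/* funnels into complete_with_flags() above */
	}

	static void consumer(void)
	{
		wait_for_completion(&setup_done);
	}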
core.c
3608 *wake_flags |= WF_RQ_SELECTED; in select_task_rq()
3701 if (wake_flags & WF_MIGRATED) in ttwu_stat()
3707 if (wake_flags & WF_SYNC) in ttwu_stat()
3731 if (wake_flags & WF_RQ_SELECTED) in ttwu_do_activate()
3733 if (wake_flags & WF_MIGRATED) in ttwu_do_activate()
3742 wakeup_preempt(rq, p, wake_flags); in ttwu_do_activate()
4183 wake_flags |= WF_TTWU; in try_to_wake_up()
4323 wake_flags |= WF_MIGRATED; in try_to_wake_up()
4328 ttwu_queue(p, cpu, wake_flags); in try_to_wake_up()
4849 int wake_flags = WF_FORK; in wake_up_new_task() local
[all …]
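The core.c hits show that wake_flags is assembled inside the wakeup path itself (WF_TTWU, WF_MIGRATED, WF_RQ_SELECTED, WF_FORK); ordinary callers pass no flags. Illustrative callers only (example_wakers is a made-up name):

	#include <linux/sched.h>

	static void example_wakers(struct task_struct *p)
	{
		wake_up_process(p);			/* try_to_wake_up(p, TASK_NORMAL, 0) */
		wake_up_state(p, TASK_INTERRUPTIBLE);	/* try_to_wake_up(p, TASK_INTERRUPTIBLE, 0) */
	}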
ext_idle.h
18 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
ext.c
284 s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
3509 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_scx() argument
3524 if (unlikely(wake_flags & WF_EXEC)) in select_task_rq_scx()
3539 wake_flags); in select_task_rq_scx()
3549 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0); in select_task_rq_scx()
4050 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} in wakeup_preempt_scx() argument
5975 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return … in sched_ext_ops__select_cpu() argument
fair.c
8474 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_fair() argument
8476 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); in select_task_rq_fair()
8482 int sd_flag = wake_flags & 0xF; in select_task_rq_fair()
8488 if (wake_flags & WF_TTWU) { in select_task_rq_fair()
8491 if ((wake_flags & WF_CURRENT_CPU) && in select_task_rq_fair()
8534 } else if (wake_flags & WF_TTWU) { /* XXX always ? */ in select_task_rq_fair()
8646 static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup_fair() argument
8666 if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) { in check_preempt_wakeup_fair()
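The WF_SYNC bit tested in select_task_rq_fair() comes from the *_sync wakeup variants on the waker side. A small illustrative sketch (the wait queue and function names are made up):

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);

	static void notify_and_sleep_soon(void)
	{
		/* The _sync variants pass WF_SYNC down to try_to_wake_up(),
		 * hinting that the waker is about to sleep so the wakee may
		 * stay on this CPU. */
		wake_up_interruptible_sync(&my_wq);
	}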
sched.h
3501 extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
/kernel/locking/
percpu-rwsem.c
120 unsigned int mode, int wake_flags, in percpu_rwsem_wake_function() argument
