Lines matching references to p

40 static inline int normal_prio(struct task_struct *p)  in normal_prio()  argument
42 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); in normal_prio()
52 static int effective_prio(struct task_struct *p) in effective_prio() argument
54 p->normal_prio = normal_prio(p); in effective_prio()
60 if (!rt_or_dl_prio(p->prio)) in effective_prio()
61 return p->normal_prio; in effective_prio()
62 return p->prio; in effective_prio()
65 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
71 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) in set_user_nice()
77 CLASS(task_rq_lock, rq_guard)(p); in set_user_nice()
88 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
89 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
93 queued = task_on_rq_queued(p); in set_user_nice()
94 running = task_current_donor(rq, p); in set_user_nice()
96 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
98 put_prev_task(rq, p); in set_user_nice()
100 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
101 set_load_weight(p, true); in set_user_nice()
102 old_prio = p->prio; in set_user_nice()
103 p->prio = effective_prio(p); in set_user_nice()
106 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
108 set_next_task(rq, p); in set_user_nice()
114 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
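
set_user_nice() is exported for in-kernel use. A minimal, hedged sketch of the common calling pattern (the worker function below is my own illustration, not taken from this file): a kthread demoting itself to the weakest nice level.

    #include <linux/jiffies.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Hypothetical background worker; runs at the weakest non-idle weight. */
    static int background_fn(void *data)
    {
            /*
             * Out-of-range or unchanged values are silently ignored by
             * set_user_nice() (see line 71 above).
             */
            set_user_nice(current, MAX_NICE);

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }
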
126 static bool is_nice_reduction(const struct task_struct *p, const int nice) in is_nice_reduction() argument
131 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); in is_nice_reduction()
139 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
141 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); in can_nice()
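
can_nice() is the kernel side of the RLIMIT_NICE rule documented in setrlimit(2): without CAP_SYS_NICE, a task may lower its nice value only down to 20 - rlim_cur. A small user-space sketch (my own example) that computes that floor:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_NICE, &rl) == -1) {
                    perror("getrlimit");
                    return 1;
            }
            /* e.g. rlim_cur == 30 allows nice values down to -10 */
            printf("lowest settable nice without CAP_SYS_NICE: %ld\n",
                   20 - (long)rl.rlim_cur);
            return 0;
    }
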
191 int task_prio(const struct task_struct *p) in task_prio() argument
193 return p->prio - MAX_RT_PRIO; in task_prio()
271 struct task_struct *p; in find_get_task() local
274 p = find_process_by_pid(pid); in find_get_task()
275 if (likely(p)) in find_get_task()
276 get_task_struct(p); in find_get_task()
278 return p; in find_get_task()
290 static void __setscheduler_params(struct task_struct *p, in DEFINE_CLASS()
296 policy = p->policy; in DEFINE_CLASS()
298 p->policy = policy; in DEFINE_CLASS()
301 __setparam_dl(p, attr); in DEFINE_CLASS()
303 __setparam_fair(p, attr); in DEFINE_CLASS()
306 if (rt_or_dl_task_policy(p)) { in DEFINE_CLASS()
307 p->timer_slack_ns = 0; in DEFINE_CLASS()
308 } else if (p->timer_slack_ns == 0) { in DEFINE_CLASS()
310 p->timer_slack_ns = p->default_timer_slack_ns; in DEFINE_CLASS()
318 p->rt_priority = attr->sched_priority; in DEFINE_CLASS()
319 p->normal_prio = normal_prio(p); in DEFINE_CLASS()
320 set_load_weight(p, true); in DEFINE_CLASS()
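
The timer-slack handling at lines 306-310 is visible from user space: switching to an RT or DL policy forces the slack to 0, and switching back restores the default. A hedged sketch (my own; the SCHED_FIFO step needs CAP_SYS_NICE or a suitable RLIMIT_RTPRIO):

    #include <sched.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 1 };

            printf("slack before: %ld ns\n", (long)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));

            if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            /* RT/DL policies get timer_slack_ns forced to 0 (line 307 above). */
            printf("slack after:  %ld ns\n", (long)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
            return 0;
    }
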
326 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
331 pcred = __task_cred(p); in check_same_owner()
338 static int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
341 int util_min = p->uclamp_req[UCLAMP_MIN].value; in uclamp_validate()
342 int util_max = p->uclamp_req[UCLAMP_MAX].value; in uclamp_validate()
398 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
404 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; in __setscheduler_uclamp()
414 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) in __setscheduler_uclamp()
428 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], in __setscheduler_uclamp()
434 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], in __setscheduler_uclamp()
441 static inline int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
446 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
455 static int user_check_sched_setscheduler(struct task_struct *p, in user_check_sched_setscheduler() argument
460 if (attr->sched_nice < task_nice(p) && in user_check_sched_setscheduler()
461 !is_nice_reduction(p, attr->sched_nice)) in user_check_sched_setscheduler()
466 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); in user_check_sched_setscheduler()
469 if (policy != p->policy && !rlim_rtprio) in user_check_sched_setscheduler()
473 if (attr->sched_priority > p->rt_priority && in user_check_sched_setscheduler()
491 if (task_has_idle_policy(p) && !idle_policy(policy)) { in user_check_sched_setscheduler()
492 if (!is_nice_reduction(p, task_nice(p))) in user_check_sched_setscheduler()
497 if (!check_same_owner(p)) in user_check_sched_setscheduler()
501 if (p->sched_reset_on_fork && !reset_on_fork) in user_check_sched_setscheduler()
513 int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
532 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
533 policy = oldpolicy = p->policy; in __sched_setscheduler()
556 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); in __sched_setscheduler()
563 retval = security_task_setscheduler(p); in __sched_setscheduler()
570 retval = uclamp_validate(p, attr); in __sched_setscheduler()
579 if (dl_policy(policy) || dl_policy(p->policy)) { in __sched_setscheduler()
591 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
597 if (p == rq->stop) { in __sched_setscheduler()
602 retval = scx_check_setscheduler(p, policy); in __sched_setscheduler()
610 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
612 (attr->sched_nice != task_nice(p) || in __sched_setscheduler()
613 (attr->sched_runtime != p->se.slice))) in __sched_setscheduler()
615 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
617 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
622 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
636 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
637 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
651 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
660 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
662 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
673 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { in __sched_setscheduler()
678 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
679 oldprio = p->prio; in __sched_setscheduler()
690 newprio = rt_effective_prio(p, newprio); in __sched_setscheduler()
695 prev_class = p->sched_class; in __sched_setscheduler()
698 if (prev_class != next_class && p->se.sched_delayed) in __sched_setscheduler()
699 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK); in __sched_setscheduler()
701 queued = task_on_rq_queued(p); in __sched_setscheduler()
702 running = task_current_donor(rq, p); in __sched_setscheduler()
704 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
706 put_prev_task(rq, p); in __sched_setscheduler()
709 __setscheduler_params(p, attr); in __sched_setscheduler()
710 p->sched_class = next_class; in __sched_setscheduler()
711 p->prio = newprio; in __sched_setscheduler()
713 __setscheduler_uclamp(p, attr); in __sched_setscheduler()
714 check_class_changing(rq, p, prev_class); in __sched_setscheduler()
721 if (oldprio < p->prio) in __sched_setscheduler()
724 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
727 set_next_task(rq, p); in __sched_setscheduler()
729 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
734 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
739 rt_mutex_adjust_pi(p); in __sched_setscheduler()
749 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
755 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
761 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
764 if (p->se.custom_slice) in _sched_setscheduler()
765 attr.sched_runtime = p->se.slice; in _sched_setscheduler()
774 return __sched_setscheduler(p, &attr, check, true); in _sched_setscheduler()
788 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
791 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
794 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
796 return __sched_setscheduler(p, attr, true, true); in sched_setattr()
799 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) in sched_setattr_nocheck() argument
801 return __sched_setscheduler(p, attr, false, true); in sched_setattr_nocheck()
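
sched_setattr_nocheck() runs __sched_setscheduler() with user == false, so the permission and rlimit checks above are skipped; it exists for kernel-internal callers. A hedged sketch (the helper name and the numbers are illustrative only) giving a kernel task a SCHED_DEADLINE reservation:

    #include <linux/sched.h>
    #include <linux/time64.h>
    #include <uapi/linux/sched/types.h>

    /* Hypothetical helper: 10ms of budget every 100ms for task 'tsk'. */
    static int give_deadline_reservation(struct task_struct *tsk)
    {
            struct sched_attr attr = {
                    .sched_policy   = SCHED_DEADLINE,
                    .sched_runtime  =  10 * NSEC_PER_MSEC,
                    .sched_deadline = 100 * NSEC_PER_MSEC,
                    .sched_period   = 100 * NSEC_PER_MSEC,
            };

            /* Skips capability checks, but DL admission control still applies. */
            return sched_setattr_nocheck(tsk, &attr);
    }
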
818 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
821 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
842 void sched_set_fifo(struct task_struct *p) in sched_set_fifo() argument
845 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo()
852 void sched_set_fifo_low(struct task_struct *p) in sched_set_fifo_low() argument
855 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo_low()
859 void sched_set_normal(struct task_struct *p, int nice) in sched_set_normal() argument
865 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); in sched_set_normal()
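
sched_set_fifo(), sched_set_fifo_low() and sched_set_normal() are the preferred in-kernel helpers because callers never pick a raw priority number. A hedged sketch of a driver promoting its worker kthread (all names here are illustrative):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *worker;      /* hypothetical */

    static int start_rt_worker(int (*fn)(void *), void *data)
    {
            worker = kthread_run(fn, data, "rt_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            /*
             * Mid-range FIFO priority; sched_set_fifo_low() would instead
             * place the thread just above the normal scheduling classes.
             */
            sched_set_fifo(worker);
            return 0;
    }
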
879 CLASS(find_get_task, p)(pid); in do_sched_setscheduler()
880 if (!p) in do_sched_setscheduler()
883 return sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
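
From user space this path is reached through sched_setscheduler(2). A hedged example (my own; needs CAP_SYS_NICE or an RLIMIT_RTPRIO allowance) that also sets the reset-on-fork flag, which sched_getscheduler(2) reports back (see line 1028 further down):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 10 };

            /* SCHED_RESET_ON_FORK: children created by fork(2) do not inherit the RT policy. */
            if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            /* The flag comes back ORed into the returned policy. */
            printf("policy now: %d\n", sched_getscheduler(0));
            return 0;
    }
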
931 static void get_params(struct task_struct *p, struct sched_attr *attr) in get_params() argument
933 if (task_has_dl_policy(p)) { in get_params()
934 __getparam_dl(p, attr); in get_params()
935 } else if (task_has_rt_policy(p)) { in get_params()
936 attr->sched_priority = p->rt_priority; in get_params()
938 attr->sched_nice = task_nice(p); in get_params()
939 attr->sched_runtime = p->se.slice; in get_params()
995 CLASS(find_get_task, p)(pid); in SYSCALL_DEFINE3()
996 if (!p) in SYSCALL_DEFINE3()
1000 get_params(p, &attr); in SYSCALL_DEFINE3()
1002 return sched_setattr(p, &attr); in SYSCALL_DEFINE3()
1014 struct task_struct *p; in SYSCALL_DEFINE1() local
1021 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
1022 if (!p) in SYSCALL_DEFINE1()
1025 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
1027 retval = p->policy; in SYSCALL_DEFINE1()
1028 if (p->sched_reset_on_fork) in SYSCALL_DEFINE1()
1045 struct task_struct *p; in SYSCALL_DEFINE2() local
1052 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
1053 if (!p) in SYSCALL_DEFINE2()
1056 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
1060 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
1061 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
1081 struct task_struct *p; in SYSCALL_DEFINE4() local
1089 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
1090 if (!p) in SYSCALL_DEFINE4()
1093 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
1097 kattr.sched_policy = p->policy; in SYSCALL_DEFINE4()
1098 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
1100 get_params(p, &kattr); in SYSCALL_DEFINE4()
1109 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; in SYSCALL_DEFINE4()
1110 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; in SYSCALL_DEFINE4()
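
glibc has traditionally shipped no wrappers for sched_setattr(2)/sched_getattr(2), so they are usually invoked through syscall(2) with a locally defined struct sched_attr (layout as documented in sched_setattr(2)). A hedged sketch (my own; the clamp values are arbitrary) that requests a utilization clamp and reads back the fields surfaced at lines 1109-1110:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/sched.h>                /* SCHED_FLAG_UTIL_CLAMP, SCHED_FLAG_KEEP_ALL */

    struct sched_attr {                     /* layout per sched_setattr(2) */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
            uint32_t sched_util_min;        /* uclamp hints, Linux >= 5.3 */
            uint32_t sched_util_max;
    };

    int main(void)
    {
            struct sched_attr attr = {
                    .size           = sizeof(attr),
                    .sched_flags    = SCHED_FLAG_KEEP_ALL | SCHED_FLAG_UTIL_CLAMP,
                    .sched_util_min = 256,  /* roughly a quarter of SCHED_CAPACITY_SCALE */
                    .sched_util_max = 1024,
            };

            /* Fails with EOPNOTSUPP if CONFIG_UCLAMP_TASK is off (stub at line 441 above). */
            if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1) {
                    perror("sched_setattr");
                    return 1;
            }

            /* usize tells the kernel how much of the struct we understand. */
            if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == -1) {
                    perror("sched_getattr");
                    return 1;
            }
            printf("policy=%u util_min=%u util_max=%u\n",
                   attr.sched_policy, attr.sched_util_min, attr.sched_util_max);
            return 0;
    }
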
1118 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity() argument
1124 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) in dl_task_check_affinity()
1131 if (dl_entity_is_special(&p->dl)) in dl_task_check_affinity()
1141 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
1147 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) in __sched_setaffinity() argument
1160 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1166 retval = dl_task_check_affinity(p, new_mask); in __sched_setaffinity()
1170 retval = __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
1174 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1197 __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
1214 CLASS(find_get_task, p)(pid); in sched_setaffinity()
1215 if (!p) in sched_setaffinity()
1218 if (p->flags & PF_NO_SETAFFINITY) in sched_setaffinity()
1221 if (!check_same_owner(p)) { in sched_setaffinity()
1223 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) in sched_setaffinity()
1227 retval = security_task_setscheduler(p); in sched_setaffinity()
1248 retval = __sched_setaffinity(p, &ac); in sched_setaffinity()
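
User space reaches this through sched_setaffinity(2); as the lines above show, the kernel rejects PF_NO_SETAFFINITY tasks, requires ownership or CAP_SYS_NICE, and intersects the request with the task's cpuset. A minimal example (my own):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(0, &set);       /* pin the calling thread to CPU 0 */

            if (sched_setaffinity(0, sizeof(set), &set) == -1) {
                    perror("sched_setaffinity");
                    return 1;
            }
            return 0;
    }
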
1291 struct task_struct *p; in sched_getaffinity() local
1295 p = find_process_by_pid(pid); in sched_getaffinity()
1296 if (!p) in sched_getaffinity()
1299 retval = security_task_getscheduler(p); in sched_getaffinity()
1303 guard(raw_spinlock_irqsave)(&p->pi_lock); in sched_getaffinity()
1304 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); in sched_getaffinity()
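
The read side is sched_getaffinity(2); note that the kernel masks the result with cpu_active_mask (line 1304), so offline CPUs never show up. A small example (my own):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            if (sched_getaffinity(0, sizeof(set), &set) == -1) {
                    perror("sched_getaffinity");
                    return 1;
            }
            printf("runnable on %d CPUs\n", CPU_COUNT(&set));
            return 0;
    }
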
1421 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
1427 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in yield_to()
1431 p_rq = task_rq(p); in yield_to()
1440 if (task_rq(p) != p_rq) in yield_to()
1446 if (curr->sched_class != p->sched_class) in yield_to()
1449 if (task_on_cpu(p_rq, p) || !task_is_running(p)) in yield_to()
1452 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
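
yield_to() is used for directed yields, for example a hypervisor boosting the vCPU thread that holds a lock another vCPU is spinning on. A hedged sketch (obtaining 'target' is deliberately left out and the helper name is mine):

    #include <linux/sched.h>

    /* Try to give our remaining slice to 'target'; true if we actually yielded. */
    static bool boost_lock_holder(struct task_struct *target)
    {
            /*
             * yield_to() returns > 0 on success, 0 if the target could not
             * be yielded to, and may return -ESRCH if there is no target.
             */
            return yield_to(target, false) > 0;
    }
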
1535 struct task_struct *p = find_process_by_pid(pid); in sched_rr_get_interval() local
1536 if (!p) in sched_rr_get_interval()
1539 retval = security_task_getscheduler(p); in sched_rr_get_interval()
1543 scoped_guard (task_rq_lock, p) { in sched_rr_get_interval()
1545 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
1546 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
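
And the corresponding user-space call, sched_rr_get_interval(2); only scheduling classes that implement get_rr_interval() (line 1545) report a non-zero slice. A minimal example (my own):

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            if (sched_rr_get_interval(0, &ts) == -1) {
                    perror("sched_rr_get_interval");
                    return 1;
            }
            printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }
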