Lines matching refs: dl_bw

All of the functions below live in the SCHED_DEADLINE scheduler, kernel/sched/deadline.c (the line numbers belong to one particular kernel version and will drift across releases). Each entry reads <source line> <matching code> in <enclosing function>; a trailing "argument" marks hits where dl_bw is a parameter name rather than the struct type or a field. Each group of related hits is annotated below with a condensed sketch of the surrounding code.

101 static inline struct dl_bw *dl_bw_of(int i)  in dl_bw_of()
105 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
167 void __dl_update(struct dl_bw *dl_b, s64 bw) in __dl_update()
169 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); in __dl_update()
181 static inline struct dl_bw *dl_bw_of(int i) in dl_bw_of()
183 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
202 void __dl_update(struct dl_bw *dl_b, s64 bw) in __dl_update()
204 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); in __dl_update()
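
The two dl_bw_of()/__dl_update() pairs above are the CONFIG_SMP and UP variants of the same hooks: with SMP the accounting box is the per-root-domain rd->dl_bw, on UP it collapses into the single runqueue's dl.dl_bw. A condensed sketch of the split, assuming a recent kernel (RCU/lockdep assertions elided):

    #ifdef CONFIG_SMP
    static inline struct dl_bw *dl_bw_of(int i)
    {
        return &cpu_rq(i)->rd->dl_bw;       /* shared by the whole root domain */
    }

    static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
    {
        struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
        int i;

        /* Spread the change across every active CPU of the domain. */
        for_each_cpu_and(i, rd->span, cpu_active_mask)
            cpu_rq(i)->dl.extra_bw += bw;
    }
    #else
    static inline struct dl_bw *dl_bw_of(int i)
    {
        return &cpu_rq(i)->dl.dl_bw;        /* one runqueue, one box */
    }

    static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
    {
        struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

        dl->extra_bw += bw;
    }
    #endif
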
211 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_sub()
218 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) in __dl_add()
225 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw) in __dl_overflow()
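
Lines 211-225 are the primitives every accounting site below funnels through: __dl_add()/__dl_sub() adjust the admitted total_bw and push the per-CPU share into extra_bw via __dl_update(), while __dl_overflow() is the capacity-aware admission test. Roughly, in recent kernels:

    static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
        dl_b->total_bw -= tsk_bw;
        __dl_update(dl_b, (s32)tsk_bw / cpus);      /* hand the bandwidth back */
    }

    static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
        dl_b->total_bw += tsk_bw;
        __dl_update(dl_b, -((s32)tsk_bw / cpus));
    }

    static inline bool
    __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
    {
        return dl_b->bw != -1 &&                    /* -1: admission control off */
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
    }

Admission succeeds iff total_bw - old_bw + new_bw still fits in the limit dl_b->bw, scaled by the CPU capacity cap of the domain.
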
232 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_running_bw() argument
237 dl_rq->running_bw += dl_bw; in __add_running_bw()
245 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_running_bw() argument
250 dl_rq->running_bw -= dl_bw; in __sub_running_bw()
259 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_rq_bw() argument
264 dl_rq->this_bw += dl_bw; in __add_rq_bw()
269 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_rq_bw() argument
274 dl_rq->this_bw -= dl_bw; in __sub_rq_bw()
285 __add_rq_bw(dl_se->dl_bw, dl_rq); in add_rq_bw()
292 __sub_rq_bw(dl_se->dl_bw, dl_rq); in sub_rq_bw()
299 __add_running_bw(dl_se->dl_bw, dl_rq); in add_running_bw()
306 __sub_running_bw(dl_se->dl_bw, dl_rq); in sub_running_bw()
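
Lines 232-306 are the per-runqueue utilization counters: this_bw accumulates the bandwidth of every DL task attached to the runqueue, running_bw only of those currently contending, and the invariant running_bw <= this_bw is asserted. The add_*/sub_* wrappers (lines 285-306) simply feed dl_se->dl_bw into the double-underscore helpers, skipping special (SCHED_FLAG_SUGOV) entities. The add side, condensed (assertion spelling varies across versions):

    static inline void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
        u64 old = dl_rq->running_bw;

        lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old);     /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* Utilization changed: let cpufreq reevaluate the OPP. */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
    }
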
332 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
428 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
433 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
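
task_non_contending() (line 428) is the GRUB "inactive" transition: when a task blocks, its running_bw may only be released at its zero-lag time. If that instant is already in the past the bandwidth is dropped immediately, which for a dying or no-longer-DL task includes the root-domain __dl_sub() at line 433; otherwise the inactive timer is armed to fire at the zero-lag time (see line 1423 below). The zero-lag instant is computed as:

    /*
     * Zero-lag time: when the remaining runtime, consumed at the
     * task's own bandwidth dl_runtime/dl_period, would reach zero:
     *
     *     t_zl = deadline - runtime * dl_period / dl_runtime
     */
    zerolag_time = dl_se->deadline -
        div64_long((dl_se->runtime * dl_se->dl_period),
                   dl_se->dl_runtime);
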
499 void init_dl_bw(struct dl_bw *dl_b) in init_dl_bw()
521 init_dl_bw(&dl_rq->dl_bw); in init_dl_rq()
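
init_dl_bw() (line 499) seeds a dl_bw box from the global sched_rt_period_us/sched_rt_runtime_us knobs; -1 means admission control is disabled. A sketch, assuming a recent kernel:

    void init_dl_bw(struct dl_bw *dl_b)
    {
        raw_spin_lock_init(&dl_b->lock);
        if (global_rt_runtime() == RUNTIME_INF)
            dl_b->bw = -1;              /* admission control disabled */
        else
            dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        dl_b->total_bw = 0;
    }
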
671 struct dl_bw *dl_b; in dl_task_offline_migration()
722 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
724 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
727 dl_b = &later_rq->rd->dl_bw; in dl_task_offline_migration()
729 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
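
dl_task_offline_migration() (lines 671-729) runs when a CPU with queued DL tasks goes offline: the task's bandwidth is moved from the dying root domain to the one of the fallback runqueue, with the cpus argument taken as the span weight of each domain:

    /* Release the bandwidth in the source root domain... */
    dl_b = &rq->rd->dl_bw;
    raw_spin_lock(&dl_b->lock);
    __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
    raw_spin_unlock(&dl_b->lock);

    /* ...and claim it in the destination one. */
    dl_b = &later_rq->rd->dl_bw;
    raw_spin_lock(&dl_b->lock);
    __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
    raw_spin_unlock(&dl_b->lock);
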
1285 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
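
Line 1285 is the heart of GRUB reclaiming: a task may consume unused bandwidth, but is never charged less than its own bandwidth scaled by bw_ratio (a reciprocal stored with RATIO_SHIFT = 8 fractional bits). The surrounding function, for context:

    static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
    {
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw;   /* Utot - Uact */
        u64 u_act;
        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

        /*
         * Charge at least u_act_min, and at most whatever remains
         * once inactive and extra bandwidth are set aside.
         */
        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
            u_act = u_act_min;
        else
            u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

        return (delta * u_act) >> BW_SHIFT;
    }

The consumed runtime delta is thus scaled by the effective active utilization u_act.
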
1423 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1432 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
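
inactive_task_timer() (line 1423) is the deferred half of task_non_contending(): it fires at the zero-lag time and, if the task has died or left SCHED_DEADLINE in the meantime, finally releases both the runqueue counters and the root-domain bandwidth. Condensed (task-state handling varies by version):

    if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

        if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
            sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
            sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
            dl_se->dl_non_contending = 0;
        }

        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
        raw_spin_unlock(&dl_b->lock);
        __dl_clear_params(p);
    }
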
2504 struct dl_bw *src_dl_b; in set_cpus_allowed_dl()
2513 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
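
set_cpus_allowed_dl() (line 2504) handles affinity changes that move a DL task between exclusive cpusets, i.e. disjoint root domains. Space has already been reserved on the destination side, so only the source domain needs releasing; roughly:

    src_rd = rq->rd;
    /*
     * Migrating between exclusive cpusets (disjoint root domains)
     * entails a bandwidth update; the destination has already made
     * room for us (see cpuset_can_attach()).
     */
    if (!cpumask_intersects(src_rd->span, new_mask)) {
        struct dl_bw *src_dl_b;

        src_dl_b = dl_bw_of(cpu_of(rq));
        raw_spin_lock(&src_dl_b->lock);
        __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
        raw_spin_unlock(&src_dl_b->lock);
    }
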
2554 struct dl_bw *dl_b; in dl_add_task_root_domain()
2564 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2567 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2578 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
2579 rd->dl_bw.total_bw = 0; in dl_clear_root_domain()
2580 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
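
Lines 2554-2580 are the root-domain rebuild path (cpuset reconfiguration): dl_clear_root_domain() zeroes the accumulated total_bw, after which dl_add_task_root_domain() is called for each DL task to re-account it into its (possibly new) domain. A sketch, assuming a recent kernel:

    void dl_clear_root_domain(struct root_domain *rd)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
        rd->dl_bw.total_bw = 0;
        raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
    }

    void dl_add_task_root_domain(struct task_struct *p)
    {
        struct rq_flags rf;
        struct rq *rq;
        struct dl_bw *dl_b;

        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
        if (!dl_task(p)) {
            raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
            return;
        }

        rq = __task_rq_lock(p, &rf);    /* pins p's root domain */

        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        task_rq_unlock(rq, p, &rf);
    }
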
2750 struct dl_bw *dl_b; in sched_dl_global_validate()
2800 struct dl_bw *dl_b; in sched_dl_do_global()
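
Lines 2750 and 2800 belong to the sysctl path: sched_dl_global_validate() first checks, per root domain, that lowering the global limit would not drop below the already admitted total_bw; sched_dl_do_global() then writes the new limit into every dl_bw box. The core of the latter, condensed:

    u64 new_bw = -1;                    /* RUNTIME_INF: no limit */

    if (global_rt_runtime() != RUNTIME_INF)
        new_bw = to_ratio(global_rt_period(), global_rt_runtime());

    for_each_possible_cpu(cpu) {
        rcu_read_lock_sched();
        dl_b = dl_bw_of(cpu);

        raw_spin_lock_irqsave(&dl_b->lock, flags);
        dl_b->bw = new_bw;
        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
        rcu_read_unlock_sched();
    }
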
2841 struct dl_bw *dl_b = dl_bw_of(cpu); in sched_dl_overflow()
2848 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2863 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2867 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2875 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
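
sched_dl_overflow() (lines 2841-2875) is the per-task admission control invoked from sched_setattr(): after the early-out at line 2848 (unchanged parameters), it distinguishes the three possible policy transitions, with cpus = dl_bw_cpus(cpu) and cap = dl_bw_capacity(cpu). Condensed:

    /* !DL -> DL: try to admit new_bw from scratch. */
    if (dl_policy(policy) && !task_has_dl_policy(p) &&
        !__dl_overflow(dl_b, cap, 0, new_bw)) {
        /* The old bandwidth may still be accounted while the
         * inactive timer is pending; drop it first. */
        if (hrtimer_active(&p->dl.inactive_timer))
            __dl_sub(dl_b, p->dl.dl_bw, cpus);
        __dl_add(dl_b, new_bw, cpus);
        err = 0;

    /* DL -> DL with new parameters: swap old bandwidth for new. */
    } else if (dl_policy(policy) && task_has_dl_policy(p) &&
               !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
        __dl_sub(dl_b, p->dl.dl_bw, cpus);
        __dl_add(dl_b, new_bw, cpus);
        dl_change_utilization(p, new_bw);
        err = 0;

    /* DL -> !DL: switched_from_dl() releases the bandwidth later. */
    } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
        __dl_clear_params(p);
        err = 0;
    }
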
2908 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); in __setparam_dl()
2990 dl_se->dl_bw = 0; in __dl_clear_params()
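
dl_bw itself is a fixed-point utilization: __setparam_dl() (line 2908) stores to_ratio(dl_period, dl_runtime), i.e. the runtime/period ratio with BW_SHIFT (20) fractional bits, and __dl_clear_params() resets it. A minimal, standalone userspace re-implementation of the arithmetic (the kernel's to_ratio() lives in kernel/sched/core.c):

    #include <stdio.h>
    #include <stdint.h>

    #define BW_SHIFT    20                      /* 2^-20 fixed point */
    #define BW_UNIT     (1ULL << BW_SHIFT)      /* utilization 1.0   */

    /* Same arithmetic as the kernel's to_ratio(). */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
        if (period == 0)
            return 0;
        return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
        /* 30 ms of runtime every 100 ms -> utilization 0.3 */
        uint64_t bw = to_ratio(100000000ULL, 30000000ULL);

        printf("dl_bw = %llu (%.3f of BW_UNIT)\n",
               (unsigned long long)bw, (double)bw / BW_UNIT);
        return 0;
    }
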
3021 struct dl_bw *cur_dl_b; in dl_cpuset_cpumask_can_shrink()
3039 struct dl_bw *dl_b; in dl_cpu_busy()
3046 overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0); in dl_cpu_busy()
3055 __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu)); in dl_cpu_busy()
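
Finally, dl_cpuset_cpumask_can_shrink() (line 3021) and dl_cpu_busy() (line 3039) are the cpuset/hotplug guards: both boil down to an __dl_overflow() check, and when a task p is passed in, dl_cpu_busy() also reserves p's bandwidth in the destination domain so the subsequent migration cannot fail. Roughly, in kernels with this two-argument form:

    int dl_cpu_busy(int cpu, struct task_struct *p)
    {
        unsigned long flags, cap;
        struct dl_bw *dl_b;
        bool overflow;

        rcu_read_lock_sched();
        dl_b = dl_bw_of(cpu);
        raw_spin_lock_irqsave(&dl_b->lock, flags);

        cap = dl_bw_capacity(cpu);
        overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);

        if (!overflow && p) {
            /*
             * Reserve space for p in the destination root domain,
             * as we cannot fail after this point.
             */
            __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
        }

        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
        rcu_read_unlock_sched();

        return overflow ? -EBUSY : 0;
    }
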