Lines matching refs: tg (RT group scheduling, kernel/sched/rt.c)

195 void unregister_rt_sched_group(struct task_group *tg) in unregister_rt_sched_group() argument
197 if (tg->rt_se) in unregister_rt_sched_group()
198 destroy_rt_bandwidth(&tg->rt_bandwidth); in unregister_rt_sched_group()
201 void free_rt_sched_group(struct task_group *tg) in free_rt_sched_group() argument
206 if (tg->rt_rq) in free_rt_sched_group()
207 kfree(tg->rt_rq[i]); in free_rt_sched_group()
208 if (tg->rt_se) in free_rt_sched_group()
209 kfree(tg->rt_se[i]); in free_rt_sched_group()
212 kfree(tg->rt_rq); in free_rt_sched_group()
213 kfree(tg->rt_se); in free_rt_sched_group()
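
Lines 195-213 above are the teardown pair. The for_each_possible_cpu() loop header never names tg, so it is filtered out of the matches. A minimal sketch of how the matched lines fit together, patterned on mainline kernel/sched/rt.c; anything outside the matched lines is an assumption and may differ between kernel versions:

void unregister_rt_sched_group(struct task_group *tg)
{
        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        /* Free each CPU's rt_rq and scheduling entity first ... */
        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        /* ... then the per-CPU pointer arrays that indexed them. */
        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}
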
216 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
225 rt_rq->tg = tg; in init_tg_rt_entry()
227 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
228 tg->rt_se[cpu] = rt_se; in init_tg_rt_entry()
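
Only the four tg-touching statements of init_tg_rt_entry() matched (lines 216-228). A sketch of the full wiring, with the rt_se half reconstructed by assumption from mainline: the group and its per-CPU runqueue are linked in both directions, then the group's scheduling entity is hung off either the CPU's root rt_rq or the parent group's queue:

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                      struct sched_rt_entity *rt_se, int cpu,
                      struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;                 /* back-pointer: per-CPU rt_rq -> group */

        tg->rt_rq[cpu] = rt_rq;         /* forward pointers: group -> per-CPU */
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;                 /* the root group has no entity */

        if (!parent)
                rt_se->rt_rq = &rq->rt;         /* queue on the CPU's root rt_rq */
        else
                rt_se->rt_rq = parent->my_q;    /* or on the parent group's queue */

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}
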
243 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) in alloc_rt_sched_group() argument
249 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
250 if (!tg->rt_rq) in alloc_rt_sched_group()
252 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL); in alloc_rt_sched_group()
253 if (!tg->rt_se) in alloc_rt_sched_group()
256 init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0); in alloc_rt_sched_group()
270 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
271 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
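
alloc_rt_sched_group() (lines 243-271) is the constructor these matches come from. A sketch of the whole allocation path; the per-CPU kzalloc_node() calls and the error labels never mention tg and are reconstructed by assumption:

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        /* Pointer arrays sized for every possible CPU. */
        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        /* New groups start with the global period and zero runtime. */
        init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq);
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

It returns 1 on success and 0 on failure, the convention the sched-group allocators share; free_rt_sched_group() can then clean up whatever a partial run left behind.
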
310 void unregister_rt_sched_group(struct task_group *tg) { } in unregister_rt_sched_group() argument
312 void free_rt_sched_group(struct task_group *tg) { } in free_rt_sched_group() argument
314 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) in alloc_rt_sched_group() argument
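
Lines 310-314 are the !CONFIG_RT_GROUP_SCHED stubs: with RT group scheduling compiled out, the hooks collapse to no-ops, presumably just:

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;       /* nothing to allocate; report success */
}
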
487 if (!rt_rq->tg) in sched_rt_runtime()
495 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
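
Lines 487 and 495 come from the per-rt_rq bandwidth accessors. A sketch assuming the mainline bodies: an rt_rq with no owning group is treated as having unlimited runtime, while the period is always taken from the group's rt_bandwidth:

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;     /* no group: never throttled */

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
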
500 static inline struct task_group *next_task_group(struct task_group *tg) in next_task_group() argument
503 tg = list_entry_rcu(tg->list.next, in next_task_group()
505 } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); in next_task_group()
507 if (&tg->list == &task_groups) in next_task_group()
508 tg = NULL; in next_task_group()
510 return tg; in next_task_group()
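
next_task_group() (lines 500-510) walks the global task_groups list under RCU, skipping autogroups; its typeof(...) continuation line contains no bare tg, which is why it is missing above. A sketch, together with the for_each_rt_rq() iterator built on top of it (the macro is reconstructed by assumption from mainline):

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;      /* wrapped around: no more groups */

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);   \
             (iter = next_task_group(iter)) &&                         \
             (rt_rq = iter->rt_rq[cpu_of(rq)]);)
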
537 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
555 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
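
Lines 537 and 555 are the symmetric lookup of the group's per-CPU scheduling entity on unthrottle and throttle. A sketch of the enqueue side, assuming the mainline body; the dequeue side mirrors it with dequeue_rt_entity() and dequeue_top_rt_rq():

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq);

        rt_se = rt_rq->tg->rt_se[cpu];          /* the matched line */

        if (rt_rq->rt_nr_running) {
                if (!rt_se)
                        enqueue_top_rt_rq(rt_rq);       /* root group: no entity */
                else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, 0);

                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_curr(rq);
        }
}
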
603 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
1161 if (rt_rq->tg) in inc_rt_group()
1162 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
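
sched_rt_bandwidth() (line 603) resolves an rt_rq to its group's rt_bandwidth, and inc_rt_group() (lines 1161-1162) uses the same back-pointer to arm the replenishment timer whenever an entity is queued. A sketch of both, with the rt_se_boosted() bookkeeping assumed from mainline:

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))       /* count PI-boosted entities */
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}
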
2677 static inline int tg_has_rt_tasks(struct task_group *tg) in tg_has_rt_tasks() argument
2686 if (task_group_is_autogroup(tg)) in tg_has_rt_tasks()
2689 css_task_iter_start(&tg->css, 0, &it); in tg_has_rt_tasks()
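
tg_has_rt_tasks() (lines 2677-2689) asks whether any task in the group's cgroup is realtime; autogroups are skipped up front because they never carry RT tasks. A sketch with the iterator loop filled in by assumption:

static inline int tg_has_rt_tasks(struct task_group *tg)
{
        struct task_struct *task;
        struct css_task_iter it;
        int ret = 0;

        /* Autogroups never contain RT tasks; see autogroup_create(). */
        if (task_group_is_autogroup(tg))
                return 0;

        css_task_iter_start(&tg->css, 0, &it);
        while (!ret && (task = css_task_iter_next(&it)))
                ret |= rt_task(task);
        css_task_iter_end(&it);

        return ret;
}
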
2698 struct task_group *tg; member
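
The member match at line 2698 sits in the small cursor struct threaded through the tree walk below, presumably:

struct rt_schedulable_data {
        struct task_group *tg;  /* group whose limits are being changed */
        u64 rt_period;          /* proposed period, in ns */
        u64 rt_runtime;         /* proposed runtime, in ns */
};
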
2703 static int tg_rt_schedulable(struct task_group *tg, void *data) in tg_rt_schedulable() argument
2710 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_rt_schedulable()
2711 runtime = tg->rt_bandwidth.rt_runtime; in tg_rt_schedulable()
2713 if (tg == d->tg) { in tg_rt_schedulable()
2728 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) in tg_rt_schedulable()
2742 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_rt_schedulable()
2746 if (child == d->tg) { in tg_rt_schedulable()
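
tg_rt_schedulable() (lines 2703-2746) is the admission check run against every group in the hierarchy: runtime must not exceed the period, runtime cannot drop to zero while the group still has RT tasks, the group's utilization must fit within the global limit, and the children's utilizations must sum to no more than the parent's. A sketch assuming the mainline body; to_ratio() turns a period/runtime pair into a fixed-point utilization:

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
        struct rt_schedulable_data *d = data;
        struct task_group *child;
        unsigned long total, sum = 0;
        u64 period, runtime;

        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        runtime = tg->rt_bandwidth.rt_runtime;

        if (tg == d->tg) {
                /* Evaluate this group with the proposed values. */
                period = d->rt_period;
                runtime = d->rt_runtime;
        }

        /* Cannot have more runtime than the period. */
        if (runtime > period && runtime != RUNTIME_INF)
                return -EINVAL;

        /* Cannot zero the runtime of a group that still has RT tasks. */
        if (rt_bandwidth_enabled() && !runtime &&
            tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
                return -EBUSY;

        total = to_ratio(period, runtime);

        /* Nobody may exceed the global limit. */
        if (total > to_ratio(global_rt_period(), global_rt_runtime()))
                return -EINVAL;

        /* The children must fit inside the parent. */
        list_for_each_entry_rcu(child, &tg->children, siblings) {
                period = ktime_to_ns(child->rt_bandwidth.rt_period);
                runtime = child->rt_bandwidth.rt_runtime;

                if (child == d->tg) {
                        period = d->rt_period;
                        runtime = d->rt_runtime;
                }

                sum += to_ratio(period, runtime);
        }

        if (sum > total)
                return -EINVAL;

        return 0;
}
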
2760 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
2765 .tg = tg, in __rt_schedulable()
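
__rt_schedulable() (lines 2760-2765) just packages the proposed values and runs the check over the whole tree. A sketch, assuming mainline's walk_tg_tree() helper:

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
        int ret;

        struct rt_schedulable_data data = {
                .tg = tg,
                .rt_period = period,
                .rt_runtime = runtime,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
        rcu_read_unlock();

        return ret;
}
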
2777 static int tg_set_rt_bandwidth(struct task_group *tg, in tg_set_rt_bandwidth() argument
2786 if (tg == &root_task_group && rt_runtime == 0) in tg_set_rt_bandwidth()
2800 err = __rt_schedulable(tg, rt_period, rt_runtime); in tg_set_rt_bandwidth()
2804 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
2805 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_rt_bandwidth()
2806 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2809 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth()
2815 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_rt_bandwidth()
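
tg_set_rt_bandwidth() (lines 2777-2815) validates the new values against the hierarchy, then publishes them first into the group's rt_bandwidth and then into every per-CPU rt_rq, all under the bandwidth lock. A sketch; the checks between lines 2786 and 2800 (zero period, runtime overflow, the constraints mutex) are assumptions based on mainline:

static int tg_set_rt_bandwidth(struct task_group *tg,
                u64 rt_period, u64 rt_runtime)
{
        int i, err = 0;

        /* Zeroing the root group's runtime would stall kernel RT threads. */
        if (tg == &root_task_group && rt_runtime == 0)
                return -EINVAL;

        /* A zero period makes no sense. */
        if (rt_period == 0)
                return -EINVAL;

        /* Bound the quota to guard against overflow in runtime shifting. */
        if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
                return -EINVAL;

        mutex_lock(&rt_constraints_mutex);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
        if (err)
                goto unlock;

        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;

        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
        mutex_unlock(&rt_constraints_mutex);

        return err;
}
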
2822 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) in sched_group_set_rt_runtime() argument
2826 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
2833 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_runtime()
2836 long sched_group_rt_runtime(struct task_group *tg) in sched_group_rt_runtime() argument
2840 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
2843 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
2848 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) in sched_group_set_rt_period() argument
2856 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
2858 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_period()
2861 long sched_group_rt_period(struct task_group *tg) in sched_group_rt_period() argument
2865 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
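
Lines 2822-2865 are the four cgroup-file wrappers. They only convert between the microseconds exposed by cpu.rt_runtime_us / cpu.rt_period_us and the nanoseconds kept in rt_bandwidth, with -1 standing for RUNTIME_INF. A sketch of the runtime pair, assuming the mainline bodies (the period pair mirrors them):

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
        u64 rt_runtime, rt_period;

        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us < 0)
                rt_runtime = RUNTIME_INF;       /* -1 means unlimited */
        else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
                return -EINVAL;                 /* would overflow in ns */

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
        u64 rt_runtime_us;

        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
                return -1;

        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
        do_div(rt_runtime_us, NSEC_PER_USEC);
        return rt_runtime_us;
}
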
2883 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) in sched_rt_can_attach() argument
2886 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
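
Finally, sched_rt_can_attach() (lines 2883-2886) is the cgroup attach gate: an RT task may not move into a group whose runtime quota is zero, because it could never run there. Presumably the full body is just:

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
        /* Don't accept realtime tasks when there is no way for them to run. */
        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
                return 0;

        return 1;
}
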