Lines Matching refs:tg

289 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) in cfs_rq_tg_path()
290 autogroup_path(cfs_rq->tg, path, len); in cfs_rq_tg_path()
291 else if (cfs_rq && cfs_rq->tg->css.cgroup) in cfs_rq_tg_path()
292 cgroup_path(cfs_rq->tg->css.cgroup, path, len); in cfs_rq_tg_path()
316 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
317 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
325 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
335 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
442 static int tg_is_idle(struct task_group *tg) in tg_is_idle() argument
444 return tg->idle > 0; in tg_is_idle()
496 static inline int tg_is_idle(struct task_group *tg) in tg_is_idle() argument
3159 struct task_group *tg = cfs_rq->tg; in calc_group_shares() local
3161 tg_shares = READ_ONCE(tg->shares); in calc_group_shares()
3165 tg_weight = atomic_long_read(&tg->load_avg); in calc_group_shares()
3209 shares = READ_ONCE(gcfs_rq->tg->shares); in update_cfs_group()
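
The calc_group_shares() and update_cfs_group() hits above read tg->shares and the group-wide tg->load_avg to size this CPU's slice of the group's weight. The following is a minimal userspace sketch of that proportional split, using invented names (calc_group_shares_sketch, group_shares, local_load, total_load) rather than the kernel's exact helpers:

#include <stdio.h>

#define MIN_SHARES	2UL

/*
 * Give this CPU's group entity a weight proportional to the share of the
 * group's total load that runs here, never dropping an active group below
 * a minimum weight. Illustrative only.
 */
static unsigned long calc_group_shares_sketch(unsigned long group_shares,
					      unsigned long local_load,
					      unsigned long total_load)
{
	unsigned long shares;

	/* Ensure total_load >= local_load so the result never exceeds group_shares. */
	if (total_load < local_load)
		total_load = local_load;

	shares = group_shares * local_load;
	if (total_load)
		shares /= total_load;

	return shares < MIN_SHARES ? MIN_SHARES : shares;
}

int main(void)
{
	/* A group with 1024 shares and 1/4 of its load on this CPU: expect 256. */
	printf("%lu\n", calc_group_shares_sketch(1024, 256, 1024));
	return 0;
}

The real calc_group_shares() additionally folds this runqueue's previous contribution out of the group sum before dividing and clamps the result between MIN_SHARES and tg->shares.
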
3274 return (prev_cfs_rq->tg->parent == cfs_rq->tg); in child_cfs_rq_on_list()
3327 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3331 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
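
The update_tg_load_avg() hits show the root group being skipped and only a delta being added into the shared atomic tg->load_avg. A small sketch of that delta-accumulation pattern, with invented names (load_sum, cpu_contrib); the real function also skips updates when the change is small:

#include <stdatomic.h>
#include <stdio.h>

/* Group-wide load sum shared by all CPUs (kernel: tg->load_avg). */
static atomic_long load_sum;

struct cpu_contrib {
	long last;	/* what this CPU last folded into load_sum */
};

/* Propagate only the change in this CPU's load average into the shared sum. */
static void update_group_load_sketch(struct cpu_contrib *cc, long new_load)
{
	long delta = new_load - cc->last;

	if (delta) {
		atomic_fetch_add(&load_sum, delta);
		cc->last = new_load;
	}
}

int main(void)
{
	struct cpu_contrib cpu0 = { 0 };

	update_group_load_sketch(&cpu0, 300);	/* sum: 300 */
	update_group_load_sketch(&cpu0, 200);	/* sum: 200 */
	printf("%ld\n", (long)atomic_load(&load_sum));
	return 0;
}
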
4654 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
4656 return &tg->cfs_bandwidth; in tg_cfs_bandwidth()
4690 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
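
tg_cfs_bandwidth() simply returns the group's cfs_bandwidth pool, and assign_cfs_rq_runtime() uses it to pull a slice of runtime for one runqueue. Below is a simplified sketch of drawing from a shared quota pool under a lock; the names (bw_pool, take_slice_sketch) are invented, and the kernel uses its own locking and slice sizing:

#include <pthread.h>
#include <stdio.h>

struct bw_pool {
	pthread_mutex_t lock;
	long runtime;		/* nanoseconds left in the current period */
};

/* Hand at most 'slice' ns from the group pool to one runqueue. */
static long take_slice_sketch(struct bw_pool *bw, long slice)
{
	long granted = 0;

	pthread_mutex_lock(&bw->lock);
	if (bw->runtime > 0) {
		granted = bw->runtime < slice ? bw->runtime : slice;
		bw->runtime -= granted;
	}
	pthread_mutex_unlock(&bw->lock);

	return granted;		/* 0 means the caller has to throttle */
}

int main(void)
{
	struct bw_pool bw = { .lock = PTHREAD_MUTEX_INITIALIZER, .runtime = 7000000 };

	printf("%ld\n", take_slice_sketch(&bw, 5000000));	/* 5000000 */
	printf("%ld\n", take_slice_sketch(&bw, 5000000));	/* 2000000 */
	printf("%ld\n", take_slice_sketch(&bw, 5000000));	/* 0 */
	return 0;
}
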
4743 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
4748 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
4749 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
4755 static int tg_unthrottle_up(struct task_group *tg, void *data) in tg_unthrottle_up() argument
4758 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
4773 static int tg_throttle_down(struct task_group *tg, void *data) in tg_throttle_down() argument
4776 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
4791 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
4816 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4820 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
4878 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
4882 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4894 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
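
Both throttle_cfs_rq() and unthrottle_cfs_rq() call walk_tg_tree_from() on cfs_rq->tg, so the throttle state propagates through the whole task-group subtree (tg_throttle_down on the way down, tg_unthrottle_up on the way back up, tg_nop for the unused direction). A compact sketch of that down/up tree walk over an invented group struct; the kernel's walker is iterative and lets the down callback skip subtrees:

#include <stdio.h>

struct group {
	const char *name;
	struct group *children[4];
	int nr_children;
	int throttle_count;
};

typedef void (*visit_fn)(struct group *g);

/* Call 'down' before descending into children and 'up' after they are done. */
static void walk_group_tree_sketch(struct group *g, visit_fn down, visit_fn up)
{
	int i;

	if (down)
		down(g);
	for (i = 0; i < g->nr_children; i++)
		walk_group_tree_sketch(g->children[i], down, up);
	if (up)
		up(g);
}

static void throttle_down(struct group *g)
{
	g->throttle_count++;	/* count pending throttles in this subtree */
	printf("throttled %s (count=%d)\n", g->name, g->throttle_count);
}

int main(void)
{
	struct group child = { .name = "child" };
	struct group parent = { .name = "parent", .children = { &child }, .nr_children = 1 };

	walk_group_tree_sketch(&parent, throttle_down, NULL);
	return 0;
}
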
5125 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
5210 static void sync_throttle(struct task_group *tg, int cpu) in sync_throttle() argument
5217 if (!tg->parent) in sync_throttle()
5220 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
5221 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
5367 struct task_group *tg; in update_runtime_enabled() local
5372 list_for_each_entry_rcu(tg, &task_groups, list) { in update_runtime_enabled()
5373 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in update_runtime_enabled()
5374 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled()
5386 struct task_group *tg; in unthrottle_offline_cfs_rqs() local
5391 list_for_each_entry_rcu(tg, &task_groups, list) { in unthrottle_offline_cfs_rqs()
5392 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs()
5424 static inline void sync_throttle(struct task_group *tg, int cpu) {} in sync_throttle() argument
5437 static inline int throttled_lb_pair(struct task_group *tg, in throttled_lb_pair() argument
5449 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
8166 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
8193 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
11120 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { in cfs_prio_less()
11455 void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
11460 if (tg->cfs_rq) in free_fair_sched_group()
11461 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
11462 if (tg->se) in free_fair_sched_group()
11463 kfree(tg->se[i]); in free_fair_sched_group()
11466 kfree(tg->cfs_rq); in free_fair_sched_group()
11467 kfree(tg->se); in free_fair_sched_group()
11470 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
11476 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
11477 if (!tg->cfs_rq) in alloc_fair_sched_group()
11479 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
11480 if (!tg->se) in alloc_fair_sched_group()
11483 tg->shares = NICE_0_LOAD; in alloc_fair_sched_group()
11485 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); in alloc_fair_sched_group()
11499 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
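
alloc_fair_sched_group() sizes tg->cfs_rq and tg->se as nr_cpu_ids-long pointer arrays, allocates one cfs_rq/se pair per CPU, and lets init_tg_cfs_entry() wire each pair back to the group (cfs_rq->tg = tg, tg->cfs_rq[cpu] = cfs_rq), while free_fair_sched_group() unwinds the same layout. A simplified userspace sketch of that allocate-per-CPU-and-unwind pattern, with invented types (grp, rq_entry) and a fixed NR_CPUS:

#include <stdlib.h>

#define NR_CPUS	8

struct grp;

struct rq_entry {
	struct grp *owner;	/* kernel: cfs_rq->tg */
	int cpu;
};

struct grp {
	struct rq_entry **per_cpu;	/* kernel: the tg->cfs_rq / tg->se arrays */
};

static void free_group_sketch(struct grp *g)
{
	int i;

	if (!g)
		return;
	if (g->per_cpu)
		for (i = 0; i < NR_CPUS; i++)
			free(g->per_cpu[i]);
	free(g->per_cpu);
	free(g);
}

/* Allocate the pointer array, then one entry per CPU, unwinding on any failure. */
static struct grp *alloc_group_sketch(void)
{
	struct grp *g = calloc(1, sizeof(*g));
	int i;

	if (!g)
		return NULL;
	g->per_cpu = calloc(NR_CPUS, sizeof(*g->per_cpu));
	if (!g->per_cpu)
		goto err;

	for (i = 0; i < NR_CPUS; i++) {
		struct rq_entry *e = calloc(1, sizeof(*e));

		if (!e)
			goto err;
		e->owner = g;		/* kernel: init_tg_cfs_entry() sets cfs_rq->tg = tg */
		e->cpu = i;
		g->per_cpu[i] = e;	/* kernel: tg->cfs_rq[cpu] = cfs_rq */
	}
	return g;
err:
	free_group_sketch(g);
	return NULL;
}

int main(void)
{
	struct grp *g = alloc_group_sketch();

	free_group_sketch(g);
	return g ? 0 : 1;
}
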
11511 void online_fair_sched_group(struct task_group *tg) in online_fair_sched_group() argument
11520 se = tg->se[i]; in online_fair_sched_group()
11524 sync_throttle(tg, i); in online_fair_sched_group()
11529 void unregister_fair_sched_group(struct task_group *tg) in unregister_fair_sched_group() argument
11535 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); in unregister_fair_sched_group()
11538 if (tg->se[cpu]) in unregister_fair_sched_group()
11539 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
11545 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
11551 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
11556 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
11562 cfs_rq->tg = tg; in init_tg_cfs_entry()
11566 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
11567 tg->se[cpu] = se; in init_tg_cfs_entry()
11589 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares) in __sched_group_set_shares() argument
11598 if (!tg->se[0]) in __sched_group_set_shares()
11603 if (tg->shares == shares) in __sched_group_set_shares()
11606 tg->shares = shares; in __sched_group_set_shares()
11609 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares()
11625 int sched_group_set_shares(struct task_group *tg, unsigned long shares) in sched_group_set_shares() argument
11630 if (tg_is_idle(tg)) in sched_group_set_shares()
11633 ret = __sched_group_set_shares(tg, shares); in sched_group_set_shares()
11639 int sched_group_set_idle(struct task_group *tg, long idle) in sched_group_set_idle() argument
11643 if (tg == &root_task_group) in sched_group_set_idle()
11651 if (tg->idle == idle) { in sched_group_set_idle()
11656 tg->idle = idle; in sched_group_set_idle()
11660 struct sched_entity *se = tg->se[i]; in sched_group_set_idle()
11661 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i]; in sched_group_set_idle()
11703 if (tg_is_idle(tg)) in sched_group_set_idle()
11704 __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO)); in sched_group_set_idle()
11706 __sched_group_set_shares(tg, NICE_0_LOAD); in sched_group_set_idle()
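
sched_group_set_idle() rejects the root group, returns early when the value does not change, and remaps the group's weight to an idle weight or back to NICE_0_LOAD through __sched_group_set_shares(); sched_group_set_shares() in turn refuses idle groups. A small sketch of just those guards, with stand-in constants (IDLE_WEIGHT in place of WEIGHT_IDLEPRIO) and none of the kernel's locking or per-CPU requeueing:

#include <errno.h>
#include <stdio.h>

#define NICE_0_WEIGHT	1024UL
#define IDLE_WEIGHT	3UL	/* stand-in for WEIGHT_IDLEPRIO */

struct grp {
	int is_root;
	long idle;
	unsigned long shares;
};

static int set_group_idle_sketch(struct grp *g, long idle)
{
	if (idle < 0 || idle > 1)
		return -EINVAL;
	if (g->is_root)
		return -EINVAL;		/* the root group can never be idle */
	if (g->idle == idle)
		return 0;		/* nothing to do */

	g->idle = idle;
	g->shares = idle ? IDLE_WEIGHT : NICE_0_WEIGHT;
	return 0;
}

static int set_group_shares_sketch(struct grp *g, unsigned long shares)
{
	if (g->idle)
		return -EINVAL;		/* idle groups keep their idle weight */
	if (g->shares == shares)
		return 0;
	g->shares = shares;
	return 0;
}

int main(void)
{
	struct grp g = { .shares = NICE_0_WEIGHT };

	set_group_idle_sketch(&g, 1);
	printf("%d\n", set_group_shares_sketch(&g, 2048));	/* -EINVAL (-22) */
	set_group_idle_sketch(&g, 0);
	printf("%lu\n", g.shares);				/* back to 1024 */
	return 0;
}
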
11714 void free_fair_sched_group(struct task_group *tg) { } in free_fair_sched_group() argument
11716 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
11721 void online_fair_sched_group(struct task_group *tg) { } in online_fair_sched_group() argument
11723 void unregister_fair_sched_group(struct task_group *tg) { } in unregister_fair_sched_group() argument