Lines matching refs: tg
1362 int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
1816 struct task_group *tg = &root_task_group; in uclamp_update_root_tg() local
1818 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1820 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
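The uclamp_update_root_tg() matches (1816-1820) reset the root group's requested clamps via uclamp_se_set(). Below is a minimal userspace model of that helper and the update; the struct layout is reduced, and the values 0 and 1024 (SCHED_CAPACITY_SCALE) are illustrative stand-ins for the sysctl_sched_uclamp_util_{min,max} limits the kernel actually reads:

#include <stdbool.h>
#include <stdio.h>

struct uclamp_se {
	unsigned int value;     /* reduced: the kernel also tracks a bucket */
	bool user_defined;
};

enum { UCLAMP_MIN, UCLAMP_MAX, UCLAMP_CNT };

struct task_group {
	struct uclamp_se uclamp_req[UCLAMP_CNT];
};

static void uclamp_se_set(struct uclamp_se *uc_se,
			  unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->user_defined = user_defined;
}

int main(void)
{
	struct task_group root_task_group = { 0 };
	struct task_group *tg = &root_task_group;

	/* As in uclamp_update_root_tg(): values assumed, not user-defined. */
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], 0, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], 1024, false);

	printf("min=%u max=%u\n",
	       tg->uclamp_req[UCLAMP_MIN].value,
	       tg->uclamp_req[UCLAMP_MAX].value);
	return 0;
}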
4750 struct task_group *tg; in sched_cgroup_fork() local
4751 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4753 tg = autogroup_task_group(p, tg); in sched_cgroup_fork()
4754 p->sched_task_group = tg; in sched_cgroup_fork()
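At 4751, sched_cgroup_fork() recovers the task_group from the child's cgroup subsystem state, then lets autogroup_task_group() substitute the autogroup where applicable. The recovery step is the standard container_of() pattern; a self-contained userspace illustration with reduced struct members:

#include <stddef.h>
#include <stdio.h>

struct cgroup_subsys_state { int id; };

struct task_group {
	struct cgroup_subsys_state css;   /* embedded, recovered via offsetof */
	const char *name;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct task_group grp = { .css = { .id = 1 }, .name = "demo" };
	struct cgroup_subsys_state *css = &grp.css;

	/* Same recovery step as sched_cgroup_fork() / css_tg(). */
	struct task_group *tg = container_of(css, struct task_group, css);
	printf("%s\n", tg->name);
	return 0;
}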
8791 static inline void alloc_uclamp_sched_group(struct task_group *tg, in alloc_uclamp_sched_group() argument
8798 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
8800 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
8805 static void sched_free_group(struct task_group *tg) in sched_free_group() argument
8807 free_fair_sched_group(tg); in sched_free_group()
8808 free_rt_sched_group(tg); in sched_free_group()
8809 autogroup_free(tg); in sched_free_group()
8810 kmem_cache_free(task_group_cache, tg); in sched_free_group()
8818 static void sched_unregister_group(struct task_group *tg) in sched_unregister_group() argument
8820 unregister_fair_sched_group(tg); in sched_unregister_group()
8821 unregister_rt_sched_group(tg); in sched_unregister_group()
8826 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
8832 struct task_group *tg; in sched_create_group() local
8834 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); in sched_create_group()
8835 if (!tg) in sched_create_group()
8838 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
8841 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
8844 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL); in sched_create_group()
8845 alloc_uclamp_sched_group(tg, parent); in sched_create_group()
8847 return tg; in sched_create_group()
8850 sched_free_group(tg); in sched_create_group()
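sched_create_group() (8832-8850) follows the usual allocate/initialize/unwind shape: a zeroed allocation, per-class setup steps, and a single error path that frees whatever was set up. A reduced userspace sketch of that shape; the allocations are stand-ins, and the kernel returns ERR_PTR(-ENOMEM) rather than NULL on failure:

#include <stdio.h>
#include <stdlib.h>

struct task_group { void *fair; void *rt; };

static struct task_group *sched_create_group_sketch(void)
{
	struct task_group *tg = calloc(1, sizeof(*tg)); /* GFP_KERNEL | __GFP_ZERO */
	if (!tg)
		return NULL;

	tg->fair = malloc(64);          /* stands in for alloc_fair_sched_group() */
	if (!tg->fair)
		goto err;

	tg->rt = malloc(64);            /* stands in for alloc_rt_sched_group() */
	if (!tg->rt)
		goto err;

	return tg;

err:
	/* Mirrors sched_free_group(): tear down whatever was set up. */
	free(tg->rt);
	free(tg->fair);
	free(tg);
	return NULL;                    /* kernel: ERR_PTR(-ENOMEM) */
}

int main(void)
{
	struct task_group *tg = sched_create_group_sketch();
	printf("%s\n", tg ? "created" : "failed");
	if (tg) {
		free(tg->rt);
		free(tg->fair);
		free(tg);
	}
	return 0;
}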
8854 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
8859 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
8864 tg->parent = parent; in sched_online_group()
8865 INIT_LIST_HEAD(&tg->children); in sched_online_group()
8866 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
8869 online_fair_sched_group(tg); in sched_online_group()
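sched_online_group() (8854-8869) records the parent and links the new group into the global task_groups list and the parent's children list with list_add_rcu(), so lockless readers can walk the hierarchy. A toy, non-RCU version of that wiring using a singly linked children list:

#include <stdio.h>

struct task_group {
	const char *name;
	struct task_group *parent;
	struct task_group *children;    /* singly linked for brevity */
	struct task_group *sibling;
};

static void online_group(struct task_group *tg, struct task_group *parent)
{
	tg->parent = parent;            /* as at line 8864 */
	tg->sibling = parent->children; /* kernel: list_add_rcu() */
	parent->children = tg;
}

int main(void)
{
	struct task_group root = { "root", 0, 0, 0 };
	struct task_group a = { "a", 0, 0, 0 };
	struct task_group b = { "b", 0, 0, 0 };

	online_group(&a, &root);
	online_group(&b, &root);

	for (struct task_group *c = root.children; c; c = c->sibling)
		printf("child: %s\n", c->name);
	return 0;
}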
8879 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
8882 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
8885 void sched_release_group(struct task_group *tg) in sched_release_group() argument
8903 list_del_rcu(&tg->list); in sched_release_group()
8904 list_del_rcu(&tg->siblings); in sched_release_group()
8910 struct task_group *tg; in sched_get_task_group() local
8917 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_get_task_group()
8919 tg = autogroup_task_group(tsk, tg); in sched_get_task_group()
8921 return tg; in sched_get_task_group()
8991 struct task_group *tg; in cpu_cgroup_css_alloc() local
8998 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
8999 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
9002 return &tg->css; in cpu_cgroup_css_alloc()
9008 struct task_group *tg = css_tg(css); in cpu_cgroup_css_online() local
9012 ret = scx_tg_online(tg); in cpu_cgroup_css_online()
9017 sched_online_group(tg, parent); in cpu_cgroup_css_online()
9031 struct task_group *tg = css_tg(css); in cpu_cgroup_css_offline() local
9033 scx_tg_offline(tg); in cpu_cgroup_css_offline()
9038 struct task_group *tg = css_tg(css); in cpu_cgroup_css_released() local
9040 sched_release_group(tg); in cpu_cgroup_css_released()
9045 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
9050 sched_unregister_group(tg); in cpu_cgroup_css_free()
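The cpu_cgroup_css_* matches (8991-9050) are the cpu controller's cgroup callbacks, each delegating to a sched_*_group() step. The printf stubs below sketch the lifecycle order the cgroup core drives them in (alloc, online, offline, released, free); the mapping to scheduler calls is taken from the matched lines:

#include <stdio.h>

static void *css_alloc(void)         { puts("alloc    -> sched_create_group");     return (void *)1; }
static int   css_online(void *css)   { (void)css; puts("online   -> sched_online_group"); return 0; }
static void  css_offline(void *css)  { (void)css; puts("offline  -> scx_tg_offline"); }
static void  css_released(void *css) { (void)css; puts("released -> sched_release_group"); }
static void  css_free(void *css)     { (void)css; puts("free     -> sched_unregister_group"); }

int main(void)
{
	void *css = css_alloc();
	if (css_online(css) == 0) {
		css_offline(css);
		css_released(css);
		css_free(css);
	}
	return 0;
}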
9180 struct task_group *tg; in cpu_uclamp_write() local
9191 tg = css_tg(of_css(of)); in cpu_uclamp_write()
9192 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
9193 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
9199 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
9224 struct task_group *tg; in cpu_uclamp_print() local
9230 tg = css_tg(seq_css(sf)); in cpu_uclamp_print()
9231 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
9239 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
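cpu_uclamp_write() and cpu_uclamp_print() (9180-9239) convert between the percentage userspace writes and the [0..1024] utilization clamp stored in tg->uclamp_req; the kernel keeps the percentage itself, with two fixed decimal digits, in tg->uclamp_pct. A rounded model of the percent-to-util direction using plain arithmetic instead of the kernel's fixed point:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024

static unsigned int util_from_percent(double pct)
{
	/* round-to-nearest, like the kernel's conversion */
	return (unsigned int)(pct * SCHED_CAPACITY_SCALE / 100.0 + 0.5);
}

int main(void)
{
	printf("50.00%%  -> %u\n", util_from_percent(50.0));   /* 512  */
	printf("100.00%% -> %u\n", util_from_percent(100.0));  /* 1024 */
	return 0;
}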
9258 static unsigned long tg_weight(struct task_group *tg) in tg_weight() argument
9261 return scale_load_down(tg->shares); in tg_weight()
9263 return sched_weight_from_cgroup(tg->scx_weight); in tg_weight()
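tg_weight() (9258-9263) reads the fair-class shares scaled down, or converts a sched_ext cgroup weight. On 64-bit kernels, shares are stored scaled up by 2^SCHED_FIXEDPOINT_SHIFT for extra fixed-point resolution; a userspace model of that round trip (on 32-bit the macros are identity, and the kernel additionally clamps to MIN_SHARES):

#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT 10
#define scale_load(w)		((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)	((unsigned long)(w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	unsigned long shares = scale_load(1024);   /* default weight */
	printf("stored=%lu visible=%lu\n", shares, scale_load_down(shares));
	return 0;
}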
9296 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9298 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, in tg_set_cfs_bandwidth() argument
9302 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
9304 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
9340 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
9369 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
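tg_set_cfs_bandwidth() (9298-9369) refuses changes to the root group, validates the new period/quota/burst, runs the hierarchy check (__cfs_schedulable, line 9340), and then walks each CPU's cfs_rq for the group to apply the change. A reduced sketch of that validate-then-apply shape; the 1ms..1s period bounds are assumed to mirror the kernel's min/max_cfs_quota_period:

#include <stdio.h>

#define NR_CPUS 4

struct cfs_rq { unsigned long long runtime_remaining; };
static struct cfs_rq per_cpu_cfs_rq[NR_CPUS];

static int period_valid(unsigned long long period_us)
{
	return period_us >= 1000 && period_us <= 1000000;
}

static int set_bandwidth(unsigned long long period_us,
			 unsigned long long quota_us)
{
	if (!period_valid(period_us))
		return -1;
	/* ... the kernel also runs __cfs_schedulable() here ... */
	(void)quota_us;
	for (int i = 0; i < NR_CPUS; i++)   /* per-CPU apply, as at 9369 */
		per_cpu_cfs_rq[i].runtime_remaining = 0;
	return 0;
}

int main(void)
{
	printf("%d\n", set_bandwidth(100000, 50000)); /* ok */
	printf("%d\n", set_bandwidth(100, 50));       /* period too short */
	return 0;
}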
9386 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
9390 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
9391 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
9399 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_quota()
9402 static long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
9406 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
9409 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
9415 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
9423 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
9424 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
9426 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_period()
9429 static long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
9433 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
9439 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) in tg_set_cfs_burst() argument
9447 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
9448 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
9450 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_burst()
9453 static long tg_get_cfs_burst(struct task_group *tg) in tg_get_cfs_burst() argument
9457 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
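The tg_{set,get}_cfs_{quota,period,burst} wrappers (9386-9457) are unit converters: userspace speaks microseconds, cfs_bandwidth stores nanoseconds, and quota == RUNTIME_INF encodes "unlimited" (reported back as a negative value). A userspace model of that round trip:

#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	(~0ULL)

static unsigned long long quota_ns = RUNTIME_INF; /* tg->cfs_bandwidth.quota */

static void set_cfs_quota_us(long long us)
{
	quota_ns = us < 0 ? RUNTIME_INF
			  : (unsigned long long)us * NSEC_PER_USEC;
}

static long long get_cfs_quota_us(void)
{
	return quota_ns == RUNTIME_INF ? -1
				       : (long long)(quota_ns / NSEC_PER_USEC);
}

int main(void)
{
	set_cfs_quota_us(50000);              /* 50 ms of runtime per period */
	printf("%lld\n", get_cfs_quota_us()); /* 50000 */
	set_cfs_quota_us(-1);                 /* unlimited */
	printf("%lld\n", get_cfs_quota_us()); /* -1 */
	return 0;
}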
9500 struct task_group *tg; member
9508 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
9513 if (tg == d->tg) { in normalize_cfs_quota()
9517 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
9518 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
9528 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
9531 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
9534 if (!tg->parent) { in tg_cfs_schedulable_down()
9537 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
9539 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
9566 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
9569 .tg = tg, in __cfs_schedulable()
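__cfs_schedulable() (9566-9569) drives a top-down tree walk (tg_cfs_schedulable_down) that rejects a configuration if any group's normalized quota exceeds its parent's. The kernel normalizes quota against period with fixed-point to_ratio(); the sketch below instead checks one group's ancestor chain in floating point:

#include <stdio.h>

struct group { double quota_us, period_us; struct group *parent; };

static int schedulable(const struct group *g)
{
	for (; g->parent; g = g->parent) {
		double ratio  = g->quota_us / g->period_us;
		double parent = g->parent->quota_us / g->parent->period_us;
		if (ratio > parent)
			return 0;  /* child asks for more than parent has */
	}
	return 1;
}

int main(void)
{
	struct group root = { 100000, 100000, NULL };  /* 100% of a CPU */
	struct group ok   = {  50000, 100000, &root }; /*  50%: fits    */
	struct group bad  = { 150000, 100000, &root }; /* 150%: exceeds */

	printf("ok:  %d\n", schedulable(&ok));   /* 1 */
	printf("bad: %d\n", schedulable(&bad));  /* 0 */
	return 0;
}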
9585 struct task_group *tg = css_tg(seq_css(sf)); in cpu_cfs_stat_show() local
9586 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
9592 if (schedstat_enabled() && tg != &root_task_group) { in cpu_cfs_stat_show()
9598 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
9611 static u64 throttled_time_self(struct task_group *tg) in throttled_time_self() argument
9617 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
9625 struct task_group *tg = css_tg(seq_css(sf)); in cpu_cfs_local_stat_show() local
9627 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg)); in cpu_cfs_local_stat_show()
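throttled_time_self() (9611-9617) is a read-time per-CPU sum of each cfs_rq's throttled_clock_self_time. A reduced model with a fixed CPU count; the kernel iterates with for_each_possible_cpu() and reads each counter with READ_ONCE():

#include <stdio.h>

#define NR_CPUS 4

static unsigned long long throttled_clock_self_time[NR_CPUS] = {
	1000, 2500, 0, 700
};

static unsigned long long throttled_time_self(void)
{
	unsigned long long total = 0;
	for (int i = 0; i < NR_CPUS; i++)
		total += throttled_clock_self_time[i];
	return total;
}

int main(void)
{
	printf("throttled_time %llu\n", throttled_time_self());
	return 0;
}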
9750 struct task_group *tg = css_tg(css); in cpu_extra_stat_show() local
9751 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
9776 struct task_group *tg = css_tg(css); in cpu_local_stat_show() local
9779 throttled_self_usec = throttled_time_self(tg); in cpu_local_stat_show()
9888 struct task_group *tg = css_tg(seq_css(sf)); in cpu_max_show() local
9890 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); in cpu_max_show()
9897 struct task_group *tg = css_tg(of_css(of)); in cpu_max_write() local
9898 u64 period = tg_get_cfs_period(tg); in cpu_max_write()
9899 u64 burst = tg->cfs_bandwidth.burst; in cpu_max_write()
9905 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); in cpu_max_write()
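cpu_max_write() (9897-9905) accepts the cgroup v2 "cpu.max" format, "<quota|max> [period]", keeping the current period and burst when not overridden, then funnels the result into tg_set_cfs_bandwidth(). A userspace parse of the same format, with "max" meaning unlimited (reported as -1 here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *buf = "50000 100000";  /* try "max" for unlimited */
	char word[32];
	long long period_us = 100000;      /* keep current period if absent */
	long long quota_us;

	if (sscanf(buf, "%31s %lld", word, &period_us) < 1)
		return 1;
	quota_us = strcmp(word, "max") == 0 ? -1 : atoll(word);

	printf("quota=%lld period=%lld\n", quota_us, period_us);
	return 0;
}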