Lines matching refs: tg
1284 int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
1774 struct task_group *tg = &root_task_group; in uclamp_update_root_tg() local
1776 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1778 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
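The two uclamp_se_set() calls above push the global sysctl limits into the root group's requested clamps. A minimal standalone model of what such a call records (struct layout and bucket math simplified from the kernel's versions; the five-bucket count is the kernel's default CONFIG_UCLAMP_BUCKETS_COUNT):

#include <stdio.h>

/* Simplified stand-in for the kernel's struct uclamp_se. */
struct uclamp_se {
    unsigned int value;        /* requested clamp, 0..1024 */
    unsigned int bucket_id;    /* which clamp bucket it falls in */
    unsigned int user_defined; /* set by userspace vs. default */
};

#define SCHED_CAPACITY_SCALE 1024
#define UCLAMP_BUCKETS 5

/* Map a utilization value to one of a few coarse buckets. */
static unsigned int uclamp_bucket_id(unsigned int value)
{
    unsigned int delta = SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS + 1;
    unsigned int id = value / delta;
    return id < UCLAMP_BUCKETS ? id : UCLAMP_BUCKETS - 1;
}

static void uclamp_se_set(struct uclamp_se *uc_se,
                          unsigned int value, int user_defined)
{
    uc_se->value = value;
    uc_se->bucket_id = uclamp_bucket_id(value);
    uc_se->user_defined = user_defined;
}

int main(void)
{
    struct uclamp_se min_req;
    uclamp_se_set(&min_req, 512, 0);    /* e.g. a 50% minimum clamp */
    printf("value=%u bucket=%u\n", min_req.value, min_req.bucket_id);
    return 0;
}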
4823 struct task_group *tg; in sched_cgroup_fork() local
4824 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], in sched_cgroup_fork()
4826 tg = autogroup_task_group(p, tg); in sched_cgroup_fork()
4827 p->sched_task_group = tg; in sched_cgroup_fork()
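sched_cgroup_fork() (line 4824 above) and sched_change_group() (line 10449 below) both recover the task_group from an embedded cgroup_subsys_state via container_of(), the same trick css_tg() uses throughout the file. A runnable standalone model with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct cgroup_subsys_state { int refcnt; };   /* stand-in type */

struct task_group {
    struct cgroup_subsys_state css;           /* embedded member */
    unsigned long shares;
};

/* Step back from a pointer to a member to the enclosing structure. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static struct task_group *css_tg(struct cgroup_subsys_state *css)
{
    return css ? container_of(css, struct task_group, css) : NULL;
}

int main(void)
{
    struct task_group tg = { .shares = 1024 };
    printf("shares=%lu\n", css_tg(&tg.css)->shares);  /* prints 1024 */
    return 0;
}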
10324 static inline void alloc_uclamp_sched_group(struct task_group *tg, in alloc_uclamp_sched_group() argument
10331 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
10333 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
10338 static void sched_free_group(struct task_group *tg) in sched_free_group() argument
10340 free_fair_sched_group(tg); in sched_free_group()
10341 free_rt_sched_group(tg); in sched_free_group()
10342 autogroup_free(tg); in sched_free_group()
10343 kmem_cache_free(task_group_cache, tg); in sched_free_group()
10351 static void sched_unregister_group(struct task_group *tg) in sched_unregister_group() argument
10353 unregister_fair_sched_group(tg); in sched_unregister_group()
10354 unregister_rt_sched_group(tg); in sched_unregister_group()
10359 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_unregister_group()
10365 struct task_group *tg; in sched_create_group() local
10367 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); in sched_create_group()
10368 if (!tg) in sched_create_group()
10371 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
10374 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
10377 alloc_uclamp_sched_group(tg, parent); in sched_create_group()
10379 return tg; in sched_create_group()
10382 sched_free_group(tg); in sched_create_group()
10386 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
10391 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
10396 tg->parent = parent; in sched_online_group()
10397 INIT_LIST_HEAD(&tg->children); in sched_online_group()
10398 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
10401 online_fair_sched_group(tg); in sched_online_group()
10411 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
10414 call_rcu(&tg->rcu, sched_unregister_group_rcu); in sched_destroy_group()
10417 void sched_release_group(struct task_group *tg) in sched_release_group() argument
10435 list_del_rcu(&tg->list); in sched_release_group()
10436 list_del_rcu(&tg->siblings); in sched_release_group()
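Lines 10338-10436 together form the task_group lifecycle: sched_create_group() allocates the fair/RT/uclamp state, sched_online_group() publishes the group on the global lists, and teardown happens in two stages, with the final free deferred through call_rcu(). A sketch of the intended call order (kernel context assumed, error paths elided; not compilable in isolation):

/* How the cpu cgroup controller drives this API (see the css
 * callbacks below); sched_destroy_group() is the RCU-deferred
 * variant of the unregister step. */
struct task_group *tg;

tg = sched_create_group(parent);         /* alloc fair/rt/uclamp state */
if (!IS_ERR(tg))
        sched_online_group(tg, parent);  /* add to task_groups list */
/* ... group lives, tasks may be attached ... */
sched_release_group(tg);                 /* unlink from list/siblings */
sched_unregister_group(tg);              /* tear down per-CPU state,
                                            then free via call_rcu() */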
10442 struct task_group *tg; in sched_change_group() local
10449 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_change_group()
10451 tg = autogroup_task_group(tsk, tg); in sched_change_group()
10452 tsk->sched_task_group = tg; in sched_change_group()
10513 struct task_group *tg; in cpu_cgroup_css_alloc() local
10520 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
10521 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
10524 return &tg->css; in cpu_cgroup_css_alloc()
10530 struct task_group *tg = css_tg(css); in cpu_cgroup_css_online() local
10534 sched_online_group(tg, parent); in cpu_cgroup_css_online()
10551 struct task_group *tg = css_tg(css); in cpu_cgroup_css_released() local
10553 sched_release_group(tg); in cpu_cgroup_css_released()
10558 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
10563 sched_unregister_group(tg); in cpu_cgroup_css_free()
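The four css callbacks above run when a cgroup directory with the cpu controller attached is created or removed. A runnable userspace illustration (assumes a cgroup v2 mount at /sys/fs/cgroup, root privileges, and a hypothetical group name):

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    const char *grp = "/sys/fs/cgroup/demo";   /* hypothetical group */

    /* mkdir -> cpu_cgroup_css_alloc() + cpu_cgroup_css_online() */
    if (mkdir(grp, 0755) && errno != EEXIST) {
        perror("mkdir");
        return 1;
    }

    /* rmdir -> cpu_cgroup_css_released(), then cpu_cgroup_css_free()
     * once the css refcount drops and RCU grace periods pass */
    if (rmdir(grp)) {
        perror("rmdir");
        return 1;
    }
    return 0;
}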
10688 struct task_group *tg; in cpu_uclamp_write() local
10699 tg = css_tg(of_css(of)); in cpu_uclamp_write()
10700 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
10701 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
10707 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
10735 struct task_group *tg; in cpu_uclamp_print() local
10741 tg = css_tg(seq_css(sf)); in cpu_uclamp_print()
10742 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
10750 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
10770 struct task_group *tg; in cpu_uclamp_ls_write_u64() local
10774 tg = css_tg(css); in cpu_uclamp_ls_write_u64()
10775 tg->latency_sensitive = (unsigned int) ls; in cpu_uclamp_ls_write_u64()
10783 struct task_group *tg = css_tg(css); in cpu_uclamp_ls_read_u64() local
10785 return (u64) tg->latency_sensitive; in cpu_uclamp_ls_read_u64()
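cpu_uclamp_write() and cpu_uclamp_print() back the cpu.uclamp.min/cpu.uclamp.max files, which accept a percentage (or "max") and store it as a clamp in tg->uclamp_req[]; the latency_sensitive accessors at lines 10775/10785 back an extra boolean knob carried in some trees (e.g. Android common kernels) rather than in mainline. A runnable sketch setting a 50% minimum clamp (path and group name are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical group created beforehand, e.g. with mkdir(2). */
    const char *path = "/sys/fs/cgroup/demo/cpu.uclamp.min";
    const char *val  = "50\n";   /* percentage parsed by cpu_uclamp_write() */

    int fd = open(path, O_WRONLY);
    if (fd < 0) { perror("open"); return 1; }
    if (write(fd, val, strlen(val)) < 0)
        perror("write");
    close(fd);
    return 0;
}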
10801 struct task_group *tg = css_tg(css); in cpu_shares_read_u64() local
10803 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
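cpu_shares_read_u64() converts the stored weight back to the userspace scale: on 64-bit kernels, group shares carry extra fixed-point resolution. A simplified model of the two helpers (mainline's scale_load_down() additionally clamps nonzero results to a minimum of 2):

/* Simplified scale_load()/scale_load_down() as on 64-bit kernels. */
#define SCHED_FIXEDPOINT_SHIFT 10
#define scale_load(w)      ((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w) ((unsigned long)(w) >> SCHED_FIXEDPOINT_SHIFT)

/* The cpu.shares default of 1024 is stored as 1024 << 10 internally. */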
10814 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10816 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, in tg_set_cfs_bandwidth() argument
10820 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
10822 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
10857 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
10883 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
10904 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
10908 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
10909 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
10917 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_quota()
10920 static long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
10924 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
10927 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
10933 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
10941 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
10942 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
10944 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_period()
10947 static long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
10951 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
10957 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) in tg_set_cfs_burst() argument
10965 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
10966 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
10968 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_burst()
10971 static long tg_get_cfs_burst(struct task_group *tg) in tg_get_cfs_burst() argument
10975 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
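The tg_set_cfs_*()/tg_get_cfs_*() pairs above convert between the microsecond units exposed in cgroupfs (cpu.cfs_quota_us, cpu.cfs_period_us, cpu.cfs_burst_us) and the nanosecond values kept in tg->cfs_bandwidth, then funnel into tg_set_cfs_bandwidth(). A runnable model of the unit handling and the resulting cap:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
    /* cpu.cfs_quota_us = 50000, cpu.cfs_period_us = 100000
     * (a quota of -1 would mean RUNTIME_INF, i.e. no limit). */
    long cfs_quota_us = 50000, cfs_period_us = 100000;

    uint64_t quota  = (uint64_t)cfs_quota_us  * NSEC_PER_USEC;
    uint64_t period = (uint64_t)cfs_period_us * NSEC_PER_USEC;

    /* quota/period is the fraction of one CPU the group may consume. */
    printf("cap: %.0f%% of one CPU\n",
           100.0 * (double)quota / (double)period);
    return 0;
}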
11018 struct task_group *tg; member
11026 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
11031 if (tg == d->tg) { in normalize_cfs_quota()
11035 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
11036 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
11046 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
11049 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
11052 if (!tg->parent) { in tg_cfs_schedulable_down()
11055 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
11057 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
11079 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
11083 .tg = tg, in __cfs_schedulable()
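__cfs_schedulable() walks the group tree (with tg_nop() from line 1284 as the no-op up-callback) and, through tg_cfs_schedulable_down() and normalize_cfs_quota(), enforces that on legacy hierarchies a child's normalized bandwidth never exceeds its parent's. A standalone model of the fixed-point ratio and the legacy-mode check (the 2^20 scale matches the kernel's BW_SHIFT; infinite-quota handling is simplified):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT    20
#define RUNTIME_INF (~0ULL)

/* quota/period in fixed point at 2^20 scale, like the kernel's to_ratio(). */
static uint64_t to_ratio(uint64_t period, uint64_t quota)
{
    if (quota == RUNTIME_INF)
        return 1ULL << BW_SHIFT;    /* simplified: treat as 100% */
    return (quota << BW_SHIFT) / period;
}

int main(void)
{
    uint64_t parent = to_ratio(100000, 50000);  /* 50% */
    uint64_t child  = to_ratio(100000, 75000);  /* 75%: exceeds parent */

    /* Legacy-hierarchy rule, simplified: otherwise -EINVAL bubbles up
     * through __cfs_schedulable() into tg_set_cfs_bandwidth(). */
    printf("%s\n", child <= parent ? "schedulable" : "-EINVAL");
    return 0;
}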
11102 struct task_group *tg = css_tg(seq_css(sf)); in cpu_cfs_stat_show() local
11103 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
11109 if (schedstat_enabled() && tg != &root_task_group) { in cpu_cfs_stat_show()
11115 stats = __schedstats_from_se(tg->se[i]); in cpu_cfs_stat_show()
11244 struct task_group *tg = css_tg(css); in cpu_extra_stat_show() local
11245 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
11269 struct task_group *tg = css_tg(css); in cpu_weight_read_u64() local
11270 u64 weight = scale_load_down(tg->shares); in cpu_weight_read_u64()
11363 struct task_group *tg = css_tg(seq_css(sf)); in cpu_max_show() local
11365 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); in cpu_max_show()
11372 struct task_group *tg = css_tg(of_css(of)); in cpu_max_write() local
11373 u64 period = tg_get_cfs_period(tg); in cpu_max_write()
11374 u64 burst = tg_get_cfs_burst(tg); in cpu_max_write()
11380 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); in cpu_max_write()
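cpu_max_show() and cpu_max_write() implement the cgroup v2 cpu.max file, which merges quota and period into a single "$MAX $PERIOD" line ("max" for the quota means unlimited) and reuses tg_set_cfs_bandwidth() underneath. A runnable example capping a hypothetical group at half a CPU (path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* Format: "<quota_us> <period_us>" or "max [<period_us>]". */
    const char *path = "/sys/fs/cgroup/demo/cpu.max";
    const char *cap  = "50000 100000\n";  /* 50ms per 100ms = 50% of a CPU */

    int fd = open(path, O_WRONLY);
    if (fd < 0) { perror("open"); return 1; }
    if (write(fd, cap, strlen(cap)) < 0)
        perror("write");
    close(fd);
    return 0;
}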