Lines matching refs: tg
304 user->tg->uid = user->uid; in set_tg_uid()
363 struct task_group *tg; in task_group() local
367 tg = __task_cred(p)->user->tg; in task_group()
370 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), in task_group()
373 tg = &init_task_group; in task_group()
375 return tg; in task_group()
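
The task_group() hits above resolve a task's group either through its user struct, through the cpu cgroup subsystem state via container_of(), or by falling back to init_task_group. A minimal userspace sketch of the container_of() step; the struct names and fields below are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct css { int refcnt; };			/* stand-in for cgroup_subsys_state */
struct group { struct css css; unsigned long shares; };	/* stand-in for task_group */

int main(void)
{
	struct group g = { .css = { 1 }, .shares = 1024 };
	struct css *state = &g.css;	/* what a subsys-state lookup would hand back */

	/* recover the enclosing group from the embedded state pointer */
	printf("%lu\n", container_of(state, struct group, css)->shares);
	return 0;
}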
436 struct task_group *tg; /* group that "owns" this runqueue */ member
487 struct task_group *tg; member
1451 static int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
1483 update_group_shares_cpu(struct task_group *tg, int cpu, in update_group_shares_cpu() argument
1489 if (!tg->se[cpu]) in update_group_shares_cpu()
1492 rq_weight = tg->cfs_rq[cpu]->rq_weight; in update_group_shares_cpu()
1503 if (abs(shares - tg->se[cpu]->load.weight) > in update_group_shares_cpu()
1509 tg->cfs_rq[cpu]->shares = shares; in update_group_shares_cpu()
1511 __set_se_shares(tg->se[cpu], shares); in update_group_shares_cpu()
1521 static int tg_shares_up(struct task_group *tg, void *data) in tg_shares_up() argument
1534 weight = tg->cfs_rq[i]->load.weight; in tg_shares_up()
1538 tg->cfs_rq[i]->rq_weight = weight; in tg_shares_up()
1540 shares += tg->cfs_rq[i]->shares; in tg_shares_up()
1543 if ((!shares && rq_weight) || shares > tg->shares) in tg_shares_up()
1544 shares = tg->shares; in tg_shares_up()
1547 shares = tg->shares; in tg_shares_up()
1550 update_group_shares_cpu(tg, i, shares, rq_weight); in tg_shares_up()
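
The tg_shares_up() / update_group_shares_cpu() hits above redistribute a group's total shares across CPUs in proportion to each per-CPU runqueue weight, clamping the result. A minimal userspace sketch of that proportional split; the clamp bounds, names, and example weights are illustrative stand-ins, not the kernel's exact values:

#include <stdio.h>

#define MIN_SHARES 2UL		/* illustrative lower clamp */
#define MAX_SHARES (1UL << 18)	/* illustrative upper clamp */

/* Split a group's shares across CPUs, weighted by each CPU's rq load. */
static unsigned long cpu_shares(unsigned long tg_shares,
				unsigned long rq_weight,
				unsigned long total_rq_weight)
{
	unsigned long shares;

	if (!total_rq_weight)
		return tg_shares;	/* no load anywhere: leave shares as-is */

	shares = tg_shares * rq_weight / total_rq_weight;
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > MAX_SHARES)
		shares = MAX_SHARES;
	return shares;
}

int main(void)
{
	unsigned long rq_weight[4] = { 1024, 3072, 0, 1024 };
	unsigned long total = 1024 + 3072 + 0 + 1024;	/* 5120 */
	int i;

	for (i = 0; i < 4; i++)
		printf("cpu%d: %lu\n", i, cpu_shares(1024, rq_weight[i], total));
	return 0;
}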
1560 static int tg_load_down(struct task_group *tg, void *data) in tg_load_down() argument
1565 if (!tg->parent) { in tg_load_down()
1568 load = tg->parent->cfs_rq[cpu]->h_load; in tg_load_down()
1569 load *= tg->cfs_rq[cpu]->shares; in tg_load_down()
1570 load /= tg->parent->cfs_rq[cpu]->load.weight + 1; in tg_load_down()
1573 tg->cfs_rq[cpu]->h_load = load; in tg_load_down()
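
tg_load_down() propagates hierarchical load top-down: the root level takes the runqueue weight directly, and each child scales its parent's h_load by the child's share of the parent's runqueue weight, with the "+ 1" guarding the divide. A sketch of one level of that arithmetic, using made-up example weights:

#include <stdio.h>

/*
 * One level of the top-down h_load propagation from the lines above:
 * the child inherits the fraction of the parent's h_load that matches
 * its weight on the parent's runqueue.
 */
static unsigned long child_h_load(unsigned long parent_h_load,
				  unsigned long child_shares,
				  unsigned long parent_rq_weight)
{
	unsigned long load = parent_h_load;

	load *= child_shares;
	load /= parent_rq_weight + 1;	/* + 1 keeps the divide well-defined */
	return load;
}

int main(void)
{
	/* parent h_load 2048, child holds 512 of the parent's 2048 weight */
	printf("%lu\n", child_h_load(2048, 512, 2048));	/* 2048 * 512 / 2049 == 511 */
	return 0;
}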
8298 static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
8303 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
8305 cfs_rq->tg = tg; in init_tg_cfs_entry()
8309 tg->se[cpu] = se; in init_tg_cfs_entry()
8320 se->load.weight = tg->shares; in init_tg_cfs_entry()
8327 static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
8333 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
8335 rt_rq->tg = tg; in init_tg_rt_entry()
8337 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in init_tg_rt_entry()
8341 tg->rt_se[cpu] = rt_se; in init_tg_rt_entry()
8712 static void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
8717 if (tg->cfs_rq) in free_fair_sched_group()
8718 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
8719 if (tg->se) in free_fair_sched_group()
8720 kfree(tg->se[i]); in free_fair_sched_group()
8723 kfree(tg->cfs_rq); in free_fair_sched_group()
8724 kfree(tg->se); in free_fair_sched_group()
8728 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8735 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8736 if (!tg->cfs_rq) in alloc_fair_sched_group()
8738 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); in alloc_fair_sched_group()
8739 if (!tg->se) in alloc_fair_sched_group()
8742 tg->shares = NICE_0_LOAD; in alloc_fair_sched_group()
8757 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); in alloc_fair_sched_group()
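
alloc_fair_sched_group() first allocates nr_cpu_ids-sized arrays of per-CPU cfs_rq and sched_entity pointers, sets the default group weight, then fills in one entry per CPU and unwinds on failure. A userspace sketch of that allocate-arrays-then-per-CPU-entries pattern; the types, names, and the 1024 default are stand-ins, not the kernel's:

#include <stdlib.h>

struct fake_rq { unsigned long weight; };	/* stand-in for a per-CPU cfs_rq */

struct fake_group {
	struct fake_rq **rq;	/* one pointer per possible CPU */
	unsigned long shares;
};

static int alloc_group(struct fake_group *tg, int nr_cpu_ids)
{
	int i;

	/* like kzalloc(sizeof(ptr) * nr_cpu_ids, GFP_KERNEL) */
	tg->rq = calloc(nr_cpu_ids, sizeof(*tg->rq));
	if (!tg->rq)
		return 0;

	tg->shares = 1024;	/* stand-in for the NICE_0_LOAD default */

	for (i = 0; i < nr_cpu_ids; i++) {
		tg->rq[i] = calloc(1, sizeof(*tg->rq[i]));
		if (!tg->rq[i])
			goto err;
	}
	return 1;	/* nonzero on success, mirroring the check in sched_create_group() */
err:
	while (i--)
		free(tg->rq[i]);
	free(tg->rq);
	return 0;
}

int main(void)
{
	struct fake_group tg = { 0 };
	return alloc_group(&tg, 4) ? 0 : 1;
}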
8766 static inline void register_fair_sched_group(struct task_group *tg, int cpu) in register_fair_sched_group() argument
8768 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list, in register_fair_sched_group()
8772 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) in unregister_fair_sched_group() argument
8774 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); in unregister_fair_sched_group()
8777 static inline void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
8782 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
8787 static inline void register_fair_sched_group(struct task_group *tg, int cpu) in register_fair_sched_group() argument
8791 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) in unregister_fair_sched_group() argument
8797 static void free_rt_sched_group(struct task_group *tg) in free_rt_sched_group() argument
8801 destroy_rt_bandwidth(&tg->rt_bandwidth); in free_rt_sched_group()
8804 if (tg->rt_rq) in free_rt_sched_group()
8805 kfree(tg->rt_rq[i]); in free_rt_sched_group()
8806 if (tg->rt_se) in free_rt_sched_group()
8807 kfree(tg->rt_se[i]); in free_rt_sched_group()
8810 kfree(tg->rt_rq); in free_rt_sched_group()
8811 kfree(tg->rt_se); in free_rt_sched_group()
8815 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) in alloc_rt_sched_group() argument
8822 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
8823 if (!tg->rt_rq) in alloc_rt_sched_group()
8825 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); in alloc_rt_sched_group()
8826 if (!tg->rt_se) in alloc_rt_sched_group()
8829 init_rt_bandwidth(&tg->rt_bandwidth, in alloc_rt_sched_group()
8845 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); in alloc_rt_sched_group()
8854 static inline void register_rt_sched_group(struct task_group *tg, int cpu) in register_rt_sched_group() argument
8856 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list, in register_rt_sched_group()
8860 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) in unregister_rt_sched_group() argument
8862 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); in unregister_rt_sched_group()
8865 static inline void free_rt_sched_group(struct task_group *tg) in free_rt_sched_group() argument
8870 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) in alloc_rt_sched_group() argument
8875 static inline void register_rt_sched_group(struct task_group *tg, int cpu) in register_rt_sched_group() argument
8879 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) in unregister_rt_sched_group() argument
8885 static void free_sched_group(struct task_group *tg) in free_sched_group() argument
8887 free_fair_sched_group(tg); in free_sched_group()
8888 free_rt_sched_group(tg); in free_sched_group()
8889 kfree(tg); in free_sched_group()
8895 struct task_group *tg; in sched_create_group() local
8899 tg = kzalloc(sizeof(*tg), GFP_KERNEL); in sched_create_group()
8900 if (!tg) in sched_create_group()
8903 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
8906 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
8911 register_fair_sched_group(tg, i); in sched_create_group()
8912 register_rt_sched_group(tg, i); in sched_create_group()
8914 list_add_rcu(&tg->list, &task_groups); in sched_create_group()
8918 tg->parent = parent; in sched_create_group()
8919 INIT_LIST_HEAD(&tg->children); in sched_create_group()
8920 list_add_rcu(&tg->siblings, &parent->children); in sched_create_group()
8923 return tg; in sched_create_group()
8926 free_sched_group(tg); in sched_create_group()
8938 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
8945 unregister_fair_sched_group(tg, i); in sched_destroy_group()
8946 unregister_rt_sched_group(tg, i); in sched_destroy_group()
8948 list_del_rcu(&tg->list); in sched_destroy_group()
8949 list_del_rcu(&tg->siblings); in sched_destroy_group()
8953 call_rcu(&tg->rcu, free_sched_group_rcu); in sched_destroy_group()
9025 int sched_group_set_shares(struct task_group *tg, unsigned long shares) in sched_group_set_shares() argument
9033 if (!tg->se[0]) in sched_group_set_shares()
9042 if (tg->shares == shares) in sched_group_set_shares()
9047 unregister_fair_sched_group(tg, i); in sched_group_set_shares()
9048 list_del_rcu(&tg->siblings); in sched_group_set_shares()
9058 tg->shares = shares; in sched_group_set_shares()
9063 cfs_rq_set_shares(tg->cfs_rq[i], 0); in sched_group_set_shares()
9064 set_se_shares(tg->se[i], shares); in sched_group_set_shares()
9073 register_fair_sched_group(tg, i); in sched_group_set_shares()
9074 list_add_rcu(&tg->siblings, &tg->parent->children); in sched_group_set_shares()
9081 unsigned long sched_group_shares(struct task_group *tg) in sched_group_shares() argument
9083 return tg->shares; in sched_group_shares()
9102 static inline int tg_has_rt_tasks(struct task_group *tg) in tg_has_rt_tasks() argument
9107 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) in tg_has_rt_tasks()
9115 struct task_group *tg; member
9120 static int tg_schedulable(struct task_group *tg, void *data) in tg_schedulable() argument
9127 period = ktime_to_ns(tg->rt_bandwidth.rt_period); in tg_schedulable()
9128 runtime = tg->rt_bandwidth.rt_runtime; in tg_schedulable()
9130 if (tg == d->tg) { in tg_schedulable()
9136 if (tg == &root_task_group) { in tg_schedulable()
9151 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) in tg_schedulable()
9165 list_for_each_entry_rcu(child, &tg->children, siblings) { in tg_schedulable()
9169 if (child == d->tg) { in tg_schedulable()
9183 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) in __rt_schedulable() argument
9186 .tg = tg, in __rt_schedulable()
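
tg_schedulable() / __rt_schedulable() walk the group tree and reject a bandwidth change when the children's combined runtime/period utilization would exceed what their parent is allowed. A hedged sketch of that comparison: the 20-bit fixed-point scaling is in the spirit of the kernel's to_ratio() helper (not part of this listing), and the example numbers are invented:

#include <stdio.h>
#include <stdint.h>

/* Express runtime/period as a 20-bit fixed-point utilization ratio. */
static uint64_t to_ratio(uint64_t period_ns, uint64_t runtime_ns)
{
	return (runtime_ns << 20) / period_ns;
}

int main(void)
{
	/* parent: 0.95 s runtime per 1 s period; two children at 0.4 s each */
	uint64_t parent = to_ratio(1000000000ULL, 950000000ULL);
	uint64_t sum = to_ratio(1000000000ULL, 400000000ULL) +
		       to_ratio(1000000000ULL, 400000000ULL);

	printf("%s\n", sum > parent ? "rejected" : "schedulable");
	return 0;
}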
9194 static int tg_set_bandwidth(struct task_group *tg, in tg_set_bandwidth() argument
9201 err = __rt_schedulable(tg, rt_period, rt_runtime); in tg_set_bandwidth()
9205 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_bandwidth()
9206 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); in tg_set_bandwidth()
9207 tg->rt_bandwidth.rt_runtime = rt_runtime; in tg_set_bandwidth()
9210 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_bandwidth()
9216 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); in tg_set_bandwidth()
9224 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) in sched_group_set_rt_runtime() argument
9228 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_set_rt_runtime()
9233 return tg_set_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_runtime()
9236 long sched_group_rt_runtime(struct task_group *tg) in sched_group_rt_runtime() argument
9240 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) in sched_group_rt_runtime()
9243 rt_runtime_us = tg->rt_bandwidth.rt_runtime; in sched_group_rt_runtime()
9248 int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) in sched_group_set_rt_period() argument
9253 rt_runtime = tg->rt_bandwidth.rt_runtime; in sched_group_set_rt_period()
9258 return tg_set_bandwidth(tg, rt_period, rt_runtime); in sched_group_set_rt_period()
9261 long sched_group_rt_period(struct task_group *tg) in sched_group_rt_period() argument
9265 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); in sched_group_rt_period()
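
The sched_group_{set_,}rt_runtime() and sched_group_{set_,}rt_period() hits above mostly convert between the microsecond values exposed to userspace and the nanosecond values kept in tg->rt_bandwidth, with a negative runtime meaning unlimited (RUNTIME_INF). A small sketch of that conversion pair, assuming the usual 1000 ns-per-us factor; the RUNTIME_INF definition here is a stand-in:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	((uint64_t)~0ULL)	/* stand-in for "unlimited" */

/* userspace value (us, negative == unlimited) -> stored value (ns) */
static uint64_t rt_runtime_to_ns(long runtime_us)
{
	if (runtime_us < 0)
		return RUNTIME_INF;
	return (uint64_t)runtime_us * NSEC_PER_USEC;
}

/* stored value (ns) -> userspace value (us, -1 == unlimited) */
static long rt_runtime_to_us(uint64_t runtime_ns)
{
	if (runtime_ns == RUNTIME_INF)
		return -1;
	return (long)(runtime_ns / NSEC_PER_USEC);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)rt_runtime_to_ns(950000));	/* 950000000 */
	printf("%ld\n", rt_runtime_to_us(rt_runtime_to_ns(-1)));		/* -1 */
	return 0;
}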
9296 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) in sched_rt_can_attach() argument
9299 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) in sched_rt_can_attach()
9370 struct task_group *tg, *parent; in cpu_cgroup_create() local
9378 tg = sched_create_group(parent); in cpu_cgroup_create()
9379 if (IS_ERR(tg)) in cpu_cgroup_create()
9382 return &tg->css; in cpu_cgroup_create()
9388 struct task_group *tg = cgroup_tg(cgrp); in cpu_cgroup_destroy() local
9390 sched_destroy_group(tg); in cpu_cgroup_destroy()
9434 struct task_group *tg = cgroup_tg(cgrp); in cpu_shares_read_u64() local
9436 return (u64) tg->shares; in cpu_shares_read_u64()