/* SPDX-License-Identifier: GPL-2.0 */
/*
 * related thread group sched header
 */
5 #ifndef __RTG_H
6 #define __RTG_H
7
8 #include <linux/types.h>
9 #include <linux/sched.h>
10
11 #define for_each_sched_cluster_reverse(cluster) \
12 list_for_each_entry_reverse(cluster, &cluster_head, list)
13
14 #ifdef CONFIG_SCHED_RTG
15 void init_task_rtg(struct task_struct *p);
16 int alloc_related_thread_groups(void);
17 struct related_thread_group *lookup_related_thread_group(unsigned int group_id);
18 struct related_thread_group *task_related_thread_group(struct task_struct *p);
19 void update_group_nr_running(struct task_struct *p, int event, u64 wallclock);
20 struct rq;
21 void update_group_demand(struct task_struct *p, struct rq *rq,
22 int event, u64 wallclock);
23 int sched_set_group_window_size(unsigned int grp_id, unsigned int window_size);
24 int sched_set_group_window_rollover(unsigned int grp_id);
25 struct group_cpu_time *group_update_cpu_time(struct rq *rq,
26 struct related_thread_group *grp);
27 void sched_update_rtg_tick(struct task_struct *p);
28 int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p);
29 int sched_set_group_preferred_cluster(unsigned int grp_id, int sched_cluster_id);
30 struct cpumask *find_rtg_target(struct task_struct *p);
31 int find_rtg_cpu(struct task_struct *p);
32 int sched_set_group_util_invalid_interval(unsigned int grp_id,
33 unsigned int interval);
34 int sched_set_group_normalized_util(unsigned int grp_id, unsigned long util,
35 unsigned int flag);
36 void sched_get_max_group_util(const struct cpumask *query_cpus,
37 unsigned long *util, unsigned int *freq);
38 int sched_set_group_freq_update_interval(unsigned int grp_id,
39 unsigned int interval);
40 #ifdef CONFIG_SCHED_RTG_CGROUP
41 int sync_cgroup_colocation(struct task_struct *p, bool insert);
42 void add_new_task_to_grp(struct task_struct *new);
43 #else
add_new_task_to_grp(struct task_struct * new)44 static inline void add_new_task_to_grp(struct task_struct *new) {}
45 #endif /* CONFIG_SCHED_RTG_CGROUP */
46 #else
/*
 * !CONFIG_SCHED_RTG: no-op stubs so callers need no #ifdef guards.
 * The int-returning variants report success (0); the void variants
 * do nothing.  sched_get_max_group_util leaves *util and *freq
 * untouched, so callers must pre-initialize them.
 */
static inline int alloc_related_thread_groups(void) { return 0; }

static inline int sched_set_group_preferred_cluster(unsigned int grp_id,
						    int sched_cluster_id)
{
	return 0;
}

static inline int sched_set_group_normalized_util(unsigned int grp_id,
						  unsigned long util,
						  unsigned int flag)
{
	return 0;
}

static inline void sched_get_max_group_util(const struct cpumask *query_cpus,
					    unsigned long *util,
					    unsigned int *freq)
{
}

static inline void add_new_task_to_grp(struct task_struct *new) {}
63 #endif /* CONFIG_SCHED_RTG */
64 #endif
65