/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
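/*
 * Illustrative note: each SD_FLAG(name, mflags) entry in
 * <linux/sched/sd_flags.h> is expanded twice by the x-macro blocks above.
 * For a hypothetical flag SD_EXAMPLE the two expansions would read:
 *
 *	__SD_EXAMPLE,				(index, first enum)
 *	SD_EXAMPLE = 1 << __SD_EXAMPLE,		(bit value, second enum)
 *
 * so every flag gets a unique power-of-two value without hand-numbering.
 */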

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
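/*
 * Note: the cpu_*_flags() helpers above are the per-level sd_flags
 * callbacks wired into the topology tables in kernel/sched/topology.c
 * (see sched_domain_flags_f below): SMT siblings share a core's
 * capacity and its caches, cores in a package share cache, and NUMA
 * levels only carry SD_NUMA.
 */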

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
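/*
 * Note: a relax_domain_level of -1 means "no explicit request", letting
 * the scheduler fall back to its default; the value is normally set
 * through the cpuset sched_relax_domain_level file.
 */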

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;

	ANDROID_VENDOR_DATA(1);
};
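/*
 * Note: sched_domain_shared is refcounted and shared by the per-CPU
 * sched_domain instances that span the same CPUs (via
 * sched_domain::shared below), so state such as nr_busy_cpus and
 * has_idle_cores can be updated once rather than per domain copy.
 */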

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
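/*
 * Note: span[] is a flexible array member, so a sched_domain has to be
 * allocated with room for the cpumask appended. A sketch of what the
 * topology code does (not a verbatim quote):
 *
 *	sd = kzalloc(sizeof(struct sched_domain) + cpumask_size(),
 *		     GFP_KERNEL);
 *
 * sched_domain_span() then reinterprets that trailing storage as a
 * struct cpumask via to_cpumask().
 */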

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
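/*
 * Note: each topology level pairs one sched_domain_mask_f with an
 * optional sched_domain_flags_f; e.g. an SMT level would pair a
 * sibling-mask helper such as cpu_smt_mask() with cpu_smt_flags()
 * above. The mask callback returns the CPUs a given CPU shares that
 * level with; the flags callback returns the SD_* bits for the level.
 */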

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
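/*
 * Note: architectures pass set_sched_topology() a NULL-terminated table
 * of levels, ordered from the smallest to the largest span. A sketch
 * along the lines of the default table in kernel/sched/topology.c
 * (details vary by kernel version):
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */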

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
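/*
 * Note: on asymmetric systems (e.g. arm64 big.LITTLE) the architecture
 * overrides arch_scale_cpu_capacity so slower CPUs report a value below
 * SCHED_CAPACITY_SCALE (1024); this generic fallback treats all CPUs as
 * equally capable.
 */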

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_set_thermal_pressure
static __always_inline
void arch_set_thermal_pressure(const struct cpumask *cpus,
			       unsigned long th_pressure)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */