/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>
#include <linux/android_kabi.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040	/* Domain members have different CPU capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share CPU pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

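/*
 * These SD_* bits are ORed together into the 'flags' word of struct
 * sched_domain (see below) and queried with plain bit tests, e.g.
 * "sd->flags & SD_NUMA".  The small helpers that follow return the flag
 * sets contributed by the SMT, MC and NUMA topology levels.
 */
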
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
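
/*
 * The helpers above are the sched_domain_flags_f callbacks wired into the
 * scheduler's default topology table; see the illustrative table near
 * set_sched_topology() further down.
 */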

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
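
/*
 * Illustrative sketch (not an API defined here): callers that rebuild the
 * domain set pass one attribute per new domain, e.g.
 *
 *	struct sched_domain_attr attr = SD_ATTR_INIT;
 *
 *	attr.relax_domain_level = 2;
 *	partition_sched_domains(1, doms, &attr);
 *
 * where 'doms' is a one-element cpumask array from alloc_sched_domains(),
 * declared further down.
 */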

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};
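
/*
 * One sched_domain_shared instance is shared, and reference counted, by all
 * domains spanning the same last-level cache; nr_busy_cpus and
 * has_idle_cores are cheap hints consulted on the wakeup path so idle-CPU
 * scans can be cut short without walking the whole package.
 */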

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
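
/*
 * Minimal usage sketch (illustrative only; the per-CPU base domain pointer
 * lives in runqueue state private to kernel/sched/): walking up the
 * hierarchy under rcu_read_lock() to find the smallest domain spanning two
 * CPUs:
 *
 *	rcu_read_lock();
 *	for (sd = lowest_domain_of(this_cpu); sd; sd = sd->parent) {
 *		if (cpumask_test_cpu(that_cpu, sched_domain_span(sd)))
 *			break;
 *	}
 *	rcu_read_unlock();
 *
 * lowest_domain_of() is a placeholder for however the caller obtained the
 * bottom-level domain; it is not an interface provided by this header.
 */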

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
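
/*
 * Illustrative sketch of the allocate/partition pattern (this is how the
 * cpuset code drives domain rebuilds; error handling omitted):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(ndoms);
 *
 *	(fill each doms[i] with the CPUs of one partition)
 *	partition_sched_domains(ndoms, doms, NULL);
 *
 * partition_sched_domains() keeps the new array and releases the previous
 * one itself, so the caller does not free 'doms' after handing it over;
 * free_sched_domains() is for arrays that were never installed.
 */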

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
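
/*
 * Illustrative topology table, modelled on the scheduler's default table in
 * kernel/sched/topology.c.  An architecture can install its own ordering
 * (smallest span first, terminated by a NULL mask) with set_sched_topology():
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * 'my_topology' is only an example name; the mask helpers come from
 * <linux/topology.h> and the architecture's asm/topology.h.
 */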

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
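
/*
 * Architectures with asymmetric CPU capacities override the fallback above
 * by defining arch_scale_cpu_capacity before this point (arm64, for example,
 * maps it to the per-CPU capacity maintained by the arch_topology driver).
 * The returned value is scaled so that the biggest CPU reports
 * SCHED_CAPACITY_SCALE.
 */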

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */