/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __CPUSET_INTERNAL_H
#define __CPUSET_INTERNAL_H

#include <linux/android_kabi.h>
#include <linux/cgroup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/spinlock.h>
#include <linux/union_find.h>

/* See the "Frequency meter" comments in cpuset-v1.c. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};
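
/*
 * A minimal sketch of how the filter is driven, assuming the
 * fmeter_markevent()/fmeter_getrate() helpers that live next to the
 * "Frequency meter" comments in cpuset-v1.c:
 *
 *	fmeter_markevent(&cs->fmeter);	      on each memory-pressure event
 *	rate = fmeter_getrate(&cs->fmeter);   when memory_pressure is read
 */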

/*
 * Invalid partition error code
 */
enum prs_errcode {
	PERR_NONE = 0,
	PERR_INVCPUS,
	PERR_INVPARENT,
	PERR_NOTPART,
	PERR_NOTEXCL,
	PERR_NOCPUS,
	PERR_HOTPLUG,
	PERR_CPUSEMPTY,
	PERR_HKEEPING,
	PERR_ACCESS,
	PERR_REMOTE,
};
ANDROID_KABI_ENUMERATOR_IGNORE(prs_errcode, PERR_REMOTE);
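
/*
 * Each PERR_* code is assumed to map to a human-readable message (the
 * perr_strings[] table in cpuset.c) that is reported through
 * cpuset.cpus.partition when a partition becomes invalid.
 */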

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_EFFECTIVE_CPULIST,
	FILE_EFFECTIVE_MEMLIST,
	FILE_SUBPARTS_CPULIST,
	FILE_EXCLUSIVE_CPULIST,
	FILE_EFFECTIVE_XCPULIST,
	FILE_ISOLATED_CPULIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEM_HARDWALL,
	FILE_SCHED_LOAD_BALANCE,
	FILE_PARTITION_ROOT,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may change when the configured masks change
	 * or a CPU/memory hotplug event occurs.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
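	 *
	 * A worked example (hypothetical masks, for illustration only):
	 * if the parent's effective_cpus is 0-3 and this cpuset's
	 * configured cpus_allowed is 2-5, then effective_cpus becomes 2-3.
	 * If cpus_allowed were 8-9 instead, the intersection would be
	 * empty and effective_cpus would fall back to the parent's 0-3.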
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * Exclusive CPUs dedicated to the current cgroup (default hierarchy
	 * only)
	 *
	 * The effective_cpus of a valid partition root comes solely from its
	 * effective_xcpus, and some of the effective_xcpus may be distributed
	 * to sub-partitions below and hence excluded from its effective_cpus.
	 * For a valid partition root, its effective_cpus has no relationship
	 * with cpus_allowed unless its exclusive_cpus is unset.
	 *
	 * This mask is only set if exclusive_cpus is set or when this
	 * cpuset becomes a local partition root.
	 */
	cpumask_var_t effective_xcpus;

	/*
	 * Exclusive CPUs as requested by the user (default hierarchy only)
	 *
	 * Its value is independent of cpus_allowed and designates the set of
	 * CPUs that can be granted to the current cpuset or its children when
	 * it becomes a valid partition root. The effective set of exclusive
	 * CPUs granted (effective_xcpus) depends on whether those exclusive
	 * CPUs are passed down by its ancestors and not yet taken up by
	 * another sibling partition root along the way.
	 *
	 * If its value isn't set, it defaults to cpus_allowed.
	 */
	cpumask_var_t exclusive_cpus;
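
	/*
	 * A hypothetical illustration of the two masks above: if a parent
	 * partition owns exclusive CPUs 0-7 and a child writes "0-3" to
	 * cpuset.cpus.exclusive, the child's exclusive_cpus becomes 0-3.
	 * When that child later becomes a valid partition root, its
	 * effective_xcpus is whatever subset of 0-3 the parent can still
	 * grant, i.e. CPUs not already taken by a sibling partition.
	 */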

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset. Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of valid local child partitions */
	int nr_subparts;

	/* partition root state */
	int partition_root_state;

	/*
	 * Number of SCHED_DEADLINE tasks attached to this cpuset, so that we
	 * know when to rebuild associated root domain bandwidth information.
	 */
	int nr_deadline_tasks;
	int nr_migrate_dl_tasks;
	u64 sum_migrate_dl_bw;

	/* Invalid partition error code, not lock protected */
	enum prs_errcode prs_err;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;

	/* Remote partition sibling list anchored at remote_children */
	struct list_head remote_sibling;

	/* Used to merge intersecting subsets for generate_sched_domains() */
	struct uf_node node;

	ANDROID_BACKPORT_RESERVE(1);
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}
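
/*
 * A minimal usage sketch (illustrative only; some_predicate() is a
 * placeholder): parent_cs() returns NULL for the top_cpuset, so the
 * ancestors of a cpuset can be walked with a simple loop, e.g. to find
 * the nearest ancestor satisfying some condition:
 *
 *	struct cpuset *cs = ...;
 *
 *	while ((cs = parent_cs(cs)) && !some_predicate(cs))
 *		;
 */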

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs. Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
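
/*
 * A minimal usage sketch (illustrative; visit_child() is a placeholder):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct cpuset *cp;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(cp, pos_css, parent)
 *		visit_child(cp);
 *	rcu_read_unlock();
 */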

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs. Must be used
 * with RCU read locked. The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree. @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
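
/*
 * A minimal usage sketch (illustrative; should_skip() and visit() are
 * placeholders), including the subtree-skip idiom mentioned above:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct cpuset *cp;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, root) {
 *		if (should_skip(cp)) {
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;
 *		}
 *		visit(cp);
 *	}
 *	rcu_read_unlock();
 */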

void rebuild_sched_domains_locked(void);
void cpuset_callback_lock_irq(void);
void cpuset_callback_unlock_irq(void);
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
void cpuset_update_tasks_nodemask(struct cpuset *cs);
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);

/*
 * cpuset-v1.c
 */
#ifdef CONFIG_CPUSETS_V1
extern struct cftype cpuset1_files[];
void fmeter_init(struct fmeter *fmp);
void cpuset1_update_task_spread_flags(struct cpuset *cs,
					struct task_struct *tsk);
void cpuset1_update_tasks_flags(struct cpuset *cs);
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated);
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
#else
static inline void fmeter_init(struct fmeter *fmp) {}
static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
				struct task_struct *tsk) {}
static inline void cpuset1_update_tasks_flags(struct cpuset *cs) {}
static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			struct cpumask *new_cpus, nodemask_t *new_mems,
			bool cpus_updated, bool mems_updated) {}
static inline int cpuset1_validate_change(struct cpuset *cur,
					  struct cpuset *trial) { return 0; }
#endif /* CONFIG_CPUSETS_V1 */

#endif /* __CPUSET_INTERNAL_H */