/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	/* pre_enable first, so begin() is live before retry(); see above */
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	/* reverse order on the way down, for the same reason */
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

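/*
 * Allocation-path helpers. The cpusets_enabled() static branch keeps the
 * common case (no cpusets in active use) down to a single patched jump,
 * with every node and zone treated as allowed.
 */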
extern bool _cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return _cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return _cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

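/*
 * Per-cpuset memory pressure accounting is off by default; the macro
 * keeps the disabled case down to a single flag test.
 */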
#define cpuset_memory_pressure_bump() \
	do { \
		if (cpuset_memory_pressure_enabled) \
			_cpuset_memory_pressure_bump(); \
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void _cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures; see the sketch after read_mems_allowed_retry() below.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
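
/*
 * A minimal sketch of the intended begin/retry pattern. Illustrative
 * only: try_alloc() is a hypothetical stand-in for whatever
 * mems_allowed-dependent operation the caller performs.
 *
 *	unsigned int seq;
 *	struct page *page;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		page = try_alloc(node);	// can fail on a stale mems_allowed
 *	} while (!page && read_mems_allowed_retry(seq));
 *
 * Per the ordering comment at the top of this file, begin() must be
 * patched in before retry(); otherwise a caller looping with irqs
 * disabled could compare the live seqcount against begin()'s 0 and
 * spin forever.
 */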
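/*
 * Write side of the mems_allowed seqcount. Irqs are disabled across the
 * write section so that a reader interrupting this CPU cannot spin in
 * read_mems_allowed_retry() against an update that can never complete.
 */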
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

extern void cpuset_hotplug_workfn(struct work_struct *work);

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void)
{
	return false;
}

static inline int cpuset_init(void)
{
	return 0;
}

static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) {}

static inline void cpuset_read_lock(void) {}
static inline void cpuset_read_unlock(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) {}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void) {}

static inline void set_mems_allowed(nodemask_t nodemask) {}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline void cpuset_hotplug_workfn(struct work_struct *work) {}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */