/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

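/*
 * cpuset_inc()/cpuset_dec() flip the two static keys in the order required
 * by the comment above: the key read by read_mems_allowed_begin() is raised
 * first and lowered last, so begin() is never behind retry().
 */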
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_update_active_cpus_affine(int cpu);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

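/*
 * cpuset_node_allowed() and cpuset_zone_allowed() are fast-path wrappers:
 * while cpusets are not in active use, the static branch lets them return
 * true without calling the out-of-line __cpuset_node_allowed().
 */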
static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

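/*
 * Illustrative usage sketch for the pair above (not a definition from this
 * header; "try_alloc", "gfp_mask" and "order" are placeholders for whatever
 * operation depends on mems_allowed). Callers that must act on a stable
 * mems_allowed wrap the operation in a begin()/retry() loop:
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */
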
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

extern void cpuset_hotplug_workfn(struct work_struct *work);

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus_affine(int cpu) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline void cpuset_hotplug_workfn(struct work_struct *work) {}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */