1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/kthread.h>
37 #include <linux/list.h>
38 #include <linux/mempolicy.h>
39 #include <linux/mm.h>
40 #include <linux/memory.h>
41 #include <linux/export.h>
42 #include <linux/mount.h>
43 #include <linux/fs_context.h>
44 #include <linux/namei.h>
45 #include <linux/pagemap.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48 #include <linux/sched.h>
49 #include <linux/sched/deadline.h>
50 #include <linux/sched/mm.h>
51 #include <linux/sched/task.h>
52 #include <linux/seq_file.h>
53 #include <linux/security.h>
54 #include <linux/slab.h>
55 #include <linux/spinlock.h>
56 #include <linux/stat.h>
57 #include <linux/string.h>
58 #include <linux/time.h>
59 #include <linux/time64.h>
60 #include <linux/backing-dev.h>
61 #include <linux/sort.h>
62 #include <linux/oom.h>
63 #include <linux/sched/isolation.h>
64 #include <linux/uaccess.h>
65 #include <linux/atomic.h>
66 #include <linux/mutex.h>
67 #include <linux/cgroup.h>
68 #include <linux/wait.h>
69 
70 #include <trace/hooks/cgroup.h>
71 #include <trace/hooks/sched.h>
72 
73 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
74 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
75 
76 /*
77  * There could be abnormal cpuset configurations for cpu or memory
78  * node binding; this key provides a quick, low-cost check for
79  * such situations.
80  */
81 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
82 
83 /* See "Frequency meter" comments, below. */
84 
85 struct fmeter {
86 	int cnt;		/* unprocessed events count */
87 	int val;		/* most recent output value */
88 	time64_t time;		/* clock (secs) when val computed */
89 	spinlock_t lock;	/* guards read or write of above */
90 };
91 
92 /*
93  * Invalid partition error code
94  */
95 enum prs_errcode {
96 	PERR_NONE = 0,
97 	PERR_INVCPUS,
98 	PERR_INVPARENT,
99 	PERR_NOTPART,
100 	PERR_NOTEXCL,
101 	PERR_NOCPUS,
102 	PERR_HOTPLUG,
103 	PERR_CPUSEMPTY,
104 };
105 
106 static const char * const perr_strings[] = {
107 	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus",
108 	[PERR_INVPARENT] = "Parent is an invalid partition root",
109 	[PERR_NOTPART]   = "Parent is not a partition root",
110 	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
111 	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
112 	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
113 	[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
114 };
115 
116 struct cpuset {
117 	struct cgroup_subsys_state css;
118 
119 	unsigned long flags;		/* "unsigned long" so bitops work */
120 
121 	/*
122 	 * On default hierarchy:
123 	 *
124 	 * The user-configured masks can only be changed by writing to
125 	 * cpuset.cpus and cpuset.mems, and won't be limited by the
126 	 * parent masks.
127 	 *
128 	 * The effective masks are the real masks that apply to the tasks
129 	 * in the cpuset. They may be changed if the configured masks are
130 	 * changed or hotplug happens.
131 	 *
132 	 * effective_mask == configured_mask & parent's effective_mask,
133 	 * and if it ends up empty, it will inherit the parent's mask.
134 	 *
135 	 *
136 	 * On legacy hierarchy:
137 	 *
138 	 * The user-configured masks are always the same as the effective masks.
139 	 */
140 
141 	/* user-configured CPUs and Memory Nodes allowed to tasks */
142 	cpumask_var_t cpus_allowed;
143 	cpumask_var_t cpus_requested;
144 	nodemask_t mems_allowed;
145 
146 	/* effective CPUs and Memory Nodes allowed to tasks */
147 	cpumask_var_t effective_cpus;
148 	nodemask_t effective_mems;
149 
150 	/*
151 	 * CPUs allocated to child sub-partitions (default hierarchy only)
152 	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
153 	 * - effective_cpus and subparts_cpus are mutually exclusive.
154 	 *
155 	 * effective_cpus contains only onlined CPUs, but subparts_cpus
156 	 * may have offlined ones.
157 	 */
158 	cpumask_var_t subparts_cpus;
159 
160 	/*
161 	 * These are the old Memory Nodes that tasks took on.
162 	 *
163 	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
164 	 * - A new cpuset's old_mems_allowed is initialized when some
165 	 *   task is moved into it.
166 	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
167 	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
168 	 *   then old_mems_allowed is updated to mems_allowed.
169 	 */
170 	nodemask_t old_mems_allowed;
171 
172 	struct fmeter fmeter;		/* memory_pressure filter */
173 
174 	/*
175 	 * Tasks are being attached to this cpuset.  Used to prevent
176 	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
177 	 */
178 	int attach_in_progress;
179 
180 	/* partition number for rebuild_sched_domains() */
181 	int pn;
182 
183 	/* for custom sched domain */
184 	int relax_domain_level;
185 
186 	/* number of CPUs in subparts_cpus */
187 	int nr_subparts_cpus;
188 
189 	/* partition root state */
190 	int partition_root_state;
191 
192 	/*
193 	 * Default hierarchy only:
194 	 * use_parent_ecpus - set if using parent's effective_cpus
195 	 * child_ecpus_count - # of children with use_parent_ecpus set
196 	 */
197 	int use_parent_ecpus;
198 	int child_ecpus_count;
199 
200 	/*
201 	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
202 	 * know when to rebuild associated root domain bandwidth information.
203 	 */
204 	int nr_deadline_tasks;
205 	int nr_migrate_dl_tasks;
206 	u64 sum_migrate_dl_bw;
207 
208 	/* Invalid partition error code, not lock protected */
209 	enum prs_errcode prs_err;
210 
211 	/* Handle for cpuset.cpus.partition */
212 	struct cgroup_file partition_file;
213 };
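/*
 * Worked example of the default-hierarchy rule above (illustrative
 * values, not from the source): with parent->effective_cpus = 0-3 and
 * a child configured with cpuset.cpus = 2-5, the child's
 * effective_cpus becomes 2-3 (configured & parent's effective).  Were
 * cpuset.cpus set to 6-7 instead, the intersection would be empty and
 * the child would inherit the parent's mask 0-3.
 */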
214 
215 /*
216  * Partition root states:
217  *
218  *   0 - member (not a partition root)
219  *   1 - partition root
220  *   2 - partition root without load balancing (isolated)
221  *  -1 - invalid partition root
222  *  -2 - invalid isolated partition root
223  */
224 #define PRS_MEMBER		0
225 #define PRS_ROOT		1
226 #define PRS_ISOLATED		2
227 #define PRS_INVALID_ROOT	-1
228 #define PRS_INVALID_ISOLATED	-2
229 
230 static inline bool is_prs_invalid(int prs_state)
231 {
232 	return prs_state < 0;
233 }
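/*
 * The invalid states are the negations of the valid ones, so a valid
 * partition root can be invalidated by flipping the sign, as
 * make_partition_invalid() below does.  A minimal sketch:
 *
 *	int prs = PRS_ISOLATED;		// 2
 *	prs = -prs;			// PRS_INVALID_ISOLATED (-2)
 *	WARN_ON(!is_prs_invalid(prs));
 */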
234 
235 /*
236  * Temporary cpumasks for working with partitions that are passed among
237  * functions to avoid memory allocation in inner functions.
238  */
239 struct tmpmasks {
240 	cpumask_var_t addmask, delmask;	/* For partition root */
241 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
242 };
243 
244 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
245 {
246 	return css ? container_of(css, struct cpuset, css) : NULL;
247 }
248 
249 /* Retrieve the cpuset for a task */
250 static inline struct cpuset *task_cs(struct task_struct *task)
251 {
252 	return css_cs(task_css(task, cpuset_cgrp_id));
253 }
254 
255 static inline struct cpuset *parent_cs(struct cpuset *cs)
256 {
257 	return css_cs(cs->css.parent);
258 }
259 
260 void inc_dl_tasks_cs(struct task_struct *p)
261 {
262 	struct cpuset *cs = task_cs(p);
263 
264 	cs->nr_deadline_tasks++;
265 }
266 
267 void dec_dl_tasks_cs(struct task_struct *p)
268 {
269 	struct cpuset *cs = task_cs(p);
270 
271 	cs->nr_deadline_tasks--;
272 }
273 
274 /* bits in struct cpuset flags field */
275 typedef enum {
276 	CS_ONLINE,
277 	CS_CPU_EXCLUSIVE,
278 	CS_MEM_EXCLUSIVE,
279 	CS_MEM_HARDWALL,
280 	CS_MEMORY_MIGRATE,
281 	CS_SCHED_LOAD_BALANCE,
282 	CS_SPREAD_PAGE,
283 	CS_SPREAD_SLAB,
284 } cpuset_flagbits_t;
285 
286 /* convenient tests for these bits */
287 static inline bool is_cpuset_online(struct cpuset *cs)
288 {
289 	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
290 }
291 
292 static inline int is_cpu_exclusive(const struct cpuset *cs)
293 {
294 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
295 }
296 
297 static inline int is_mem_exclusive(const struct cpuset *cs)
298 {
299 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
300 }
301 
302 static inline int is_mem_hardwall(const struct cpuset *cs)
303 {
304 	return test_bit(CS_MEM_HARDWALL, &cs->flags);
305 }
306 
307 static inline int is_sched_load_balance(const struct cpuset *cs)
308 {
309 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
310 }
311 
312 static inline int is_memory_migrate(const struct cpuset *cs)
313 {
314 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
315 }
316 
317 static inline int is_spread_page(const struct cpuset *cs)
318 {
319 	return test_bit(CS_SPREAD_PAGE, &cs->flags);
320 }
321 
322 static inline int is_spread_slab(const struct cpuset *cs)
323 {
324 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
325 }
326 
327 static inline int is_partition_valid(const struct cpuset *cs)
328 {
329 	return cs->partition_root_state > 0;
330 }
331 
332 static inline int is_partition_invalid(const struct cpuset *cs)
333 {
334 	return cs->partition_root_state < 0;
335 }
336 
337 /*
338  * Callers should hold callback_lock to modify partition_root_state.
339  */
340 static inline void make_partition_invalid(struct cpuset *cs)
341 {
342 	if (is_partition_valid(cs))
343 		cs->partition_root_state = -cs->partition_root_state;
344 }
345 
346 /*
347  * Send a notification event whenever partition_root_state changes.
348  */
349 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
350 {
351 	if (old_prs == cs->partition_root_state)
352 		return;
353 	cgroup_file_notify(&cs->partition_file);
354 
355 	/* Reset prs_err if not invalid */
356 	if (is_partition_valid(cs))
357 		WRITE_ONCE(cs->prs_err, PERR_NONE);
358 }
359 
360 static struct cpuset top_cpuset = {
361 	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
362 		  (1 << CS_MEM_EXCLUSIVE)),
363 	.partition_root_state = PRS_ROOT,
364 };
365 
366 /**
367  * cpuset_for_each_child - traverse online children of a cpuset
368  * @child_cs: loop cursor pointing to the current child
369  * @pos_css: used for iteration
370  * @parent_cs: target cpuset to walk children of
371  *
372  * Walk @child_cs through the online children of @parent_cs.  Must be used
373  * with RCU read locked.
374  */
375 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
376 	css_for_each_child((pos_css), &(parent_cs)->css)		\
377 		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
378 
379 /**
380  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
381  * @des_cs: loop cursor pointing to the current descendant
382  * @pos_css: used for iteration
383  * @root_cs: target cpuset to walk descendants of
384  *
385  * Walk @des_cs through the online descendants of @root_cs.  Must be used
386  * with RCU read locked.  The caller may modify @pos_css by calling
387  * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
388  * iteration and the first node to be visited.
389  */
390 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
391 	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
392 		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
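/*
 * A usage sketch for the two iterators above (hypothetical caller;
 * real users include validate_change() below).  Both require the RCU
 * read lock to be held across the walk:
 *
 *	struct cpuset *child;
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos, parent)
 *		do_something(child);
 *	rcu_read_unlock();
 */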
393 
394 /*
395  * There are two global locks guarding cpuset structures - cpuset_mutex and
396  * callback_lock. We also require taking task_lock() when dereferencing a
397  * task's cpuset pointer. See "The task_lock() exception", at the end of this
398  * comment.  The cpuset code uses only cpuset_mutex. Other kernel subsystems
399  * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
400  * structures. Note that cpuset_mutex needs to be a mutex as it is used in
401  * paths that rely on priority inheritance (e.g. scheduler - on RT) for
402  * correctness.
403  *
404  * A task must hold both locks to modify cpusets.  If a task holds
405  * cpuset_mutex, it blocks others, ensuring that it is the only task able to
406  * also acquire callback_lock and be able to modify cpusets.  It can perform
407  * various checks on the cpuset structure first, knowing nothing will change.
408  * It can also allocate memory while just holding cpuset_mutex.  While it is
409  * performing these checks, various callback routines can briefly acquire
410  * callback_lock to query cpusets.  Once it is ready to make the changes, it
411  * takes callback_lock, blocking everyone else.
412  *
413  * Calls to the kernel memory allocator can not be made while holding
414  * callback_lock, as that would risk double tripping on callback_lock
415  * from one of the callbacks into the cpuset code from within
416  * __alloc_pages().
417  *
418  * If a task is only holding callback_lock, then it has read-only
419  * access to cpusets.
420  *
421  * The task_struct fields mems_allowed and mempolicy may be changed
422  * by another task, so we use alloc_lock in the task_struct to protect
423  * them.
424  *
425  * The cpuset_common_file_read() handlers only hold callback_lock across
426  * small pieces of code, such as when reading out possibly multi-word
427  * cpumasks and nodemasks.
428  *
429  * Accessing a task's cpuset should be done in accordance with the
430  * guidelines for accessing subsystem state in kernel/cgroup.c
431  */
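/*
 * A sketch of the modifier-side protocol described above (illustrative
 * only): take cpuset_mutex for the whole update, allocate and validate
 * without callback_lock, then take callback_lock just around the
 * stores that publish the new masks:
 *
 *	mutex_lock(&cpuset_mutex);
 *	// validate_change(), allocations, etc.
 *	spin_lock_irq(&callback_lock);
 *	// write cpus_allowed/mems_allowed/effective_* fields
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */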
432 
433 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
434 
435 static DEFINE_MUTEX(cpuset_mutex);
436 
437 void cpuset_lock(void)
438 {
439 	mutex_lock(&cpuset_mutex);
440 }
441 
442 void cpuset_unlock(void)
443 {
444 	mutex_unlock(&cpuset_mutex);
445 }
446 
447 static DEFINE_SPINLOCK(callback_lock);
448 
449 static struct workqueue_struct *cpuset_migrate_mm_wq;
450 
451 /*
452  * CPU / memory hotplug is handled asynchronously.
453  */
454 static void cpuset_hotplug_workfn(struct work_struct *work);
455 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
456 
457 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
458 
459 static inline void check_insane_mems_config(nodemask_t *nodes)
460 {
461 	if (!cpusets_insane_config() &&
462 		movable_only_nodes(nodes)) {
463 		static_branch_enable(&cpusets_insane_config_key);
464 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
465 			"Cpuset allocations might fail even with a lot of memory available.\n",
466 			nodemask_pr_args(nodes));
467 	}
468 }
469 
470 /*
471  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
472  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
473  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
474  * With v2 behavior, "cpus" and "mems" are always what the users have
475  * requested and won't be changed by hotplug events. Only the effective
476  * cpus or mems will be affected.
477  */
478 static inline bool is_in_v2_mode(void)
479 {
480 	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
481 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
482 }
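/*
 * For illustration, the v1 opt-in mentioned above is requested at
 * mount time (mount point and source are up to userspace):
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode none /dev/cpuset
 */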
483 
484 /**
485  * partition_is_populated - check if partition has tasks
486  * @cs: partition root to be checked
487  * @excluded_child: a child cpuset to be excluded in task checking
488  * Return: true if there are tasks, false otherwise
489  *
490  * It is assumed that @cs is a valid partition root. @excluded_child should
491  * be non-NULL when this cpuset is going to become a partition itself.
492  */
493 static inline bool partition_is_populated(struct cpuset *cs,
494 					  struct cpuset *excluded_child)
495 {
496 	struct cgroup_subsys_state *css;
497 	struct cpuset *child;
498 
499 	if (cs->css.cgroup->nr_populated_csets)
500 		return true;
501 	if (!excluded_child && !cs->nr_subparts_cpus)
502 		return cgroup_is_populated(cs->css.cgroup);
503 
504 	rcu_read_lock();
505 	cpuset_for_each_child(child, css, cs) {
506 		if (child == excluded_child)
507 			continue;
508 		if (is_partition_valid(child))
509 			continue;
510 		if (cgroup_is_populated(child->css.cgroup)) {
511 			rcu_read_unlock();
512 			return true;
513 		}
514 	}
515 	rcu_read_unlock();
516 	return false;
517 }
518 
519 /*
520  * Return in pmask the portion of a task's cpuset's cpus_allowed that
521  * are online and are capable of running the task.  If none are found,
522  * walk up the cpuset hierarchy until we find one that does have some
523  * appropriate cpus.
524  *
525  * One way or another, we guarantee to return some non-empty subset
526  * of cpu_online_mask.
527  *
528  * Call with callback_lock or cpuset_mutex held.
529  */
530 static void guarantee_online_cpus(struct task_struct *tsk,
531 				  struct cpumask *pmask)
532 {
533 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
534 	struct cpuset *cs;
535 
536 	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
537 		cpumask_copy(pmask, cpu_online_mask);
538 
539 	rcu_read_lock();
540 	cs = task_cs(tsk);
541 
542 	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
543 		cs = parent_cs(cs);
544 		if (unlikely(!cs)) {
545 			/*
546 			 * The top cpuset doesn't have any online cpu as a
547 			 * consequence of a race between cpuset_hotplug_work
548 			 * and cpu hotplug notifier.  But we know the top
549 			 * cpuset's effective_cpus is on its way to becoming
550 			 * identical to cpu_online_mask.
551 			 */
552 			goto out_unlock;
553 		}
554 	}
555 	cpumask_and(pmask, pmask, cs->effective_cpus);
556 
557 out_unlock:
558 	rcu_read_unlock();
559 }
560 
561 /*
562  * Return in *pmask the portion of a cpuset's mems_allowed that
563  * are online, with memory.  If none are online with memory, walk
564  * up the cpuset hierarchy until we find one that does have some
565  * online mems.  The top cpuset always has some mems online.
566  *
567  * One way or another, we guarantee to return some non-empty subset
568  * of node_states[N_MEMORY].
569  *
570  * Call with callback_lock or cpuset_mutex held.
571  */
572 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
573 {
574 	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
575 		cs = parent_cs(cs);
576 	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
577 }
578 
579 /*
580  * update task's spread flag if cpuset's page/slab spread flag is set
581  *
582  * Call with callback_lock or cpuset_mutex held. The check can be skipped
583  * if on default hierarchy.
584  */
585 static void cpuset_update_task_spread_flags(struct cpuset *cs,
586 					struct task_struct *tsk)
587 {
588 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
589 		return;
590 
591 	if (is_spread_page(cs))
592 		task_set_spread_page(tsk);
593 	else
594 		task_clear_spread_page(tsk);
595 
596 	if (is_spread_slab(cs))
597 		task_set_spread_slab(tsk);
598 	else
599 		task_clear_spread_slab(tsk);
600 }
601 
602 /*
603  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
604  *
605  * One cpuset is a subset of another if all its allowed CPUs and
606  * Memory Nodes are a subset of the other, and its exclusive flags
607  * are only set if the other's are set.  Call holding cpuset_mutex.
608  */
609 
610 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
611 {
612 	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
613 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
614 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
615 		is_mem_exclusive(p) <= is_mem_exclusive(q);
616 }
617 
618 /**
619  * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
620  * @cs:  the cpuset that has cpumasks to be allocated.
621  * @tmp: the tmpmasks structure pointer
622  * Return: 0 if successful, -ENOMEM otherwise.
623  *
624  * Only one of the two input arguments should be non-NULL.
625  */
626 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
627 {
628 	cpumask_var_t *pmask1, *pmask2, *pmask3;
629 
630 	if (cs) {
631 		pmask1 = &cs->cpus_allowed;
632 		pmask2 = &cs->effective_cpus;
633 		pmask3 = &cs->subparts_cpus;
634 	} else {
635 		pmask1 = &tmp->new_cpus;
636 		pmask2 = &tmp->addmask;
637 		pmask3 = &tmp->delmask;
638 	}
639 
640 	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
641 		return -ENOMEM;
642 
643 	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
644 		goto free_one;
645 
646 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
647 		goto free_two;
648 
649 	if (cs && !zalloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
650 		goto free_three;
651 
652 	return 0;
653 
654 free_three:
655 	free_cpumask_var(*pmask3);
656 free_two:
657 	free_cpumask_var(*pmask2);
658 free_one:
659 	free_cpumask_var(*pmask1);
660 	return -ENOMEM;
661 }
662 
663 /**
664  * free_cpumasks - free cpumasks in a cpuset or a tmpmasks structure
665  * @cs:  the cpuset that has cpumasks to be freed.
666  * @tmp: the tmpmasks structure pointer
667  */
668 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
669 {
670 	if (cs) {
671 		free_cpumask_var(cs->cpus_allowed);
672 		free_cpumask_var(cs->cpus_requested);
673 		free_cpumask_var(cs->effective_cpus);
674 		free_cpumask_var(cs->subparts_cpus);
675 	}
676 	if (tmp) {
677 		free_cpumask_var(tmp->new_cpus);
678 		free_cpumask_var(tmp->addmask);
679 		free_cpumask_var(tmp->delmask);
680 	}
681 }
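/*
 * A usage sketch for the helpers above: callers needing only scratch
 * masks pass a NULL cpuset, mirroring how the partition code below
 * builds its struct tmpmasks:
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_cpumasks(NULL, &tmp))
 *		return -ENOMEM;
 *	// ... use tmp.new_cpus, tmp.addmask, tmp.delmask ...
 *	free_cpumasks(NULL, &tmp);
 */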
682 
683 /**
684  * alloc_trial_cpuset - allocate a trial cpuset
685  * @cs: the cpuset that the trial cpuset duplicates
686  */
687 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
688 {
689 	struct cpuset *trial;
690 
691 	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
692 	if (!trial)
693 		return NULL;
694 
695 	if (alloc_cpumasks(trial, NULL)) {
696 		kfree(trial);
697 		return NULL;
698 	}
699 
700 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
701 	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
702 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
703 	return trial;
704 }
705 
706 /**
707  * free_cpuset - free the cpuset
708  * @cs: the cpuset to be freed
709  */
710 static inline void free_cpuset(struct cpuset *cs)
711 {
712 	free_cpumasks(cs, NULL);
713 	kfree(cs);
714 }
715 
716 /*
717  * validate_change_legacy() - Validate conditions specific to legacy (v1)
718  *                            behavior.
719  */
720 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
721 {
722 	struct cgroup_subsys_state *css;
723 	struct cpuset *c, *par;
724 	int ret;
725 
726 	WARN_ON_ONCE(!rcu_read_lock_held());
727 
728 	/* Each of our child cpusets must be a subset of us */
729 	ret = -EBUSY;
730 	cpuset_for_each_child(c, css, cur)
731 		if (!is_cpuset_subset(c, trial))
732 			goto out;
733 
734 	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
735 	ret = -EACCES;
736 	par = parent_cs(cur);
737 	if (par && !is_cpuset_subset(trial, par))
738 		goto out;
739 
740 	ret = 0;
741 out:
742 	return ret;
743 }
744 
745 /*
746  * validate_change() - Used to validate that any proposed cpuset change
747  *		       follows the structural rules for cpusets.
748  *
749  * If we replaced the flag and mask values of the current cpuset
750  * (cur) with those values in the trial cpuset (trial), would
751  * our various subset and exclusive rules still be valid?  Presumes
752  * cpuset_mutex held.
753  *
754  * 'cur' is the address of an actual, in-use cpuset.  Operations
755  * such as list traversal that depend on the actual address of the
756  * cpuset in the list must use cur below, not trial.
757  *
758  * 'trial' is the address of bulk structure copy of cur, with
759  * perhaps one or more of the fields cpus_allowed, mems_allowed,
760  * or flags changed to new, trial values.
761  *
762  * Return 0 if valid, -errno if not.
763  */
764 
765 static int validate_change(struct cpuset *cur, struct cpuset *trial)
766 {
767 	struct cgroup_subsys_state *css;
768 	struct cpuset *c, *par;
769 	int ret = 0;
770 
771 	rcu_read_lock();
772 
773 	if (!is_in_v2_mode())
774 		ret = validate_change_legacy(cur, trial);
775 	if (ret)
776 		goto out;
777 
778 	/* Remaining checks don't apply to root cpuset */
779 	if (cur == &top_cpuset)
780 		goto out;
781 
782 	par = parent_cs(cur);
783 
784 	/*
785 	 * Cpusets with tasks - existing or newly being attached - can't
786 	 * be changed to have empty cpus_allowed or mems_allowed.
787 	 */
788 	ret = -ENOSPC;
789 	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
790 		if (!cpumask_empty(cur->cpus_allowed) &&
791 		    cpumask_empty(trial->cpus_allowed))
792 			goto out;
793 		if (!nodes_empty(cur->mems_allowed) &&
794 		    nodes_empty(trial->mems_allowed))
795 			goto out;
796 	}
797 
798 	/*
799 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
800 	 * tasks.
801 	 */
802 	ret = -EBUSY;
803 	if (is_cpu_exclusive(cur) &&
804 	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
805 				       trial->cpus_allowed))
806 		goto out;
807 
808 	/*
809 	 * If either I or some sibling (!= me) is exclusive, we can't
810 	 * overlap
811 	 */
812 	ret = -EINVAL;
813 	cpuset_for_each_child(c, css, par) {
814 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
815 		    c != cur &&
816 		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
817 			goto out;
818 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
819 		    c != cur &&
820 		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
821 			goto out;
822 	}
823 
824 	ret = 0;
825 out:
826 	rcu_read_unlock();
827 	return ret;
828 }
829 
830 #ifdef CONFIG_SMP
831 /*
832  * Helper routine for generate_sched_domains().
833  * Do cpusets a, b have overlapping effective_cpus masks?
834  */
835 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
836 {
837 	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
838 }
839 
840 static void
841 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
842 {
843 	if (dattr->relax_domain_level < c->relax_domain_level)
844 		dattr->relax_domain_level = c->relax_domain_level;
845 	return;
846 }
847 
848 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
849 				    struct cpuset *root_cs)
850 {
851 	struct cpuset *cp;
852 	struct cgroup_subsys_state *pos_css;
853 
854 	rcu_read_lock();
855 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
856 		/* skip the whole subtree if @cp doesn't have any CPU */
857 		if (cpumask_empty(cp->cpus_allowed)) {
858 			pos_css = css_rightmost_descendant(pos_css);
859 			continue;
860 		}
861 
862 		if (is_sched_load_balance(cp))
863 			update_domain_attr(dattr, cp);
864 	}
865 	rcu_read_unlock();
866 }
867 
868 /* Must be called with cpuset_mutex held.  */
869 static inline int nr_cpusets(void)
870 {
871 	/* jump label reference count + the top-level cpuset */
872 	return static_key_count(&cpusets_enabled_key.key) + 1;
873 }
874 
875 /*
876  * generate_sched_domains()
877  *
878  * This function builds a partial partition of the system's CPUs.
879  * A 'partial partition' is a set of non-overlapping subsets whose
880  * union is a subset of that set.
881  * The output of this function needs to be passed to kernel/sched/core.c
882  * partition_sched_domains() routine, which will rebuild the scheduler's
883  * load balancing domains (sched domains) as specified by that partial
884  * partition.
885  *
886  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
887  * for a background explanation of this.
888  *
889  * Does not return errors, on the theory that the callers of this
890  * routine would rather not worry about failures to rebuild sched
891  * domains when operating in the severe memory shortage situations
892  * that could cause allocation failures below.
893  *
894  * Must be called with cpuset_mutex held.
895  *
896  * The three key local variables below are:
897  *    cp - cpuset pointer, used (together with pos_css) to perform a
898  *	   top-down scan of all cpusets. For our purposes, rebuilding
899  *	   the scheduler's sched domains, we can ignore !is_sched_load_
900  *	   balance cpusets.
901  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
902  *	   that need to be load balanced, for convenient iterative
903  *	   access by the subsequent code that finds the best partition,
904  *	   i.e. the set of domains (subsets) of CPUs such that the
905  *	   cpus_allowed of every cpuset marked is_sched_load_balance
906  *	   is a subset of one of these domains, while there are as
907  *	   many such domains as possible, each as small as possible.
908  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
909  *	   the kernel/sched/core.c routine partition_sched_domains() in a
910  *	   convenient format, that can be easily compared to the prior
911  *	   value to determine what partition elements (sched domains)
912  *	   were changed (added or removed.)
913  *
914  * Finding the best partition (set of domains):
915  *	The triple nested loops below over i, j, k scan over the
916  *	load balanced cpusets (using the array of cpuset pointers in
917  *	csa[]) looking for pairs of cpusets that have overlapping
918  *	cpus_allowed, but which don't have the same 'pn' partition
919  *	number, and merges them into the same partition number.  It keeps
920  *	looping on the 'restart' label until it can no longer find
921  *	any such pairs.
922  *
923  *	The union of the cpus_allowed masks from the set of
924  *	all cpusets having the same 'pn' value then form the one
925  *	element of the partition (one sched domain) to be passed to
926  *	partition_sched_domains().
927  */
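/*
 * Worked example (illustrative): given three load-balanced cpusets
 * whose effective cpus are A = 0-1, B = 1-2 and C = 4-5, A and B
 * overlap, so the loops below merge their 'pn' values; the resulting
 * partial partition has ndoms == 2 sched domains: {0-2} and {4-5}.
 */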
928 static int generate_sched_domains(cpumask_var_t **domains,
929 			struct sched_domain_attr **attributes)
930 {
931 	struct cpuset *cp;	/* top-down scan of cpusets */
932 	struct cpuset **csa;	/* array of all cpuset ptrs */
933 	int csn;		/* how many cpuset ptrs in csa so far */
934 	int i, j, k;		/* indices for partition finding loops */
935 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
936 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
937 	int ndoms = 0;		/* number of sched domains in result */
938 	int nslot;		/* next empty doms[] struct cpumask slot */
939 	struct cgroup_subsys_state *pos_css;
940 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
941 
942 	doms = NULL;
943 	dattr = NULL;
944 	csa = NULL;
945 
946 	/* Special case for the 99% of systems with one, full, sched domain */
947 	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
948 		ndoms = 1;
949 		doms = alloc_sched_domains(ndoms);
950 		if (!doms)
951 			goto done;
952 
953 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
954 		if (dattr) {
955 			*dattr = SD_ATTR_INIT;
956 			update_domain_attr_tree(dattr, &top_cpuset);
957 		}
958 		cpumask_and(doms[0], top_cpuset.effective_cpus,
959 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
960 
961 		goto done;
962 	}
963 
964 	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
965 	if (!csa)
966 		goto done;
967 	csn = 0;
968 
969 	rcu_read_lock();
970 	if (root_load_balance)
971 		csa[csn++] = &top_cpuset;
972 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
973 		if (cp == &top_cpuset)
974 			continue;
975 		/*
976 		 * Continue traversing beyond @cp iff @cp has some CPUs and
977 		 * isn't load balancing.  The former is obvious.  The
978 		 * latter: All child cpusets contain a subset of the
979 		 * parent's cpus, so just skip them, and then we call
980 		 * update_domain_attr_tree() to calc relax_domain_level of
981 		 * the corresponding sched domain.
982 		 *
983 		 * If root is load-balancing, we can skip @cp if it
984 		 * is a subset of the root's effective_cpus.
985 		 */
986 		if (!cpumask_empty(cp->cpus_allowed) &&
987 		    !(is_sched_load_balance(cp) &&
988 		      cpumask_intersects(cp->cpus_allowed,
989 					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
990 			continue;
991 
992 		if (root_load_balance &&
993 		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
994 			continue;
995 
996 		if (is_sched_load_balance(cp) &&
997 		    !cpumask_empty(cp->effective_cpus))
998 			csa[csn++] = cp;
999 
1000 		/* skip @cp's subtree if not a partition root */
1001 		if (!is_partition_valid(cp))
1002 			pos_css = css_rightmost_descendant(pos_css);
1003 	}
1004 	rcu_read_unlock();
1005 
1006 	for (i = 0; i < csn; i++)
1007 		csa[i]->pn = i;
1008 	ndoms = csn;
1009 
1010 restart:
1011 	/* Find the best partition (set of sched domains) */
1012 	for (i = 0; i < csn; i++) {
1013 		struct cpuset *a = csa[i];
1014 		int apn = a->pn;
1015 
1016 		for (j = 0; j < csn; j++) {
1017 			struct cpuset *b = csa[j];
1018 			int bpn = b->pn;
1019 
1020 			if (apn != bpn && cpusets_overlap(a, b)) {
1021 				for (k = 0; k < csn; k++) {
1022 					struct cpuset *c = csa[k];
1023 
1024 					if (c->pn == bpn)
1025 						c->pn = apn;
1026 				}
1027 				ndoms--;	/* one less element */
1028 				goto restart;
1029 			}
1030 		}
1031 	}
1032 
1033 	/*
1034 	 * Now we know how many domains to create.
1035 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1036 	 */
1037 	doms = alloc_sched_domains(ndoms);
1038 	if (!doms)
1039 		goto done;
1040 
1041 	/*
1042 	 * The rest of the code, including the scheduler, can deal with
1043 	 * dattr==NULL case. No need to abort if alloc fails.
1044 	 */
1045 	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1046 			      GFP_KERNEL);
1047 
1048 	for (nslot = 0, i = 0; i < csn; i++) {
1049 		struct cpuset *a = csa[i];
1050 		struct cpumask *dp;
1051 		int apn = a->pn;
1052 
1053 		if (apn < 0) {
1054 			/* Skip completed partitions */
1055 			continue;
1056 		}
1057 
1058 		dp = doms[nslot];
1059 
1060 		if (nslot == ndoms) {
1061 			static int warnings = 10;
1062 			if (warnings) {
1063 				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1064 					nslot, ndoms, csn, i, apn);
1065 				warnings--;
1066 			}
1067 			continue;
1068 		}
1069 
1070 		cpumask_clear(dp);
1071 		if (dattr)
1072 			*(dattr + nslot) = SD_ATTR_INIT;
1073 		for (j = i; j < csn; j++) {
1074 			struct cpuset *b = csa[j];
1075 
1076 			if (apn == b->pn) {
1077 				cpumask_or(dp, dp, b->effective_cpus);
1078 				cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1079 				if (dattr)
1080 					update_domain_attr_tree(dattr + nslot, b);
1081 
1082 				/* Done with this partition */
1083 				b->pn = -1;
1084 			}
1085 		}
1086 		nslot++;
1087 	}
1088 	BUG_ON(nslot != ndoms);
1089 
1090 done:
1091 	kfree(csa);
1092 
1093 	/*
1094 	 * Fall back to the default domain if kmalloc() failed.
1095 	 * See comments in partition_sched_domains().
1096 	 */
1097 	if (doms == NULL)
1098 		ndoms = 1;
1099 
1100 	*domains    = doms;
1101 	*attributes = dattr;
1102 	return ndoms;
1103 }
1104 
1105 static void dl_update_tasks_root_domain(struct cpuset *cs)
1106 {
1107 	struct css_task_iter it;
1108 	struct task_struct *task;
1109 
1110 	if (cs->nr_deadline_tasks == 0)
1111 		return;
1112 
1113 	css_task_iter_start(&cs->css, 0, &it);
1114 
1115 	while ((task = css_task_iter_next(&it)))
1116 		dl_add_task_root_domain(task);
1117 
1118 	css_task_iter_end(&it);
1119 }
1120 
1121 static void dl_rebuild_rd_accounting(void)
1122 {
1123 	struct cpuset *cs = NULL;
1124 	struct cgroup_subsys_state *pos_css;
1125 
1126 	lockdep_assert_held(&cpuset_mutex);
1127 	lockdep_assert_cpus_held();
1128 	lockdep_assert_held(&sched_domains_mutex);
1129 
1130 	rcu_read_lock();
1131 
1132 	/*
1133 	 * Clear default root domain DL accounting, it will be computed again
1134 	 * if a task belongs to it.
1135 	 */
1136 	dl_clear_root_domain(&def_root_domain);
1137 
1138 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1139 
1140 		if (cpumask_empty(cs->effective_cpus)) {
1141 			pos_css = css_rightmost_descendant(pos_css);
1142 			continue;
1143 		}
1144 
1145 		css_get(&cs->css);
1146 
1147 		rcu_read_unlock();
1148 
1149 		dl_update_tasks_root_domain(cs);
1150 
1151 		rcu_read_lock();
1152 		css_put(&cs->css);
1153 	}
1154 	rcu_read_unlock();
1155 }
1156 
1157 static void
1158 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1159 				    struct sched_domain_attr *dattr_new)
1160 {
1161 	mutex_lock(&sched_domains_mutex);
1162 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1163 	dl_rebuild_rd_accounting();
1164 	mutex_unlock(&sched_domains_mutex);
1165 }
1166 
1167 /*
1168  * Rebuild scheduler domains.
1169  *
1170  * If the flag 'sched_load_balance' of any cpuset with non-empty
1171  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1172  * which has that flag enabled, or if any cpuset with a non-empty
1173  * 'cpus' is removed, then call this routine to rebuild the
1174  * scheduler's dynamic sched domains.
1175  *
1176  * Call with cpuset_mutex held.  Takes cpus_read_lock().
1177  */
1178 static void rebuild_sched_domains_locked(void)
1179 {
1180 	struct cgroup_subsys_state *pos_css;
1181 	struct sched_domain_attr *attr;
1182 	cpumask_var_t *doms;
1183 	struct cpuset *cs;
1184 	int ndoms;
1185 
1186 	lockdep_assert_cpus_held();
1187 	lockdep_assert_held(&cpuset_mutex);
1188 
1189 	/*
1190 	 * If we have raced with CPU hotplug, return early to avoid
1191 	 * passing doms with offlined cpu to partition_sched_domains().
1192 	 * Anyway, cpuset_hotplug_workfn() will rebuild sched domains.
1193 	 *
1194 	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1195 	 * should be the same as the active CPUs, so checking only top_cpuset
1196 	 * is enough to detect racing CPU offlines.
1197 	 */
1198 	if (!top_cpuset.nr_subparts_cpus &&
1199 	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1200 		return;
1201 
1202 	/*
1203 	 * With subpartition CPUs, however, the effective CPUs of a partition
1204 	 * root should be only a subset of the active CPUs.  Since a CPU in any
1205 	 * partition root could be offlined, all must be checked.
1206 	 */
1207 	if (top_cpuset.nr_subparts_cpus) {
1208 		rcu_read_lock();
1209 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1210 			if (!is_partition_valid(cs)) {
1211 				pos_css = css_rightmost_descendant(pos_css);
1212 				continue;
1213 			}
1214 			if (!cpumask_subset(cs->effective_cpus,
1215 					    cpu_active_mask)) {
1216 				rcu_read_unlock();
1217 				return;
1218 			}
1219 		}
1220 		rcu_read_unlock();
1221 	}
1222 
1223 	/* Generate domain masks and attrs */
1224 	ndoms = generate_sched_domains(&doms, &attr);
1225 
1226 	/* Have scheduler rebuild the domains */
1227 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
1228 }
1229 #else /* !CONFIG_SMP */
1230 static void rebuild_sched_domains_locked(void)
1231 {
1232 }
1233 #endif /* CONFIG_SMP */
1234 
1235 void rebuild_sched_domains(void)
1236 {
1237 	cpus_read_lock();
1238 	mutex_lock(&cpuset_mutex);
1239 	rebuild_sched_domains_locked();
1240 	mutex_unlock(&cpuset_mutex);
1241 	cpus_read_unlock();
1242 }
1243 EXPORT_SYMBOL_GPL(rebuild_sched_domains);
1244 
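/*
 * trace_android_rvh_update_cpus_allowed() is an Android restricted
 * vendor hook: a registered handler may perform the cpumask update
 * itself and set @ret to 0, in which case the set_cpus_allowed_ptr()
 * fallback below is skipped.
 */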
1245 static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
1246 				const struct cpumask *new_mask)
1247 {
1248 	int ret = -EINVAL;
1249 
1250 	trace_android_rvh_update_cpus_allowed(p, cs->cpus_requested, new_mask, &ret);
1251 	if (!ret)
1252 		return ret;
1253 
1254 	return set_cpus_allowed_ptr(p, new_mask);
1255 }
1256 
1257 /**
1258  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1259  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1260  * @new_cpus: the temp variable for the new effective_cpus mask
1261  *
1262  * Iterate through each task of @cs updating its cpus_allowed to the
1263  * effective cpuset's.  As this function is called with cpuset_mutex held,
1264  * cpuset membership stays stable.
1265  */
1266 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1267 {
1268 	struct css_task_iter it;
1269 	struct task_struct *task;
1270 	bool top_cs = cs == &top_cpuset;
1271 
1272 	css_task_iter_start(&cs->css, 0, &it);
1273 	while ((task = css_task_iter_next(&it))) {
1274 		/*
1275 		 * Percpu kthreads in top_cpuset are ignored
1276 		 */
1277 		if (top_cs && (task->flags & PF_KTHREAD) &&
1278 		    kthread_is_per_cpu(task))
1279 			continue;
1280 
1281 		cpumask_and(new_cpus, cs->effective_cpus,
1282 			    task_cpu_possible_mask(task));
1283 		update_cpus_allowed(cs, task, new_cpus);
1284 	}
1285 	css_task_iter_end(&it);
1286 }
1287 
1288 /**
1289  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1290  * @new_cpus: the temp variable for the new effective_cpus mask
1291  * @cs: the cpuset that needs its effective_cpus mask recomputed
1292  * @parent: the parent cpuset
1293  *
1294  * If the parent has subpartition CPUs, include them in the list of
1295  * allowable CPUs in computing the new effective_cpus mask. Since offlined
1296  * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1297  * to mask those out.
1298  */
1299 static void compute_effective_cpumask(struct cpumask *new_cpus,
1300 				      struct cpuset *cs, struct cpuset *parent)
1301 {
1302 	if (parent->nr_subparts_cpus) {
1303 		cpumask_or(new_cpus, parent->effective_cpus,
1304 			   parent->subparts_cpus);
1305 		cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
1306 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1307 	} else {
1308 		cpumask_and(new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
1309 	}
1310 }
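/*
 * E.g. (illustrative values): with parent->effective_cpus = 0-1,
 * parent->subparts_cpus = 2-3 (cpu 3 having gone offline) and
 * cs->cpus_requested = 1-3, the result is
 * (0-1 | 2-3) & 1-3 & cpu_active_mask = 1-2.
 */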
1311 
1312 /*
1313  * Commands for update_parent_subparts_cpumask
1314  */
1315 enum subparts_cmd {
1316 	partcmd_enable,		/* Enable partition root	 */
1317 	partcmd_disable,	/* Disable partition root	 */
1318 	partcmd_update,		/* Update parent's subparts_cpus */
1319 	partcmd_invalidate,	/* Make partition invalid	 */
1320 };
1321 
1322 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1323 		       int turning_on);
1324 /**
1325  * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1326  * @cs:      The cpuset that requests change in partition root state
1327  * @cmd:     Partition root state change command
1328  * @newmask: Optional new cpumask for partcmd_update
1329  * @tmp:     Temporary addmask and delmask
1330  * Return:   0 or a partition root state error code
1331  *
1332  * For partcmd_enable, the cpuset is being transformed from a non-partition
1333  * root to a partition root. The cpus_allowed mask of the given cpuset will
1334  * be put into parent's subparts_cpus and taken away from parent's
1335  * effective_cpus. The function will return 0 if all the CPUs listed in
1336  * cpus_allowed can be granted or an error code will be returned.
1337  *
1338  * For partcmd_disable, the cpuset is being transformed from a partition
1339  * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1340  * parent's subparts_cpus will be taken away from that cpumask and put back
1341  * into parent's effective_cpus. 0 will always be returned.
1342  *
1343  * For partcmd_update, if the optional newmask is specified, the cpu list is
1344  * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1345  * assumed to remain the same. The cpuset should either be a valid or invalid
1346  * partition root. The partition root state may change from valid to invalid
1347  * or vice versa. An error code will only be returned if transitioning from
1348  * invalid to valid violates the exclusivity rule.
1349  *
1350  * For partcmd_invalidate, the current partition will be made invalid.
1351  *
1352  * The partcmd_enable and partcmd_disable commands are used by
1353  * update_prstate(). An error code may be returned and the caller will check
1354  * for error.
1355  *
1356  * The partcmd_update command is used by update_cpumasks_hier() with newmask
1357  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1358  * by update_cpumask() with NULL newmask. In both cases, the callers won't
1359  * check for error and so partition_root_state and prs_error will be updated
1360  * directly.
1361  */
1362 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1363 					  struct cpumask *newmask,
1364 					  struct tmpmasks *tmp)
1365 {
1366 	struct cpuset *parent = parent_cs(cs);
1367 	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
1368 	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
1369 	int old_prs, new_prs;
1370 	int part_error = PERR_NONE;	/* Partition error? */
1371 
1372 	lockdep_assert_held(&cpuset_mutex);
1373 
1374 	/*
1375 	 * The parent must be a partition root.
1376 	 * The new cpumask, if present, or the current cpus_allowed must
1377 	 * not be empty.
1378 	 */
1379 	if (!is_partition_valid(parent)) {
1380 		return is_partition_invalid(parent)
1381 		       ? PERR_INVPARENT : PERR_NOTPART;
1382 	}
1383 	if ((newmask && cpumask_empty(newmask)) ||
1384 	   (!newmask && cpumask_empty(cs->cpus_allowed)))
1385 		return PERR_CPUSEMPTY;
1386 
1387 	/*
1388 	 * new_prs will only be changed for the partcmd_update and
1389 	 * partcmd_invalidate commands.
1390 	 */
1391 	adding = deleting = false;
1392 	old_prs = new_prs = cs->partition_root_state;
1393 	if (cmd == partcmd_enable) {
1394 		/*
1395 		 * Enabling partition root is not allowed if cpus_allowed
1396 		 * doesn't overlap parent's cpus_allowed.
1397 		 */
1398 		if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1399 			return PERR_INVCPUS;
1400 
1401 		/*
1402 		 * A parent can be left with no CPU as long as there is no
1403 		 * task directly associated with the parent partition.
1404 		 */
1405 		if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1406 		    partition_is_populated(parent, cs))
1407 			return PERR_NOCPUS;
1408 
1409 		cpumask_copy(tmp->addmask, cs->cpus_allowed);
1410 		adding = true;
1411 	} else if (cmd == partcmd_disable) {
1412 		/*
1413 		 * Need to remove cpus from parent's subparts_cpus for valid
1414 		 * partition root.
1415 		 */
1416 		deleting = !is_prs_invalid(old_prs) &&
1417 			   cpumask_and(tmp->delmask, cs->cpus_allowed,
1418 				       parent->subparts_cpus);
1419 	} else if (cmd == partcmd_invalidate) {
1420 		if (is_prs_invalid(old_prs))
1421 			return 0;
1422 
1423 		/*
1424 		 * Make the current partition invalid. It is assumed that
1425 		 * invalidation is caused by violating cpu exclusivity rule.
1426 		 */
1427 		deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1428 				       parent->subparts_cpus);
1429 		if (old_prs > 0) {
1430 			new_prs = -old_prs;
1431 			part_error = PERR_NOTEXCL;
1432 		}
1433 	} else if (newmask) {
1434 		/*
1435 		 * partcmd_update with newmask:
1436 		 *
1437 		 * Compute add/delete mask to/from subparts_cpus
1438 		 *
1439 		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1440 		 * addmask = newmask & parent->cpus_allowed
1441 		 *		     & ~parent->subparts_cpus
1442 		 */
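		/*
		 * Worked example (illustrative): cpus_allowed = 0-3,
		 * newmask = 2-5, parent->cpus_allowed = 0-7 and
		 * parent->subparts_cpus = 0-3 give
		 *	delmask = 0-3 & ~(2-5) & 0-3 = 0-1
		 *	addmask = 2-5 & 0-7 & ~(0-3) = 4-5
		 */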
1443 		cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1444 		deleting = cpumask_and(tmp->delmask, tmp->delmask,
1445 				       parent->subparts_cpus);
1446 
1447 		cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1448 		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1449 					parent->subparts_cpus);
1450 		/*
1451 		 * Make partition invalid if parent's effective_cpus could
1452 		 * become empty and there are tasks in the parent.
1453 		 */
1454 		if (adding &&
1455 		    cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1456 		    !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1457 		    partition_is_populated(parent, cs)) {
1458 			part_error = PERR_NOCPUS;
1459 			adding = false;
1460 			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1461 					       parent->subparts_cpus);
1462 		}
1463 	} else {
1464 		/*
1465 		 * partcmd_update w/o newmask:
1466 		 *
1467 		 * delmask = cpus_allowed & parent->subparts_cpus
1468 		 * addmask = cpus_allowed & parent->cpus_allowed
1469 		 *			  & ~parent->subparts_cpus
1470 		 *
1471 		 * This gets invoked either due to a hotplug event or from
1472 		 * update_cpumasks_hier(). This can cause the state of a
1473 		 * partition root to transition from valid to invalid or vice
1474 		 * versa. So we still need to compute the addmask and delmask.
1475 		 *
1476 		 * A partition error happens when:
1477 		 * 1) Cpuset is valid partition, but parent does not distribute
1478 		 *    out any CPUs.
1479 		 * 2) Parent has tasks and all its effective CPUs will have
1480 		 *    to be distributed out.
1481 		 */
1482 		cpumask_and(tmp->addmask, cs->cpus_allowed,
1483 					  parent->cpus_allowed);
1484 		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1485 					parent->subparts_cpus);
1486 
1487 		if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1488 		    (adding &&
1489 		     cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1490 		     partition_is_populated(parent, cs))) {
1491 			part_error = PERR_NOCPUS;
1492 			adding = false;
1493 		}
1494 
1495 		if (part_error && is_partition_valid(cs) &&
1496 		    parent->nr_subparts_cpus)
1497 			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1498 					       parent->subparts_cpus);
1499 	}
1500 	if (part_error)
1501 		WRITE_ONCE(cs->prs_err, part_error);
1502 
1503 	if (cmd == partcmd_update) {
1504 		/*
1505 		 * Check for possible transition between valid and invalid
1506 		 * partition root.
1507 		 */
1508 		switch (cs->partition_root_state) {
1509 		case PRS_ROOT:
1510 		case PRS_ISOLATED:
1511 			if (part_error)
1512 				new_prs = -old_prs;
1513 			break;
1514 		case PRS_INVALID_ROOT:
1515 		case PRS_INVALID_ISOLATED:
1516 			if (!part_error)
1517 				new_prs = -old_prs;
1518 			break;
1519 		}
1520 	}
1521 
1522 	if (!adding && !deleting && (new_prs == old_prs))
1523 		return 0;
1524 
1525 	/*
1526 	 * Transitioning between invalid to valid or vice versa may require
1527 	 * changing CS_CPU_EXCLUSIVE and CS_SCHED_LOAD_BALANCE.
1528 	 */
1529 	if (old_prs != new_prs) {
1530 		if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) &&
1531 		    (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0))
1532 			return PERR_NOTEXCL;
1533 		if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs))
1534 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1535 	}
1536 
1537 	/*
1538 	 * Change the parent's subparts_cpus.
1539 	 * Newly added CPUs will be removed from effective_cpus and
1540 	 * newly deleted ones will be added back to effective_cpus.
1541 	 */
1542 	spin_lock_irq(&callback_lock);
1543 	if (adding) {
1544 		cpumask_or(parent->subparts_cpus,
1545 			   parent->subparts_cpus, tmp->addmask);
1546 		cpumask_andnot(parent->effective_cpus,
1547 			       parent->effective_cpus, tmp->addmask);
1548 	}
1549 	if (deleting) {
1550 		cpumask_andnot(parent->subparts_cpus,
1551 			       parent->subparts_cpus, tmp->delmask);
1552 		/*
1553 		 * Some of the CPUs in subparts_cpus might have been offlined.
1554 		 */
1555 		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1556 		cpumask_or(parent->effective_cpus,
1557 			   parent->effective_cpus, tmp->delmask);
1558 	}
1559 
1560 	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1561 
1562 	if (old_prs != new_prs)
1563 		cs->partition_root_state = new_prs;
1564 
1565 	spin_unlock_irq(&callback_lock);
1566 
1567 	if (adding || deleting)
1568 		update_tasks_cpumask(parent, tmp->addmask);
1569 
1570 	/*
1571 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
1572 	 * rebuild_sched_domains_locked() may be called.
1573 	 */
1574 	if (old_prs != new_prs) {
1575 		if (old_prs == PRS_ISOLATED)
1576 			update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
1577 		else if (new_prs == PRS_ISOLATED)
1578 			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1579 	}
1580 	notify_partition_change(cs, old_prs);
1581 	return 0;
1582 }
1583 
1584 /*
1585  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1586  * @cs:  the cpuset to consider
1587  * @tmp: temp variables for calculating effective_cpus & partition setup
1588  * @force: don't skip any descendant cpusets if set
1589  *
1590  * When configured cpumask is changed, the effective cpumasks of this cpuset
1591  * and all its descendants need to be updated.
1592  *
1593  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1594  *
1595  * Called with cpuset_mutex held
1596  */
1597 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1598 				 bool force)
1599 {
1600 	struct cpuset *cp;
1601 	struct cgroup_subsys_state *pos_css;
1602 	bool need_rebuild_sched_domains = false;
1603 	int old_prs, new_prs;
1604 
1605 	rcu_read_lock();
1606 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1607 		struct cpuset *parent = parent_cs(cp);
1608 		bool update_parent = false;
1609 
1610 		compute_effective_cpumask(tmp->new_cpus, cp, parent);
1611 
1612 		/*
1613 		 * If it becomes empty, inherit the effective mask of the
1614 		 * parent, which is guaranteed to have some CPUs unless
1615 		 * it is a partition root that has explicitly distributed
1616 		 * out all its CPUs.
1617 		 */
1618 		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1619 			if (is_partition_valid(cp) &&
1620 			    cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1621 				goto update_parent_subparts;
1622 
1623 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1624 			if (!cp->use_parent_ecpus) {
1625 				cp->use_parent_ecpus = true;
1626 				parent->child_ecpus_count++;
1627 			}
1628 		} else if (cp->use_parent_ecpus) {
1629 			cp->use_parent_ecpus = false;
1630 			WARN_ON_ONCE(!parent->child_ecpus_count);
1631 			parent->child_ecpus_count--;
1632 		}
1633 
1634 		/*
1635 		 * Skip the whole subtree if
1636 		 * 1) the cpumask remains the same,
1637 		 * 2) it has no partition root state,
1638 		 * 3) the force flag is not set, and
1639 		 * 4) on v2, its load balance state matches its parent's.
1640 		 */
1641 		if (!cp->partition_root_state && !force &&
1642 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
1643 		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1644 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
1645 			pos_css = css_rightmost_descendant(pos_css);
1646 			continue;
1647 		}
1648 
1649 update_parent_subparts:
1650 		/*
1651 		 * update_parent_subparts_cpumask() should have been called
1652 		 * for cs already in update_cpumask(). We should also call
1653 		 * update_tasks_cpumask() again for tasks in the parent
1654 		 * cpuset if the parent's subparts_cpus changes.
1655 		 */
1656 		old_prs = new_prs = cp->partition_root_state;
1657 		if ((cp != cs) && old_prs) {
1658 			switch (parent->partition_root_state) {
1659 			case PRS_ROOT:
1660 			case PRS_ISOLATED:
1661 				update_parent = true;
1662 				break;
1663 
1664 			default:
1665 				/*
1666 				 * When parent is not a partition root or is
1667 				 * invalid, child partition roots become
1668 				 * invalid too.
1669 				 */
1670 				if (is_partition_valid(cp))
1671 					new_prs = -cp->partition_root_state;
1672 				WRITE_ONCE(cp->prs_err,
1673 					   is_partition_invalid(parent)
1674 					   ? PERR_INVPARENT : PERR_NOTPART);
1675 				break;
1676 			}
1677 		}
1678 
1679 		if (!css_tryget_online(&cp->css))
1680 			continue;
1681 		rcu_read_unlock();
1682 
1683 		if (update_parent) {
1684 			update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1685 						       tmp);
1686 			/*
1687 			 * The cpuset partition_root_state may become
1688 			 * invalid. Capture it.
1689 			 */
1690 			new_prs = cp->partition_root_state;
1691 		}
1692 
1693 		spin_lock_irq(&callback_lock);
1694 
1695 		if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1696 			/*
1697 			 * Put all active subparts_cpus back to effective_cpus.
1698 			 */
1699 			cpumask_or(tmp->new_cpus, tmp->new_cpus,
1700 				   cp->subparts_cpus);
1701 			cpumask_and(tmp->new_cpus, tmp->new_cpus,
1702 				   cpu_active_mask);
1703 			cp->nr_subparts_cpus = 0;
1704 			cpumask_clear(cp->subparts_cpus);
1705 		}
1706 
1707 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1708 		if (cp->nr_subparts_cpus) {
1709 			/*
1710 			 * Make sure that effective_cpus & subparts_cpus
1711 			 * are mutually exclusive.
1712 			 */
1713 			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1714 				       cp->subparts_cpus);
1715 		}
1716 
1717 		cp->partition_root_state = new_prs;
1718 		spin_unlock_irq(&callback_lock);
1719 
1720 		notify_partition_change(cp, old_prs);
1721 
1722 		WARN_ON(!is_in_v2_mode() &&
1723 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1724 
1725 		update_tasks_cpumask(cp, tmp->new_cpus);
1726 
1727 		/*
1728 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
1729 		 * from parent if current cpuset isn't a valid partition root
1730 		 * and their load balance states differ.
1731 		 */
1732 		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1733 		    !is_partition_valid(cp) &&
1734 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
1735 			if (is_sched_load_balance(parent))
1736 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1737 			else
1738 				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1739 		}
1740 
1741 		/*
1742 		 * On legacy hierarchy, if the effective cpumask of any non-
1743 		 * empty cpuset is changed, we need to rebuild sched domains.
1744 		 * On default hierarchy, the cpuset needs to be a partition
1745 		 * root as well.
1746 		 */
1747 		if (!cpumask_empty(cp->cpus_allowed) &&
1748 		    is_sched_load_balance(cp) &&
1749 		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1750 		    is_partition_valid(cp)))
1751 			need_rebuild_sched_domains = true;
1752 
1753 		rcu_read_lock();
1754 		css_put(&cp->css);
1755 	}
1756 	rcu_read_unlock();
1757 
1758 	if (need_rebuild_sched_domains)
1759 		rebuild_sched_domains_locked();
1760 }
1761 
1762 /**
1763  * update_sibling_cpumasks - Update siblings cpumasks
1764  * @parent:  Parent cpuset
1765  * @cs:      Current cpuset
1766  * @tmp:     Temp variables
1767  */
1768 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1769 				    struct tmpmasks *tmp)
1770 {
1771 	struct cpuset *sibling;
1772 	struct cgroup_subsys_state *pos_css;
1773 
1774 	lockdep_assert_held(&cpuset_mutex);
1775 
1776 	/*
1777 	 * Check all its siblings and call update_cpumasks_hier()
1778 	 * if their use_parent_ecpus flag is set in order for them
1779 	 * to use the right effective_cpus value.
1780 	 *
1781 	 * The update_cpumasks_hier() function may sleep. So we have to
1782 	 * release the RCU read lock before calling it.
1783 	 */
1784 	rcu_read_lock();
1785 	cpuset_for_each_child(sibling, pos_css, parent) {
1786 		if (sibling == cs)
1787 			continue;
1788 		if (!sibling->use_parent_ecpus)
1789 			continue;
1790 		if (!css_tryget_online(&sibling->css))
1791 			continue;
1792 
1793 		rcu_read_unlock();
1794 		update_cpumasks_hier(sibling, tmp, false);
1795 		rcu_read_lock();
1796 		css_put(&sibling->css);
1797 	}
1798 	rcu_read_unlock();
1799 }
1800 
1801 /**
1802  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1803  * @cs: the cpuset to consider
1804  * @trialcs: trial cpuset
1805  * @buf: buffer of cpu numbers written to this cpuset
1806  */
1807 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1808 			  const char *buf)
1809 {
1810 	int retval;
1811 	struct tmpmasks tmp;
1812 	bool invalidate = false;
1813 
1814 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1815 	if (cs == &top_cpuset)
1816 		return -EACCES;
1817 
1818 	/*
1819 	 * An empty cpus_requested is ok only if the cpuset has no tasks.
1820 	 * Since cpulist_parse() fails on an empty mask, we special case
1821 	 * that parsing.  The validate_change() call ensures that cpusets
1822 	 * with tasks have cpus.
1823 	 */
1824 	if (!*buf) {
1825 		cpumask_clear(trialcs->cpus_requested);
1826 	} else {
1827 		retval = cpulist_parse(buf, trialcs->cpus_requested);
1828 		if (retval < 0)
1829 			return retval;
1830 	}
1831 
1832 	if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
1833 		return -EINVAL;
1834 
1835 	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
1836 
1837 	/* Nothing to do if the cpus didn't change */
1838 	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
1839 		return 0;
1840 
1841 #ifdef CONFIG_CPUMASK_OFFSTACK
1842 	/*
1843 	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
1844 	 * to allocated cpumasks.
1845 	 *
1846 	 * Note that update_parent_subparts_cpumask() uses only addmask &
1847 	 * delmask, but not new_cpus.
1848 	 */
1849 	tmp.addmask  = trialcs->subparts_cpus;
1850 	tmp.delmask  = trialcs->effective_cpus;
1851 	tmp.new_cpus = NULL;
1852 #endif
1853 
1854 	retval = validate_change(cs, trialcs);
1855 
1856 	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1857 		struct cpuset *cp, *parent;
1858 		struct cgroup_subsys_state *css;
1859 
1860 		/*
1861 		 * The -EINVAL error code indicates that the partition sibling
1862 		 * CPU exclusivity rule has been violated. We still allow
1863 		 * the cpumask change to proceed while invalidating the
1864 		 * partition. However, any conflicting sibling partitions
1865 		 * have to be marked as invalid too.
1866 		 */
1867 		invalidate = true;
1868 		rcu_read_lock();
1869 		parent = parent_cs(cs);
1870 		cpuset_for_each_child(cp, css, parent)
1871 			if (is_partition_valid(cp) &&
1872 			    cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1873 				rcu_read_unlock();
1874 				update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1875 				rcu_read_lock();
1876 			}
1877 		rcu_read_unlock();
1878 		retval = 0;
1879 	}
1880 	if (retval < 0)
1881 		return retval;
1882 
1883 	if (cs->partition_root_state) {
1884 		if (invalidate)
1885 			update_parent_subparts_cpumask(cs, partcmd_invalidate,
1886 						       NULL, &tmp);
1887 		else
1888 			update_parent_subparts_cpumask(cs, partcmd_update,
1889 						trialcs->cpus_allowed, &tmp);
1890 	}
1891 
1892 	compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1893 				  parent_cs(cs));
1894 	spin_lock_irq(&callback_lock);
1895 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1896 	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
1897 
1898 	/*
1899 	 * Make sure that subparts_cpus, if not empty, is a subset of
1900 	 * cpus_allowed. Clear subparts_cpus if the partition is not valid,
1901 	 * or if the effective cpus would become empty while tasks are present.
1902 	 */
1903 	if (cs->nr_subparts_cpus) {
1904 		if (!is_partition_valid(cs) ||
1905 		   (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1906 		    partition_is_populated(cs, NULL))) {
1907 			cs->nr_subparts_cpus = 0;
1908 			cpumask_clear(cs->subparts_cpus);
1909 		} else {
1910 			cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1911 				    cs->cpus_allowed);
1912 			cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1913 		}
1914 	}
1915 	spin_unlock_irq(&callback_lock);
1916 
1917 #ifdef CONFIG_CPUMASK_OFFSTACK
1918 	/* Now trialcs->cpus_allowed is available */
1919 	tmp.new_cpus = trialcs->cpus_allowed;
1920 #endif
1921 
1922 	/* effective_cpus will be updated here */
1923 	update_cpumasks_hier(cs, &tmp, false);
1924 
1925 	if (cs->partition_root_state) {
1926 		struct cpuset *parent = parent_cs(cs);
1927 
1928 		/*
1929 		 * For partition root, update the cpumasks of sibling
1930 		 * cpusets if they use parent's effective_cpus.
1931 		 */
1932 		if (parent->child_ecpus_count)
1933 			update_sibling_cpumasks(parent, cs, &tmp);
1934 	}
1935 	return 0;
1936 }
1937 
1938 /*
1939  * Migrate memory region from one set of nodes to another.  This is
1940  * performed asynchronously as it can be called from the process migration path
1941  * holding locks involved in process management.  All mm migrations are
1942  * performed in the queued order and can be waited for by flushing
1943  * cpuset_migrate_mm_wq.
1944  */
1945 
1946 struct cpuset_migrate_mm_work {
1947 	struct work_struct	work;
1948 	struct mm_struct	*mm;
1949 	nodemask_t		from;
1950 	nodemask_t		to;
1951 };
1952 
1953 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1954 {
1955 	struct cpuset_migrate_mm_work *mwork =
1956 		container_of(work, struct cpuset_migrate_mm_work, work);
1957 
1958 	/* on a wq worker, no need to worry about %current's mems_allowed */
1959 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1960 	mmput(mwork->mm);
1961 	kfree(mwork);
1962 }
1963 
1964 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1965 							const nodemask_t *to)
1966 {
1967 	struct cpuset_migrate_mm_work *mwork;
1968 
1969 	if (nodes_equal(*from, *to)) {
1970 		mmput(mm);
1971 		return;
1972 	}
1973 
1974 	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1975 	if (mwork) {
1976 		mwork->mm = mm;
1977 		mwork->from = *from;
1978 		mwork->to = *to;
1979 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1980 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
1981 	} else {
1982 		mmput(mm);
1983 	}
1984 }
1985 
1986 static void cpuset_post_attach(void)
1987 {
1988 	flush_workqueue(cpuset_migrate_mm_wq);
1989 }
1990 
1991 /*
1992  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1993  * @tsk: the task to change
1994  * @newmems: new nodes that the task will be set
1995  *
1996  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1997  * and rebind the task's mempolicy, if any. If the task is allocating in
1998  * parallel, it might temporarily see an empty intersection, which results in
1999  * a seqlock check and retry before OOM or allocation failure.
2000  */
2001 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2002 					nodemask_t *newmems)
2003 {
2004 	task_lock(tsk);
2005 
2006 	local_irq_disable();
2007 	write_seqcount_begin(&tsk->mems_allowed_seq);
2008 
2009 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2010 	mpol_rebind_task(tsk, newmems);
2011 	tsk->mems_allowed = *newmems;
2012 
2013 	write_seqcount_end(&tsk->mems_allowed_seq);
2014 	local_irq_enable();
2015 
2016 	task_unlock(tsk);
2017 }
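
/*
 * For illustration, a sketch of the reader side this writer pairs with:
 * allocator paths snapshot mems_allowed under a seqcount loop (in the
 * kernel proper via the read_mems_allowed_begin()/read_mems_allowed_retry()
 * helpers), retrying if a rebind like the one above raced with them.
 * The helper name below is hypothetical.
 */
static inline void snapshot_mems_allowed(struct task_struct *tsk,
					 nodemask_t *snapshot)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		*snapshot = tsk->mems_allowed;	/* may race with a rebind */
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));
}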
2018 
2019 static void *cpuset_being_rebound;
2020 
2021 /**
2022  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2023  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2024  *
2025  * Iterate through each task of @cs updating its mems_allowed to the
2026  * effective cpuset's.  As this function is called with cpuset_mutex held,
2027  * cpuset membership stays stable.
2028  */
2029 static void update_tasks_nodemask(struct cpuset *cs)
2030 {
2031 	static nodemask_t newmems;	/* protected by cpuset_mutex */
2032 	struct css_task_iter it;
2033 	struct task_struct *task;
2034 
2035 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2036 
2037 	guarantee_online_mems(cs, &newmems);
2038 
2039 	/*
2040 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2041 	 * take while holding tasklist_lock.  Forks can happen - the
2042 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2043 	 * and rebind their vma mempolicies too.  Because we still hold
2044 	 * the global cpuset_mutex, we know that no other rebind effort
2045 	 * will be contending for the global variable cpuset_being_rebound.
2046 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2047 	 * is idempotent.  Also migrate pages in each mm to new nodes.
2048 	 */
2049 	css_task_iter_start(&cs->css, 0, &it);
2050 	while ((task = css_task_iter_next(&it))) {
2051 		struct mm_struct *mm;
2052 		bool migrate;
2053 
2054 		cpuset_change_task_nodemask(task, &newmems);
2055 
2056 		mm = get_task_mm(task);
2057 		if (!mm)
2058 			continue;
2059 
2060 		migrate = is_memory_migrate(cs);
2061 
2062 		mpol_rebind_mm(mm, &cs->mems_allowed);
2063 		if (migrate)
2064 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2065 		else
2066 			mmput(mm);
2067 	}
2068 	css_task_iter_end(&it);
2069 
2070 	/*
2071 	 * All the tasks' nodemasks have been updated, update
2072 	 * cs->old_mems_allowed.
2073 	 */
2074 	cs->old_mems_allowed = newmems;
2075 
2076 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2077 	cpuset_being_rebound = NULL;
2078 }
2079 
2080 /*
2081  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2082  * @cs: the cpuset to consider
2083  * @new_mems: a temp variable for calculating new effective_mems
2084  *
2085  * When configured nodemask is changed, the effective nodemasks of this cpuset
2086  * and all its descendants need to be updated.
2087  *
2088  * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2089  *
2090  * Called with cpuset_mutex held
2091  */
2092 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2093 {
2094 	struct cpuset *cp;
2095 	struct cgroup_subsys_state *pos_css;
2096 
2097 	rcu_read_lock();
2098 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2099 		struct cpuset *parent = parent_cs(cp);
2100 
2101 		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2102 
2103 		/*
2104 		 * If it becomes empty, inherit the effective mask of the
2105 		 * parent, which is guaranteed to have some MEMs.
2106 		 */
2107 		if (is_in_v2_mode() && nodes_empty(*new_mems))
2108 			*new_mems = parent->effective_mems;
2109 
2110 		/* Skip the whole subtree if the nodemask remains the same. */
2111 		if (nodes_equal(*new_mems, cp->effective_mems)) {
2112 			pos_css = css_rightmost_descendant(pos_css);
2113 			continue;
2114 		}
2115 
2116 		if (!css_tryget_online(&cp->css))
2117 			continue;
2118 		rcu_read_unlock();
2119 
2120 		spin_lock_irq(&callback_lock);
2121 		cp->effective_mems = *new_mems;
2122 		spin_unlock_irq(&callback_lock);
2123 
2124 		WARN_ON(!is_in_v2_mode() &&
2125 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2126 
2127 		update_tasks_nodemask(cp);
2128 
2129 		rcu_read_lock();
2130 		css_put(&cp->css);
2131 	}
2132 	rcu_read_unlock();
2133 }
2134 
2135 /*
2136  * Handle user request to change the 'mems' memory placement
2137  * of a cpuset.  Needs to validate the request, update the
2138  * cpuset's mems_allowed, and for each task in the cpuset,
2139  * update its mems_allowed, rebind the task's mempolicy and any vma
2140  * mempolicies, and if the cpuset is marked 'memory_migrate',
2141  * migrate the task's pages to the new memory.
2142  *
2143  * Call with cpuset_mutex held. May take callback_lock during call.
2144  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2145  * lock each such task's mm->mmap_lock, scan its vmas and rebind
2146  * their mempolicies to the cpuset's new mems_allowed.
2147  */
2148 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2149 			   const char *buf)
2150 {
2151 	int retval;
2152 
2153 	/*
2154 	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2155 	 * it's read-only
2156 	 */
2157 	if (cs == &top_cpuset) {
2158 		retval = -EACCES;
2159 		goto done;
2160 	}
2161 
2162 	/*
2163 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2164 	 * Since nodelist_parse() fails on an empty mask, we special case
2165 	 * that parsing.  The validate_change() call ensures that cpusets
2166 	 * with tasks have memory.
2167 	 */
2168 	if (!*buf) {
2169 		nodes_clear(trialcs->mems_allowed);
2170 	} else {
2171 		retval = nodelist_parse(buf, trialcs->mems_allowed);
2172 		if (retval < 0)
2173 			goto done;
2174 
2175 		if (!nodes_subset(trialcs->mems_allowed,
2176 				  top_cpuset.mems_allowed)) {
2177 			retval = -EINVAL;
2178 			goto done;
2179 		}
2180 	}
2181 
2182 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2183 		retval = 0;		/* Too easy - nothing to do */
2184 		goto done;
2185 	}
2186 	retval = validate_change(cs, trialcs);
2187 	if (retval < 0)
2188 		goto done;
2189 
2190 	check_insane_mems_config(&trialcs->mems_allowed);
2191 
2192 	spin_lock_irq(&callback_lock);
2193 	cs->mems_allowed = trialcs->mems_allowed;
2194 	spin_unlock_irq(&callback_lock);
2195 
2196 	/* use trialcs->mems_allowed as a temp variable */
2197 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2198 done:
2199 	return retval;
2200 }
2201 
2202 bool current_cpuset_is_being_rebound(void)
2203 {
2204 	bool ret;
2205 
2206 	rcu_read_lock();
2207 	ret = task_cs(current) == cpuset_being_rebound;
2208 	rcu_read_unlock();
2209 
2210 	return ret;
2211 }
2212 
2213 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2214 {
2215 #ifdef CONFIG_SMP
2216 	if (val < -1 || val >= sched_domain_level_max)
2217 		return -EINVAL;
2218 #endif
2219 
2220 	if (val != cs->relax_domain_level) {
2221 		cs->relax_domain_level = val;
2222 		if (!cpumask_empty(cs->cpus_allowed) &&
2223 		    is_sched_load_balance(cs))
2224 			rebuild_sched_domains_locked();
2225 	}
2226 
2227 	return 0;
2228 }
2229 
2230 /**
2231  * update_tasks_flags - update the spread flags of tasks in the cpuset.
2232  * @cs: the cpuset in which each task's spread flags need to be changed
2233  *
2234  * Iterate through each task of @cs updating its spread flags.  As this
2235  * function is called with cpuset_mutex held, cpuset membership stays
2236  * stable.
2237  */
2238 static void update_tasks_flags(struct cpuset *cs)
2239 {
2240 	struct css_task_iter it;
2241 	struct task_struct *task;
2242 
2243 	css_task_iter_start(&cs->css, 0, &it);
2244 	while ((task = css_task_iter_next(&it)))
2245 		cpuset_update_task_spread_flags(cs, task);
2246 	css_task_iter_end(&it);
2247 }
2248 
2249 /*
2250  * update_flag - read a 0 or a 1 in a file and update associated flag
2251  * bit:		the bit to update (see cpuset_flagbits_t)
2252  * cs:		the cpuset to update
2253  * turning_on: 	whether the flag is being set or cleared
2254  *
2255  * Call with cpuset_mutex held.
2256  */
2257 
2258 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2259 		       int turning_on)
2260 {
2261 	struct cpuset *trialcs;
2262 	int balance_flag_changed;
2263 	int spread_flag_changed;
2264 	int err;
2265 
2266 	trialcs = alloc_trial_cpuset(cs);
2267 	if (!trialcs)
2268 		return -ENOMEM;
2269 
2270 	if (turning_on)
2271 		set_bit(bit, &trialcs->flags);
2272 	else
2273 		clear_bit(bit, &trialcs->flags);
2274 
2275 	err = validate_change(cs, trialcs);
2276 	if (err < 0)
2277 		goto out;
2278 
2279 	balance_flag_changed = (is_sched_load_balance(cs) !=
2280 				is_sched_load_balance(trialcs));
2281 
2282 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2283 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2284 
2285 	spin_lock_irq(&callback_lock);
2286 	cs->flags = trialcs->flags;
2287 	spin_unlock_irq(&callback_lock);
2288 
2289 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2290 		rebuild_sched_domains_locked();
2291 
2292 	if (spread_flag_changed)
2293 		update_tasks_flags(cs);
2294 out:
2295 	free_cpuset(trialcs);
2296 	return err;
2297 }
2298 
2299 /**
2300  * update_prstate - update partition_root_state
2301  * @cs: the cpuset to update
2302  * @new_prs: new partition root state
2303  * Return: 0 if successful, != 0 if error
2304  *
2305  * Call with cpuset_mutex held.
2306  */
2307 static int update_prstate(struct cpuset *cs, int new_prs)
2308 {
2309 	int err = PERR_NONE, old_prs = cs->partition_root_state;
2310 	bool sched_domain_rebuilt = false;
2311 	struct cpuset *parent = parent_cs(cs);
2312 	struct tmpmasks tmpmask;
2313 
2314 	if (old_prs == new_prs)
2315 		return 0;
2316 
2317 	/*
2318 	 * For a previously invalid partition root, leave it invalid
2319 	 * if new_prs is not "member".
2320 	 */
2321 	if (new_prs && is_prs_invalid(old_prs)) {
2322 		cs->partition_root_state = -new_prs;
2323 		return 0;
2324 	}
2325 
2326 	if (alloc_cpumasks(NULL, &tmpmask))
2327 		return -ENOMEM;
2328 
2329 	if (!old_prs) {
2330 		/*
2331 		 * Turning on partition root requires setting the
2332 		 * CS_CPU_EXCLUSIVE bit implicitly as well, and cpus_allowed
2333 		 * cannot be empty.
2334 		 */
2335 		if (cpumask_empty(cs->cpus_allowed)) {
2336 			err = PERR_CPUSEMPTY;
2337 			goto out;
2338 		}
2339 
2340 		err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
2341 		if (err) {
2342 			err = PERR_NOTEXCL;
2343 			goto out;
2344 		}
2345 
2346 		err = update_parent_subparts_cpumask(cs, partcmd_enable,
2347 						     NULL, &tmpmask);
2348 		if (err) {
2349 			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2350 			goto out;
2351 		}
2352 
2353 		if (new_prs == PRS_ISOLATED) {
2354 			/*
2355 			 * Disabling the load balance flag should not return an
2356 			 * error unless the system is running out of memory.
2357 			 */
2358 			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2359 			sched_domain_rebuilt = true;
2360 		}
2361 	} else if (old_prs && new_prs) {
2362 		/*
2363 		 * A change in load balance state only, no change in cpumasks.
2364 		 */
2365 		update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED));
2366 		sched_domain_rebuilt = true;
2367 		goto out;	/* Sched domain is rebuilt in update_flag() */
2368 	} else {
2369 		/*
2370 		 * Switching back to member is always allowed even if it
2371 		 * disables child partitions.
2372 		 */
2373 		update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2374 					       &tmpmask);
2375 
2376 		/*
2377 		 * If there are child partitions, they will all become invalid.
2378 		 */
2379 		if (unlikely(cs->nr_subparts_cpus)) {
2380 			spin_lock_irq(&callback_lock);
2381 			cs->nr_subparts_cpus = 0;
2382 			cpumask_clear(cs->subparts_cpus);
2383 			compute_effective_cpumask(cs->effective_cpus, cs, parent);
2384 			spin_unlock_irq(&callback_lock);
2385 		}
2386 
2387 		/* Turning off CS_CPU_EXCLUSIVE will not return an error */
2388 		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2389 
2390 		if (!is_sched_load_balance(cs)) {
2391 			/* Make sure load balance is on */
2392 			update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
2393 			sched_domain_rebuilt = true;
2394 		}
2395 	}
2396 
2397 	update_tasks_cpumask(parent, tmpmask.new_cpus);
2398 
2399 	if (parent->child_ecpus_count)
2400 		update_sibling_cpumasks(parent, cs, &tmpmask);
2401 
2402 	if (!sched_domain_rebuilt)
2403 		rebuild_sched_domains_locked();
2404 out:
2405 	/*
2406 	 * Make the partition invalid if an error happens
2407 	 */
2408 	if (err)
2409 		new_prs = -new_prs;
2410 	spin_lock_irq(&callback_lock);
2411 	cs->partition_root_state = new_prs;
2412 	WRITE_ONCE(cs->prs_err, err);
2413 	spin_unlock_irq(&callback_lock);
2414 	/*
2415 	 * Update child cpusets, if present.
2416 	 * Force update if switching back to member.
2417 	 */
2418 	if (!list_empty(&cs->css.children))
2419 		update_cpumasks_hier(cs, &tmpmask, !new_prs);
2420 
2421 	notify_partition_change(cs, old_prs);
2422 	free_cpumasks(NULL, &tmpmask);
2423 	return 0;
2424 }
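
/*
 * For illustration: the "new_prs = -new_prs" above relies on invalid
 * partition states being encoded as the negation of their valid
 * counterparts (assuming the usual encoding: PRS_ROOT = 1 pairs with
 * PRS_INVALID_ROOT = -1, PRS_ISOLATED = 2 with PRS_INVALID_ISOLATED = -2),
 * so a negative value means "invalid" while still remembering whether the
 * partition was a root or an isolated one.  A hypothetical helper:
 */
static inline int prs_make_invalid(int prs)
{
	/* PRS_MEMBER (0) has no invalid form; valid states flip sign */
	return prs > 0 ? -prs : prs;
}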
2425 
2426 /*
2427  * Frequency meter - How fast is some event occurring?
2428  *
2429  * These routines manage a digitally filtered, constant time based,
2430  * event frequency meter.  There are four routines:
2431  *   fmeter_init() - initialize a frequency meter.
2432  *   fmeter_markevent() - called each time the event happens.
2433  *   fmeter_getrate() - returns the recent rate of such events.
2434  *   fmeter_update() - internal routine used to update fmeter.
2435  *
2436  * A common data structure is passed to each of these routines,
2437  * which is used to keep track of the state required to manage the
2438  * frequency meter and its digital filter.
2439  *
2440  * The filter works on the number of events marked per unit time.
2441  * The filter is single-pole low-pass recursive (IIR).  The time unit
2442  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2443  * simulate 3 decimal digits of precision (multiplied by 1000).
2444  *
2445  * With an FM_COEF of 933, and a time base of 1 second, the filter
2446  * has a half-life of 10 seconds, meaning that if the events quit
2447  * happening, then the rate returned from the fmeter_getrate()
2448  * will be cut in half each 10 seconds, until it converges to zero.
2449  *
2450  * It is not worth doing a real infinitely recursive filter.  If more
2451  * than FM_MAXTICKS ticks have elapsed since the last filter event,
2452  * just compute FM_MAXTICKS ticks worth, by which point the level
2453  * will be stable.
2454  *
2455  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2456  * arithmetic overflow in the fmeter_update() routine.
2457  *
2458  * Given the simple 32 bit integer arithmetic used, this meter works
2459  * best for reporting rates between one per millisecond (msec) and
2460  * one per 32 (approx) seconds.  At constant rates faster than one
2461  * per msec it maxes out at values just under 1,000,000.  At constant
2462  * rates between one per msec, and one per second it will stabilize
2463  * to a value N*1000, where N is the rate of events per second.
2464  * At constant rates between one per second and one per 32 seconds,
2465  * it will be choppy, moving up on the seconds that have an event,
2466  * and then decaying until the next event.  At rates slower than
2467  * about one in 32 seconds, it decays all the way back to zero between
2468  * each event.
2469  */
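
/*
 * A worked example of the decay described above (illustrative only,
 * mirroring the integer arithmetic in fmeter_update()): with no new
 * events, each 1-second tick scales the value by FM_COEF/FM_SCALE = 0.933,
 * and 0.933^10 ~= 0.5, which is where the quoted 10 second half-life
 * comes from.
 */
static inline int fmeter_decay_example(int val, int ticks)
{
	while (ticks-- > 0)
		val = (933 * val) / 1000;	/* FM_COEF / FM_SCALE */
	return val;	/* ticks == 10 roughly halves val */
}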
2470 
2471 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
2472 #define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
2473 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
2474 #define FM_SCALE 1000		/* faux fixed point scale */
2475 
2476 /* Initialize a frequency meter */
2477 static void fmeter_init(struct fmeter *fmp)
2478 {
2479 	fmp->cnt = 0;
2480 	fmp->val = 0;
2481 	fmp->time = 0;
2482 	spin_lock_init(&fmp->lock);
2483 }
2484 
2485 /* Internal meter update - process cnt events and update value */
2486 static void fmeter_update(struct fmeter *fmp)
2487 {
2488 	time64_t now;
2489 	u32 ticks;
2490 
2491 	now = ktime_get_seconds();
2492 	ticks = now - fmp->time;
2493 
2494 	if (ticks == 0)
2495 		return;
2496 
2497 	ticks = min(FM_MAXTICKS, ticks);
2498 	while (ticks-- > 0)
2499 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2500 	fmp->time = now;
2501 
2502 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2503 	fmp->cnt = 0;
2504 }
2505 
2506 /* Process any previous ticks, then bump cnt by one (times scale). */
2507 static void fmeter_markevent(struct fmeter *fmp)
2508 {
2509 	spin_lock(&fmp->lock);
2510 	fmeter_update(fmp);
2511 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2512 	spin_unlock(&fmp->lock);
2513 }
2514 
2515 /* Process any previous ticks, then return current value. */
2516 static int fmeter_getrate(struct fmeter *fmp)
2517 {
2518 	int val;
2519 
2520 	spin_lock(&fmp->lock);
2521 	fmeter_update(fmp);
2522 	val = fmp->val;
2523 	spin_unlock(&fmp->lock);
2524 	return val;
2525 }
2526 
2527 static struct cpuset *cpuset_attach_old_cs;
2528 
2529 /*
2530  * Check to see if a cpuset can accept a new task
2531  * For v1, cpus_allowed and mems_allowed can't be empty.
2532  * For v2, effective_cpus can't be empty.
2533  * Note that in v1, effective_cpus = cpus_allowed.
2534  */
2535 static int cpuset_can_attach_check(struct cpuset *cs)
2536 {
2537 	if (cpumask_empty(cs->effective_cpus) ||
2538 	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2539 		return -ENOSPC;
2540 	return 0;
2541 }
2542 
2543 static void reset_migrate_dl_data(struct cpuset *cs)
2544 {
2545 	cs->nr_migrate_dl_tasks = 0;
2546 	cs->sum_migrate_dl_bw = 0;
2547 }
2548 
2549 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2550 static int cpuset_can_attach(struct cgroup_taskset *tset)
2551 {
2552 	struct cgroup_subsys_state *css;
2553 	struct cpuset *cs, *oldcs;
2554 	struct task_struct *task;
2555 	int ret;
2556 
2557 	/* used later by cpuset_attach() */
2558 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2559 	oldcs = cpuset_attach_old_cs;
2560 	cs = css_cs(css);
2561 
2562 	mutex_lock(&cpuset_mutex);
2563 
2564 	/* Check to see if task is allowed in the cpuset */
2565 	ret = cpuset_can_attach_check(cs);
2566 	if (ret)
2567 		goto out_unlock;
2568 
2569 	cgroup_taskset_for_each(task, css, tset) {
2570 		ret = task_can_attach(task);
2571 		if (ret)
2572 			goto out_unlock;
2573 		ret = security_task_setscheduler(task);
2574 		if (ret)
2575 			goto out_unlock;
2576 
2577 		if (dl_task(task)) {
2578 			cs->nr_migrate_dl_tasks++;
2579 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
2580 		}
2581 	}
2582 
2583 	if (!cs->nr_migrate_dl_tasks)
2584 		goto out_success;
2585 
2586 	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2587 		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2588 
2589 		if (unlikely(cpu >= nr_cpu_ids)) {
2590 			reset_migrate_dl_data(cs);
2591 			ret = -EINVAL;
2592 			goto out_unlock;
2593 		}
2594 
2595 		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2596 		if (ret) {
2597 			reset_migrate_dl_data(cs);
2598 			goto out_unlock;
2599 		}
2600 	}
2601 
2602 out_success:
2603 	/*
2604 	 * Mark that an attach is in progress.  This makes validate_change() fail
2605 	 * changes which zero cpus/mems_allowed.
2606 	 */
2607 	cs->attach_in_progress++;
2608 out_unlock:
2609 	mutex_unlock(&cpuset_mutex);
2610 	return ret;
2611 }
2612 
2613 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2614 {
2615 	struct cgroup_subsys_state *css;
2616 	struct cpuset *cs;
2617 
2618 	cgroup_taskset_first(tset, &css);
2619 	cs = css_cs(css);
2620 
2621 	mutex_lock(&cpuset_mutex);
2622 	cs->attach_in_progress--;
2623 	if (!cs->attach_in_progress)
2624 		wake_up(&cpuset_attach_wq);
2625 
2626 	if (cs->nr_migrate_dl_tasks) {
2627 		int cpu = cpumask_any(cs->effective_cpus);
2628 
2629 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
2630 		reset_migrate_dl_data(cs);
2631 	}
2632 
2633 	mutex_unlock(&cpuset_mutex);
2634 }
2635 
2636 /*
2637  * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
2638  * but we can't allocate it dynamically there.  Define it as a global and
2639  * allocate it from cpuset_init().
2640  */
2641 static cpumask_var_t cpus_attach;
2642 static nodemask_t cpuset_attach_nodemask_to;
2643 
2644 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2645 {
2646 	lockdep_assert_held(&cpuset_mutex);
2647 
2648 	if (cs != &top_cpuset)
2649 		guarantee_online_cpus(task, cpus_attach);
2650 	else
2651 		cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
2652 	/*
2653 	 * can_attach beforehand should guarantee that this doesn't
2654 	 * fail.  TODO: have a better way to handle failure here
2655 	 */
2656 	WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
2657 
2658 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2659 	cpuset_update_task_spread_flags(cs, task);
2660 }
2661 
2662 static void cpuset_attach(struct cgroup_taskset *tset)
2663 {
2664 	struct task_struct *task;
2665 	struct task_struct *leader;
2666 	struct cgroup_subsys_state *css;
2667 	struct cpuset *cs;
2668 	struct cpuset *oldcs = cpuset_attach_old_cs;
2669 
2670 	cgroup_taskset_first(tset, &css);
2671 	cs = css_cs(css);
2672 
2673 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
2674 	mutex_lock(&cpuset_mutex);
2675 
2676 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2677 
2678 	cgroup_taskset_for_each(task, css, tset)
2679 		cpuset_attach_task(cs, task);
2680 
2681 	/*
2682 	 * Change mm for all threadgroup leaders. This is expensive and may
2683 	 * sleep, and should be moved outside the migration path proper.
2684 	 */
2685 	cpuset_attach_nodemask_to = cs->effective_mems;
2686 	cgroup_taskset_for_each_leader(leader, css, tset) {
2687 		struct mm_struct *mm = get_task_mm(leader);
2688 
2689 		if (mm) {
2690 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2691 
2692 			/*
2693 			 * old_mems_allowed is the same as mems_allowed
2694 			 * here, except if this task is being moved
2695 			 * automatically due to hotplug.  In that case
2696 			 * @mems_allowed has been updated and is empty, so
2697 			 * @old_mems_allowed is the right nodemask to
2698 			 * migrate the mm from.
2699 			 */
2700 			if (is_memory_migrate(cs))
2701 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2702 						  &cpuset_attach_nodemask_to);
2703 			else
2704 				mmput(mm);
2705 		}
2706 	}
2707 
2708 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
2709 
2710 	if (cs->nr_migrate_dl_tasks) {
2711 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
2712 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
2713 		reset_migrate_dl_data(cs);
2714 	}
2715 
2716 	cs->attach_in_progress--;
2717 	if (!cs->attach_in_progress)
2718 		wake_up(&cpuset_attach_wq);
2719 
2720 	mutex_unlock(&cpuset_mutex);
2721 }
2722 
2723 /* The various types of files and directories in a cpuset file system */
2724 
2725 typedef enum {
2726 	FILE_MEMORY_MIGRATE,
2727 	FILE_CPULIST,
2728 	FILE_MEMLIST,
2729 	FILE_EFFECTIVE_CPULIST,
2730 	FILE_EFFECTIVE_MEMLIST,
2731 	FILE_SUBPARTS_CPULIST,
2732 	FILE_CPU_EXCLUSIVE,
2733 	FILE_MEM_EXCLUSIVE,
2734 	FILE_MEM_HARDWALL,
2735 	FILE_SCHED_LOAD_BALANCE,
2736 	FILE_PARTITION_ROOT,
2737 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
2738 	FILE_MEMORY_PRESSURE_ENABLED,
2739 	FILE_MEMORY_PRESSURE,
2740 	FILE_SPREAD_PAGE,
2741 	FILE_SPREAD_SLAB,
2742 } cpuset_filetype_t;
2743 
2744 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2745 			    u64 val)
2746 {
2747 	struct cpuset *cs = css_cs(css);
2748 	cpuset_filetype_t type = cft->private;
2749 	int retval = 0;
2750 
2751 	cpus_read_lock();
2752 	mutex_lock(&cpuset_mutex);
2753 	if (!is_cpuset_online(cs)) {
2754 		retval = -ENODEV;
2755 		goto out_unlock;
2756 	}
2757 
2758 	switch (type) {
2759 	case FILE_CPU_EXCLUSIVE:
2760 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2761 		break;
2762 	case FILE_MEM_EXCLUSIVE:
2763 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2764 		break;
2765 	case FILE_MEM_HARDWALL:
2766 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
2767 		break;
2768 	case FILE_SCHED_LOAD_BALANCE:
2769 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2770 		break;
2771 	case FILE_MEMORY_MIGRATE:
2772 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2773 		break;
2774 	case FILE_MEMORY_PRESSURE_ENABLED:
2775 		cpuset_memory_pressure_enabled = !!val;
2776 		break;
2777 	case FILE_SPREAD_PAGE:
2778 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
2779 		break;
2780 	case FILE_SPREAD_SLAB:
2781 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
2782 		break;
2783 	default:
2784 		retval = -EINVAL;
2785 		break;
2786 	}
2787 out_unlock:
2788 	mutex_unlock(&cpuset_mutex);
2789 	cpus_read_unlock();
2790 	return retval;
2791 }
2792 
2793 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2794 			    s64 val)
2795 {
2796 	struct cpuset *cs = css_cs(css);
2797 	cpuset_filetype_t type = cft->private;
2798 	int retval = -ENODEV;
2799 
2800 	cpus_read_lock();
2801 	mutex_lock(&cpuset_mutex);
2802 	if (!is_cpuset_online(cs))
2803 		goto out_unlock;
2804 
2805 	switch (type) {
2806 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2807 		retval = update_relax_domain_level(cs, val);
2808 		break;
2809 	default:
2810 		retval = -EINVAL;
2811 		break;
2812 	}
2813 out_unlock:
2814 	mutex_unlock(&cpuset_mutex);
2815 	cpus_read_unlock();
2816 	return retval;
2817 }
2818 
2819 /*
2820  * Common handling for a write to a "cpus" or "mems" file.
2821  */
2822 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2823 				    char *buf, size_t nbytes, loff_t off)
2824 {
2825 	struct cpuset *cs = css_cs(of_css(of));
2826 	struct cpuset *trialcs;
2827 	int retval = -ENODEV;
2828 
2829 	buf = strstrip(buf);
2830 
2831 	/*
2832 	 * CPU or memory hotunplug may leave @cs w/o any execution
2833 	 * resources, in which case the hotplug code asynchronously updates
2834 	 * configuration and transfers all tasks to the nearest ancestor
2835 	 * which can execute.
2836 	 *
2837 	 * As writes to "cpus" or "mems" may restore @cs's execution
2838 	 * resources, wait for the previously scheduled operations before
2839 	 * proceeding, so that we don't end up repeatedly removing tasks added
2840 	 * after execution capability is restored.
2841 	 *
2842 	 * cpuset_hotplug_work calls back into cgroup core via
2843 	 * cgroup_transfer_tasks(), and waiting for it from a cgroupfs
2844 	 * operation like this one can lead to a deadlock through kernfs
2845 	 * active_ref protection.  Let's break the protection.  Losing the
2846 	 * protection is okay as we check whether @cs is online after
2847 	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
2848 	 * hierarchies.
2849 	 */
2850 	css_get(&cs->css);
2851 	kernfs_break_active_protection(of->kn);
2852 	flush_work(&cpuset_hotplug_work);
2853 
2854 	cpus_read_lock();
2855 	mutex_lock(&cpuset_mutex);
2856 	if (!is_cpuset_online(cs))
2857 		goto out_unlock;
2858 
2859 	trialcs = alloc_trial_cpuset(cs);
2860 	if (!trialcs) {
2861 		retval = -ENOMEM;
2862 		goto out_unlock;
2863 	}
2864 
2865 	switch (of_cft(of)->private) {
2866 	case FILE_CPULIST:
2867 		retval = update_cpumask(cs, trialcs, buf);
2868 		break;
2869 	case FILE_MEMLIST:
2870 		retval = update_nodemask(cs, trialcs, buf);
2871 		break;
2872 	default:
2873 		retval = -EINVAL;
2874 		break;
2875 	}
2876 
2877 	free_cpuset(trialcs);
2878 out_unlock:
2879 	mutex_unlock(&cpuset_mutex);
2880 	cpus_read_unlock();
2881 	kernfs_unbreak_active_protection(of->kn);
2882 	css_put(&cs->css);
2883 	flush_workqueue(cpuset_migrate_mm_wq);
2884 	return retval ?: nbytes;
2885 }
2886 
2887 /*
2888  * These ascii lists should be read in a single call, by using a user
2889  * buffer large enough to hold the entire map.  If read in smaller
2890  * chunks, there is no guarantee of atomicity.  Since the display format
2891  * used (a list of ranges of sequential numbers) is variable length,
2892  * and since these maps can change value dynamically, one could read
2893  * gibberish by doing partial reads while a list was changing.
2894  */
2895 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2896 {
2897 	struct cpuset *cs = css_cs(seq_css(sf));
2898 	cpuset_filetype_t type = seq_cft(sf)->private;
2899 	int ret = 0;
2900 
2901 	spin_lock_irq(&callback_lock);
2902 
2903 	switch (type) {
2904 	case FILE_CPULIST:
2905 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
2906 		break;
2907 	case FILE_MEMLIST:
2908 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2909 		break;
2910 	case FILE_EFFECTIVE_CPULIST:
2911 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2912 		break;
2913 	case FILE_EFFECTIVE_MEMLIST:
2914 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2915 		break;
2916 	case FILE_SUBPARTS_CPULIST:
2917 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2918 		break;
2919 	default:
2920 		ret = -EINVAL;
2921 	}
2922 
2923 	spin_unlock_irq(&callback_lock);
2924 	return ret;
2925 }
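
/*
 * For illustration, a userspace sketch of the single-read rule from the
 * comment above: issue one read() with a buffer large enough for the whole
 * map rather than looping over small chunks, since partial reads of a
 * changing list are not atomic.  The cgroup path is an assumption and
 * depends on where and how the hierarchy is mounted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_cpuset_cpus(char *buf, size_t len)
{
	int fd = open("/sys/fs/cgroup/mygrp/cpuset.cpus", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);	/* one call, one consistent snapshot */
	close(fd);
	if (n < 0)
		return -1;
	buf[n] = '\0';
	return 0;
}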
2926 
2927 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2928 {
2929 	struct cpuset *cs = css_cs(css);
2930 	cpuset_filetype_t type = cft->private;
2931 	switch (type) {
2932 	case FILE_CPU_EXCLUSIVE:
2933 		return is_cpu_exclusive(cs);
2934 	case FILE_MEM_EXCLUSIVE:
2935 		return is_mem_exclusive(cs);
2936 	case FILE_MEM_HARDWALL:
2937 		return is_mem_hardwall(cs);
2938 	case FILE_SCHED_LOAD_BALANCE:
2939 		return is_sched_load_balance(cs);
2940 	case FILE_MEMORY_MIGRATE:
2941 		return is_memory_migrate(cs);
2942 	case FILE_MEMORY_PRESSURE_ENABLED:
2943 		return cpuset_memory_pressure_enabled;
2944 	case FILE_MEMORY_PRESSURE:
2945 		return fmeter_getrate(&cs->fmeter);
2946 	case FILE_SPREAD_PAGE:
2947 		return is_spread_page(cs);
2948 	case FILE_SPREAD_SLAB:
2949 		return is_spread_slab(cs);
2950 	default:
2951 		BUG();
2952 	}
2953 
2954 	/* Unreachable but makes gcc happy */
2955 	return 0;
2956 }
2957 
2958 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2959 {
2960 	struct cpuset *cs = css_cs(css);
2961 	cpuset_filetype_t type = cft->private;
2962 	switch (type) {
2963 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2964 		return cs->relax_domain_level;
2965 	default:
2966 		BUG();
2967 	}
2968 
2969 	/* Unreachable but makes gcc happy */
2970 	return 0;
2971 }
2972 
2973 static int sched_partition_show(struct seq_file *seq, void *v)
2974 {
2975 	struct cpuset *cs = css_cs(seq_css(seq));
2976 	const char *err, *type = NULL;
2977 
2978 	switch (cs->partition_root_state) {
2979 	case PRS_ROOT:
2980 		seq_puts(seq, "root\n");
2981 		break;
2982 	case PRS_ISOLATED:
2983 		seq_puts(seq, "isolated\n");
2984 		break;
2985 	case PRS_MEMBER:
2986 		seq_puts(seq, "member\n");
2987 		break;
2988 	case PRS_INVALID_ROOT:
2989 		type = "root";
2990 		fallthrough;
2991 	case PRS_INVALID_ISOLATED:
2992 		if (!type)
2993 			type = "isolated";
2994 		err = perr_strings[READ_ONCE(cs->prs_err)];
2995 		if (err)
2996 			seq_printf(seq, "%s invalid (%s)\n", type, err);
2997 		else
2998 			seq_printf(seq, "%s invalid\n", type);
2999 		break;
3000 	}
3001 	return 0;
3002 }
3003 
3004 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3005 				     size_t nbytes, loff_t off)
3006 {
3007 	struct cpuset *cs = css_cs(of_css(of));
3008 	int val;
3009 	int retval = -ENODEV;
3010 
3011 	buf = strstrip(buf);
3012 
3013 	/*
3014 	 * Convert "root"/"member"/"isolated" to PRS_ROOT/PRS_MEMBER/PRS_ISOLATED.
3015 	 */
3016 	if (!strcmp(buf, "root"))
3017 		val = PRS_ROOT;
3018 	else if (!strcmp(buf, "member"))
3019 		val = PRS_MEMBER;
3020 	else if (!strcmp(buf, "isolated"))
3021 		val = PRS_ISOLATED;
3022 	else
3023 		return -EINVAL;
3024 
3025 	css_get(&cs->css);
3026 	cpus_read_lock();
3027 	mutex_lock(&cpuset_mutex);
3028 	if (!is_cpuset_online(cs))
3029 		goto out_unlock;
3030 
3031 	retval = update_prstate(cs, val);
3032 out_unlock:
3033 	mutex_unlock(&cpuset_mutex);
3034 	cpus_read_unlock();
3035 	css_put(&cs->css);
3036 	return retval ?: nbytes;
3037 }
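
/*
 * Usage sketch (illustrative): from userspace, making a cgroup a partition
 * root is just writing one of the accepted keywords to its
 * cpuset.cpus.partition file.  The directory layout is an assumption about
 * where the cgroup v2 hierarchy is mounted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_partition_state(const char *cgrp_dir, const char *state)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/cpuset.cpus.partition", cgrp_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* "root", "member" or "isolated", as parsed above */
	n = write(fd, state, strlen(state));
	close(fd);
	return n == (ssize_t)strlen(state) ? 0 : -1;
}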
3038 
3039 /*
3040  * for the common functions, 'private' gives the type of file
3041  */
3042 
3043 static struct cftype legacy_files[] = {
3044 	{
3045 		.name = "cpus",
3046 		.seq_show = cpuset_common_seq_show,
3047 		.write = cpuset_write_resmask,
3048 		.max_write_len = (100U + 6 * NR_CPUS),
3049 		.private = FILE_CPULIST,
3050 	},
3051 
3052 	{
3053 		.name = "mems",
3054 		.seq_show = cpuset_common_seq_show,
3055 		.write = cpuset_write_resmask,
3056 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3057 		.private = FILE_MEMLIST,
3058 	},
3059 
3060 	{
3061 		.name = "effective_cpus",
3062 		.seq_show = cpuset_common_seq_show,
3063 		.private = FILE_EFFECTIVE_CPULIST,
3064 	},
3065 
3066 	{
3067 		.name = "effective_mems",
3068 		.seq_show = cpuset_common_seq_show,
3069 		.private = FILE_EFFECTIVE_MEMLIST,
3070 	},
3071 
3072 	{
3073 		.name = "cpu_exclusive",
3074 		.read_u64 = cpuset_read_u64,
3075 		.write_u64 = cpuset_write_u64,
3076 		.private = FILE_CPU_EXCLUSIVE,
3077 	},
3078 
3079 	{
3080 		.name = "mem_exclusive",
3081 		.read_u64 = cpuset_read_u64,
3082 		.write_u64 = cpuset_write_u64,
3083 		.private = FILE_MEM_EXCLUSIVE,
3084 	},
3085 
3086 	{
3087 		.name = "mem_hardwall",
3088 		.read_u64 = cpuset_read_u64,
3089 		.write_u64 = cpuset_write_u64,
3090 		.private = FILE_MEM_HARDWALL,
3091 	},
3092 
3093 	{
3094 		.name = "sched_load_balance",
3095 		.read_u64 = cpuset_read_u64,
3096 		.write_u64 = cpuset_write_u64,
3097 		.private = FILE_SCHED_LOAD_BALANCE,
3098 	},
3099 
3100 	{
3101 		.name = "sched_relax_domain_level",
3102 		.read_s64 = cpuset_read_s64,
3103 		.write_s64 = cpuset_write_s64,
3104 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3105 	},
3106 
3107 	{
3108 		.name = "memory_migrate",
3109 		.read_u64 = cpuset_read_u64,
3110 		.write_u64 = cpuset_write_u64,
3111 		.private = FILE_MEMORY_MIGRATE,
3112 	},
3113 
3114 	{
3115 		.name = "memory_pressure",
3116 		.read_u64 = cpuset_read_u64,
3117 		.private = FILE_MEMORY_PRESSURE,
3118 	},
3119 
3120 	{
3121 		.name = "memory_spread_page",
3122 		.read_u64 = cpuset_read_u64,
3123 		.write_u64 = cpuset_write_u64,
3124 		.private = FILE_SPREAD_PAGE,
3125 	},
3126 
3127 	{
3128 		.name = "memory_spread_slab",
3129 		.read_u64 = cpuset_read_u64,
3130 		.write_u64 = cpuset_write_u64,
3131 		.private = FILE_SPREAD_SLAB,
3132 	},
3133 
3134 	{
3135 		.name = "memory_pressure_enabled",
3136 		.flags = CFTYPE_ONLY_ON_ROOT,
3137 		.read_u64 = cpuset_read_u64,
3138 		.write_u64 = cpuset_write_u64,
3139 		.private = FILE_MEMORY_PRESSURE_ENABLED,
3140 	},
3141 
3142 	{ }	/* terminate */
3143 };
3144 
3145 /*
3146  * This is currently a minimal set for the default hierarchy. It can be
3147  * expanded later on by migrating more features and control files from v1.
3148  */
3149 static struct cftype dfl_files[] = {
3150 	{
3151 		.name = "cpus",
3152 		.seq_show = cpuset_common_seq_show,
3153 		.write = cpuset_write_resmask,
3154 		.max_write_len = (100U + 6 * NR_CPUS),
3155 		.private = FILE_CPULIST,
3156 		.flags = CFTYPE_NOT_ON_ROOT,
3157 	},
3158 
3159 	{
3160 		.name = "mems",
3161 		.seq_show = cpuset_common_seq_show,
3162 		.write = cpuset_write_resmask,
3163 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3164 		.private = FILE_MEMLIST,
3165 		.flags = CFTYPE_NOT_ON_ROOT,
3166 	},
3167 
3168 	{
3169 		.name = "cpus.effective",
3170 		.seq_show = cpuset_common_seq_show,
3171 		.private = FILE_EFFECTIVE_CPULIST,
3172 	},
3173 
3174 	{
3175 		.name = "mems.effective",
3176 		.seq_show = cpuset_common_seq_show,
3177 		.private = FILE_EFFECTIVE_MEMLIST,
3178 	},
3179 
3180 	{
3181 		.name = "cpus.partition",
3182 		.seq_show = sched_partition_show,
3183 		.write = sched_partition_write,
3184 		.private = FILE_PARTITION_ROOT,
3185 		.flags = CFTYPE_NOT_ON_ROOT,
3186 		.file_offset = offsetof(struct cpuset, partition_file),
3187 	},
3188 
3189 	{
3190 		.name = "cpus.subpartitions",
3191 		.seq_show = cpuset_common_seq_show,
3192 		.private = FILE_SUBPARTS_CPULIST,
3193 		.flags = CFTYPE_DEBUG,
3194 	},
3195 
3196 	{ }	/* terminate */
3197 };
3198 
3199 
3200 /*
3201  *	cpuset_css_alloc - allocate a cpuset css
3202  *	parent_css: css of the parent cgroup that the new cpuset belongs to
3203  */
3204 
3205 static struct cgroup_subsys_state *
3206 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3207 {
3208 	struct cpuset *cs;
3209 
3210 	if (!parent_css)
3211 		return &top_cpuset.css;
3212 
3213 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3214 	if (!cs)
3215 		return ERR_PTR(-ENOMEM);
3216 
3217 	if (alloc_cpumasks(cs, NULL)) {
3218 		kfree(cs);
3219 		return ERR_PTR(-ENOMEM);
3220 	}
3221 
3222 	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3223 	nodes_clear(cs->mems_allowed);
3224 	nodes_clear(cs->effective_mems);
3225 	fmeter_init(&cs->fmeter);
3226 	cs->relax_domain_level = -1;
3227 
3228 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3229 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3230 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3231 
3232 	return &cs->css;
3233 }
3234 
3235 static int cpuset_css_online(struct cgroup_subsys_state *css)
3236 {
3237 	struct cpuset *cs = css_cs(css);
3238 	struct cpuset *parent = parent_cs(cs);
3239 	struct cpuset *tmp_cs;
3240 	struct cgroup_subsys_state *pos_css;
3241 
3242 	if (!parent)
3243 		return 0;
3244 
3245 	cpus_read_lock();
3246 	mutex_lock(&cpuset_mutex);
3247 
3248 	set_bit(CS_ONLINE, &cs->flags);
3249 	if (is_spread_page(parent))
3250 		set_bit(CS_SPREAD_PAGE, &cs->flags);
3251 	if (is_spread_slab(parent))
3252 		set_bit(CS_SPREAD_SLAB, &cs->flags);
3253 
3254 	cpuset_inc();
3255 
3256 	spin_lock_irq(&callback_lock);
3257 	if (is_in_v2_mode()) {
3258 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3259 		cs->effective_mems = parent->effective_mems;
3260 		cs->use_parent_ecpus = true;
3261 		parent->child_ecpus_count++;
3262 	}
3263 
3264 	/*
3265 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3266 	 */
3267 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3268 	    !is_sched_load_balance(parent))
3269 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3270 
3271 	spin_unlock_irq(&callback_lock);
3272 
3273 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3274 		goto out_unlock;
3275 
3276 	/*
3277 	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3278 	 * set.  This flag handling is implemented in cgroup core for
3279 	 * historical reasons - the flag may be specified during mount.
3280 	 *
3281 	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3282 	 * refuse to clone the configuration - thereby refusing to let the
3283 	 * task enter, and as a result failing the sys_unshare() or
3284 	 * clone() that initiated it.  If this becomes a problem for some
3285 	 * users who wish to allow that scenario, then this could be
3286 	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3287 	 * (and likewise for mems) to the new cgroup.
3288 	 */
3289 	rcu_read_lock();
3290 	cpuset_for_each_child(tmp_cs, pos_css, parent) {
3291 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3292 			rcu_read_unlock();
3293 			goto out_unlock;
3294 		}
3295 	}
3296 	rcu_read_unlock();
3297 
3298 	spin_lock_irq(&callback_lock);
3299 	cs->mems_allowed = parent->mems_allowed;
3300 	cs->effective_mems = parent->mems_allowed;
3301 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3302 	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
3303 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3304 	spin_unlock_irq(&callback_lock);
3305 out_unlock:
3306 	mutex_unlock(&cpuset_mutex);
3307 	cpus_read_unlock();
3308 	return 0;
3309 }
3310 
3311 /*
3312  * If the cpuset being removed has its flag 'sched_load_balance'
3313  * enabled, then simulate turning sched_load_balance off, which
3314  * will call rebuild_sched_domains_locked(). That is not needed
3315  * in the default hierarchy where only changes in partition
3316  * will cause repartitioning.
3317  *
3318  * If the cpuset has the 'sched.partition' flag enabled, simulate
3319  * turning 'sched.partition' off.
3320  */
3321 
3322 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3323 {
3324 	struct cpuset *cs = css_cs(css);
3325 
3326 	cpus_read_lock();
3327 	mutex_lock(&cpuset_mutex);
3328 
3329 	if (is_partition_valid(cs))
3330 		update_prstate(cs, 0);
3331 
3332 	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3333 	    is_sched_load_balance(cs))
3334 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3335 
3336 	if (cs->use_parent_ecpus) {
3337 		struct cpuset *parent = parent_cs(cs);
3338 
3339 		cs->use_parent_ecpus = false;
3340 		parent->child_ecpus_count--;
3341 	}
3342 
3343 	cpuset_dec();
3344 	clear_bit(CS_ONLINE, &cs->flags);
3345 
3346 	mutex_unlock(&cpuset_mutex);
3347 	cpus_read_unlock();
3348 }
3349 
3350 static void cpuset_css_free(struct cgroup_subsys_state *css)
3351 {
3352 	struct cpuset *cs = css_cs(css);
3353 
3354 	free_cpuset(cs);
3355 }
3356 
3357 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3358 {
3359 	mutex_lock(&cpuset_mutex);
3360 	spin_lock_irq(&callback_lock);
3361 
3362 	if (is_in_v2_mode()) {
3363 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3364 		top_cpuset.mems_allowed = node_possible_map;
3365 	} else {
3366 		cpumask_copy(top_cpuset.cpus_allowed,
3367 			     top_cpuset.effective_cpus);
3368 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3369 	}
3370 
3371 	spin_unlock_irq(&callback_lock);
3372 	mutex_unlock(&cpuset_mutex);
3373 }
3374 
3375 /*
3376  * In case the child is cloned into a cpuset different from its parent,
3377  * additional checks are done to see if the move is allowed.
3378  */
3379 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3380 {
3381 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3382 	bool same_cs;
3383 	int ret;
3384 
3385 	rcu_read_lock();
3386 	same_cs = (cs == task_cs(current));
3387 	rcu_read_unlock();
3388 
3389 	if (same_cs)
3390 		return 0;
3391 
3392 	lockdep_assert_held(&cgroup_mutex);
3393 	mutex_lock(&cpuset_mutex);
3394 
3395 	/* Check to see if task is allowed in the cpuset */
3396 	ret = cpuset_can_attach_check(cs);
3397 	if (ret)
3398 		goto out_unlock;
3399 
3400 	ret = task_can_attach(task);
3401 	if (ret)
3402 		goto out_unlock;
3403 
3404 	ret = security_task_setscheduler(task);
3405 	if (ret)
3406 		goto out_unlock;
3407 
3408 	/*
3409 	 * Mark attach is in progress.  This makes validate_change() fail
3410 	 * changes which zero cpus/mems_allowed.
3411 	 */
3412 	cs->attach_in_progress++;
3413 out_unlock:
3414 	mutex_unlock(&cpuset_mutex);
3415 	return ret;
3416 }
3417 
cpuset_cancel_fork(struct task_struct * task,struct css_set * cset)3418 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3419 {
3420 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3421 	bool same_cs;
3422 
3423 	rcu_read_lock();
3424 	same_cs = (cs == task_cs(current));
3425 	rcu_read_unlock();
3426 
3427 	if (same_cs)
3428 		return;
3429 
3430 	mutex_lock(&cpuset_mutex);
3431 	cs->attach_in_progress--;
3432 	if (!cs->attach_in_progress)
3433 		wake_up(&cpuset_attach_wq);
3434 	mutex_unlock(&cpuset_mutex);
3435 }
3436 
3437 /*
3438  * Make sure the new task conform to the current state of its parent,
3439  * which could have been changed by cpuset just after it inherits the
3440  * state from the parent and before it sits on the cgroup's task list.
3441  */
cpuset_fork(struct task_struct * task)3442 static void cpuset_fork(struct task_struct *task)
3443 {
3444 	struct cpuset *cs;
3445 	bool same_cs, inherit_cpus = false;
3446 
3447 	rcu_read_lock();
3448 	cs = task_cs(task);
3449 	same_cs = (cs == task_cs(current));
3450 	rcu_read_unlock();
3451 	if (same_cs) {
3452 		if (cs == &top_cpuset)
3453 			return;
3454 		trace_android_rvh_cpuset_fork(task, &inherit_cpus);
3455 		if (!inherit_cpus)
3456 			set_cpus_allowed_ptr(task, current->cpus_ptr);
3457 		task->mems_allowed = current->mems_allowed;
3458 		return;
3459 	}
3460 
3461 	/* CLONE_INTO_CGROUP */
3462 	mutex_lock(&cpuset_mutex);
3463 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3464 	cpuset_attach_task(cs, task);
3465 
3466 	cs->attach_in_progress--;
3467 	if (!cs->attach_in_progress)
3468 		wake_up(&cpuset_attach_wq);
3469 
3470 	mutex_unlock(&cpuset_mutex);
3471 }
3472 
3473 struct cgroup_subsys cpuset_cgrp_subsys = {
3474 	.css_alloc	= cpuset_css_alloc,
3475 	.css_online	= cpuset_css_online,
3476 	.css_offline	= cpuset_css_offline,
3477 	.css_free	= cpuset_css_free,
3478 	.can_attach	= cpuset_can_attach,
3479 	.cancel_attach	= cpuset_cancel_attach,
3480 	.attach		= cpuset_attach,
3481 	.post_attach	= cpuset_post_attach,
3482 	.bind		= cpuset_bind,
3483 	.can_fork	= cpuset_can_fork,
3484 	.cancel_fork	= cpuset_cancel_fork,
3485 	.fork		= cpuset_fork,
3486 	.legacy_cftypes	= legacy_files,
3487 	.dfl_cftypes	= dfl_files,
3488 	.early_init	= true,
3489 	.threaded	= true,
3490 };
3491 
3492 /**
3493  * cpuset_init - initialize cpusets at system boot
3494  *
3495  * Description: Initialize top_cpuset
3496  **/
3497 
cpuset_init(void)3498 int __init cpuset_init(void)
3499 {
3500 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3501 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3502 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3503 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));
3504 
3505 	cpumask_setall(top_cpuset.cpus_allowed);
3506 	cpumask_setall(top_cpuset.cpus_requested);
3507 	nodes_setall(top_cpuset.mems_allowed);
3508 	cpumask_setall(top_cpuset.effective_cpus);
3509 	nodes_setall(top_cpuset.effective_mems);
3510 
3511 	fmeter_init(&top_cpuset.fmeter);
3512 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3513 	top_cpuset.relax_domain_level = -1;
3514 
3515 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3516 
3517 	return 0;
3518 }
3519 
3520 /*
3521  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3522  * or memory nodes, we need to walk over the cpuset hierarchy,
3523  * removing that CPU or node from all cpusets.  If this removes the
3524  * last CPU or node from a cpuset, then move the tasks in the empty
3525  * cpuset to its next-highest non-empty parent.
3526  */
remove_tasks_in_empty_cpuset(struct cpuset * cs)3527 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3528 {
3529 	struct cpuset *parent;
3530 
3531 	/*
3532 	 * Find its next-highest non-empty parent, (top cpuset
3533 	 * has online cpus, so can't be empty).
3534 	 */
3535 	parent = parent_cs(cs);
3536 	while (cpumask_empty(parent->cpus_allowed) ||
3537 			nodes_empty(parent->mems_allowed))
3538 		parent = parent_cs(parent);
3539 
3540 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3541 		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3542 		pr_cont_cgroup_name(cs->css.cgroup);
3543 		pr_cont("\n");
3544 	}
3545 }
3546 
3547 static void
hotplug_update_tasks_legacy(struct cpuset * cs,struct cpumask * new_cpus,nodemask_t * new_mems,bool cpus_updated,bool mems_updated)3548 hotplug_update_tasks_legacy(struct cpuset *cs,
3549 			    struct cpumask *new_cpus, nodemask_t *new_mems,
3550 			    bool cpus_updated, bool mems_updated)
3551 {
3552 	bool is_empty;
3553 
3554 	spin_lock_irq(&callback_lock);
3555 	cpumask_copy(cs->cpus_allowed, new_cpus);
3556 	cpumask_copy(cs->effective_cpus, new_cpus);
3557 	cs->mems_allowed = *new_mems;
3558 	cs->effective_mems = *new_mems;
3559 	spin_unlock_irq(&callback_lock);
3560 
3561 	/*
3562 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3563 	 * as the tasks will be migrated to an ancestor.
3564 	 */
3565 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3566 		update_tasks_cpumask(cs, new_cpus);
3567 	if (mems_updated && !nodes_empty(cs->mems_allowed))
3568 		update_tasks_nodemask(cs);
3569 
3570 	is_empty = cpumask_empty(cs->cpus_allowed) ||
3571 		   nodes_empty(cs->mems_allowed);
3572 
3573 	mutex_unlock(&cpuset_mutex);
3574 
3575 	/*
3576 	 * Move tasks to the nearest ancestor with execution resources,
3577 	 * This is full cgroup operation which will also call back into
3578 	 * cpuset. Should be done outside any lock.
3579 	 */
3580 	if (is_empty)
3581 		remove_tasks_in_empty_cpuset(cs);
3582 
3583 	mutex_lock(&cpuset_mutex);
3584 }
3585 
3586 static void
hotplug_update_tasks(struct cpuset * cs,struct cpumask * new_cpus,nodemask_t * new_mems,bool cpus_updated,bool mems_updated)3587 hotplug_update_tasks(struct cpuset *cs,
3588 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3589 		     bool cpus_updated, bool mems_updated)
3590 {
3591 	/* A partition root is allowed to have empty effective cpus */
3592 	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3593 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3594 	if (nodes_empty(*new_mems))
3595 		*new_mems = parent_cs(cs)->effective_mems;
3596 
3597 	spin_lock_irq(&callback_lock);
3598 	cpumask_copy(cs->effective_cpus, new_cpus);
3599 	cs->effective_mems = *new_mems;
3600 	spin_unlock_irq(&callback_lock);
3601 
3602 	if (cpus_updated)
3603 		update_tasks_cpumask(cs, new_cpus);
3604 	if (mems_updated)
3605 		update_tasks_nodemask(cs);
3606 }
3607 
3608 static bool force_rebuild;
3609 
cpuset_force_rebuild(void)3610 void cpuset_force_rebuild(void)
3611 {
3612 	force_rebuild = true;
3613 }
3614 
3615 /**
3616  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3617  * @cs: cpuset in interest
3618  * @tmp: the tmpmasks structure pointer
3619  *
3620  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3621  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3622  * all its tasks are moved to the nearest ancestor with both resources.
3623  */
cpuset_hotplug_update_tasks(struct cpuset * cs,struct tmpmasks * tmp)3624 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3625 {
3626 	static cpumask_t new_cpus;
3627 	static nodemask_t new_mems;
3628 	bool cpus_updated;
3629 	bool mems_updated;
3630 	struct cpuset *parent;
3631 retry:
3632 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3633 
3634 	mutex_lock(&cpuset_mutex);
3635 
3636 	/*
3637 	 * We have raced with task attaching. We wait until attaching
3638 	 * is finished, so we won't attach a task to an empty cpuset.
3639 	 */
3640 	if (cs->attach_in_progress) {
3641 		mutex_unlock(&cpuset_mutex);
3642 		goto retry;
3643 	}
3644 
3645 	parent = parent_cs(cs);
3646 	compute_effective_cpumask(&new_cpus, cs, parent);
3647 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3648 
3649 	if (cs->nr_subparts_cpus)
3650 		/*
3651 		 * Make sure that CPUs allocated to child partitions
3652 		 * do not show up in effective_cpus.
3653 		 */
3654 		cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3655 
3656 	if (!tmp || !cs->partition_root_state)
3657 		goto update_tasks;
3658 
3659 	/*
3660 	 * In the unlikely event that a partition root has empty
3661 	 * effective_cpus with tasks, we will have to invalidate child
3662 	 * partitions, if present, by setting nr_subparts_cpus to 0 to
3663 	 * reclaim their cpus.
3664 	 */
3665 	if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3666 	    cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3667 		spin_lock_irq(&callback_lock);
3668 		cs->nr_subparts_cpus = 0;
3669 		cpumask_clear(cs->subparts_cpus);
3670 		spin_unlock_irq(&callback_lock);
3671 		compute_effective_cpumask(&new_cpus, cs, parent);
3672 	}
3673 
3674 	/*
3675 	 * Force the partition to become invalid if either one of
3676 	 * the following conditions hold:
3677 	 * 1) empty effective cpus but not valid empty partition.
3678 	 * 2) parent is invalid or doesn't grant any cpus to child
3679 	 *    partitions.
3680 	 */
3681 	if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3682 	   (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3683 		int old_prs, parent_prs;
3684 
3685 		update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3686 		if (cs->nr_subparts_cpus) {
3687 			spin_lock_irq(&callback_lock);
3688 			cs->nr_subparts_cpus = 0;
3689 			cpumask_clear(cs->subparts_cpus);
3690 			spin_unlock_irq(&callback_lock);
3691 			compute_effective_cpumask(&new_cpus, cs, parent);
3692 		}
3693 
3694 		old_prs = cs->partition_root_state;
3695 		parent_prs = parent->partition_root_state;
3696 		if (is_partition_valid(cs)) {
3697 			spin_lock_irq(&callback_lock);
3698 			make_partition_invalid(cs);
3699 			spin_unlock_irq(&callback_lock);
3700 			if (is_prs_invalid(parent_prs))
3701 				WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3702 			else if (!parent_prs)
3703 				WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3704 			else
3705 				WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3706 			notify_partition_change(cs, old_prs);
3707 		}
3708 		cpuset_force_rebuild();
3709 	}
3710 
3711 	/*
3712 	 * On the other hand, an invalid partition root may be transitioned
3713 	 * back to a regular one.
3714 	 */
3715 	else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3716 		update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3717 		if (is_partition_valid(cs))
3718 			cpuset_force_rebuild();
3719 	}
3720 
3721 update_tasks:
3722 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3723 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3724 
3725 	if (mems_updated)
3726 		check_insane_mems_config(&new_mems);
3727 
3728 	if (is_in_v2_mode())
3729 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3730 				     cpus_updated, mems_updated);
3731 	else
3732 		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3733 					    cpus_updated, mems_updated);
3734 
3735 	mutex_unlock(&cpuset_mutex);
3736 }
3737 
3738 /**
3739  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3740  *
3741  * This function is called after either CPU or memory configuration has
3742  * changed and updates cpuset accordingly.  The top_cpuset is always
3743  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3744  * order to make cpusets transparent (of no affect) on systems that are
3745  * actively using CPU hotplug but making no active use of cpusets.
3746  *
3747  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3748  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3749  * all descendants.
3750  *
3751  * Note that CPU offlining during suspend is ignored.  We don't modify
3752  * cpusets across suspend/resume cycles at all.
3753  */
cpuset_hotplug_workfn(struct work_struct * work)3754 static void cpuset_hotplug_workfn(struct work_struct *work)
3755 {
3756 	static cpumask_t new_cpus;
3757 	static nodemask_t new_mems;
3758 	bool cpus_updated, mems_updated;
3759 	bool on_dfl = is_in_v2_mode();
3760 	struct tmpmasks tmp, *ptmp = NULL;
3761 
3762 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3763 		ptmp = &tmp;
3764 
3765 	mutex_lock(&cpuset_mutex);
3766 
3767 	/* fetch the available cpus/mems and find out which changed how */
3768 	cpumask_copy(&new_cpus, cpu_active_mask);
3769 	new_mems = node_states[N_MEMORY];
3770 
3771 	/*
3772 	 * If subparts_cpus is populated, it is likely that the check below
3773 	 * will produce a false positive on cpus_updated when the cpu list
3774 	 * isn't changed. It is extra work, but it is better to be safe.
3775 	 */
3776 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3777 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3778 
3779 	/*
3780 	 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3781 	 * we assumed that cpus are updated.
3782 	 */
3783 	if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3784 		cpus_updated = true;
3785 
3786 	/* synchronize cpus_allowed to cpu_active_mask */
3787 	if (cpus_updated) {
3788 		spin_lock_irq(&callback_lock);
3789 		if (!on_dfl)
3790 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3791 		/*
3792 		 * Make sure that CPUs allocated to child partitions
3793 		 * do not show up in effective_cpus. If no CPU is left,
3794 		 * we clear the subparts_cpus & let the child partitions
3795 		 * fight for the CPUs again.
3796 		 */
3797 		if (top_cpuset.nr_subparts_cpus) {
3798 			if (cpumask_subset(&new_cpus,
3799 					   top_cpuset.subparts_cpus)) {
3800 				top_cpuset.nr_subparts_cpus = 0;
3801 				cpumask_clear(top_cpuset.subparts_cpus);
3802 			} else {
3803 				cpumask_andnot(&new_cpus, &new_cpus,
3804 					       top_cpuset.subparts_cpus);
3805 			}
3806 		}
3807 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3808 		spin_unlock_irq(&callback_lock);
3809 		/* we don't mess with cpumasks of tasks in top_cpuset */
3810 	}
3811 
3812 	/* synchronize mems_allowed to N_MEMORY */
3813 	if (mems_updated) {
3814 		spin_lock_irq(&callback_lock);
3815 		if (!on_dfl)
3816 			top_cpuset.mems_allowed = new_mems;
3817 		top_cpuset.effective_mems = new_mems;
3818 		spin_unlock_irq(&callback_lock);
3819 		update_tasks_nodemask(&top_cpuset);
3820 	}
3821 
3822 	mutex_unlock(&cpuset_mutex);
3823 
3824 	/* if cpus or mems changed, we need to propagate to descendants */
3825 	if (cpus_updated || mems_updated) {
3826 		struct cpuset *cs;
3827 		struct cgroup_subsys_state *pos_css;
3828 
3829 		rcu_read_lock();
3830 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3831 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3832 				continue;
3833 			rcu_read_unlock();
3834 
3835 			cpuset_hotplug_update_tasks(cs, ptmp);
3836 
3837 			rcu_read_lock();
3838 			css_put(&cs->css);
3839 		}
3840 		rcu_read_unlock();
3841 	}
3842 
3843 	/* rebuild sched domains if cpus_allowed has changed */
3844 	if (cpus_updated || force_rebuild) {
3845 		force_rebuild = false;
3846 		rebuild_sched_domains();
3847 	}
3848 
3849 	free_cpumasks(NULL, ptmp);
3850 }
3851 
cpuset_update_active_cpus(void)3852 void cpuset_update_active_cpus(void)
3853 {
3854 	/*
3855 	 * We're inside cpu hotplug critical region which usually nests
3856 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3857 	 * to a work item to avoid reverse locking order.
3858 	 */
3859 	schedule_work(&cpuset_hotplug_work);
3860 }
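
/*
 * Illustrative sketch (an assumption about the caller, not code from
 * this file): the scheduler's CPU hotplug callbacks in
 * kernel/sched/core.c are the expected entry points here, roughly:
 *
 *	static void cpuset_cpu_active(void)
 *	{
 *		...
 *		cpuset_update_active_cpus();
 *	}
 *
 * The real work then happens asynchronously in cpuset_hotplug_workfn();
 * callers that need the update completed can flush it with
 * cpuset_wait_for_hotplug() below.
 */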

void cpuset_wait_for_hotplug(void)
{
	flush_work(&cpuset_hotplug_work);
}

/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
 * See cpuset_update_active_cpus() for CPU hotplug handling.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
{
	schedule_work(&cpuset_hotplug_work);
	return NOTIFY_OK;
}

static struct notifier_block cpuset_track_online_nodes_nb = {
	.notifier_call = cpuset_track_online_nodes,
	.priority = 10,		/* ??! */
};

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 */
void __init cpuset_init_smp(void)
{
	/*
	 * cpus_allowed/mems_allowed set to v2 values in the initial
	 * cpuset_bind() call will be reset to v1 values in another
	 * cpuset_bind() call when v1 cpuset is mounted.
	 */
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
	top_cpuset.effective_mems = node_states[N_MEMORY];

	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);

	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
	BUG_ON(!cpuset_migrate_mm_wq);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	guarantee_online_cpus(tsk, pmask);
	spin_unlock_irqrestore(&callback_lock, flags);
}
EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
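
/*
 * Illustrative sketch (assumed caller, simplified): sched_setaffinity()
 * style code can use cpuset_cpus_allowed() to clamp a user-supplied
 * mask to the task's cpuset before applying it:
 *
 *	cpumask_var_t cpus_allowed, new_mask;
 *
 *	cpuset_cpus_allowed(p, cpus_allowed);
 *	cpumask_and(new_mask, in_mask, cpus_allowed);
 *
 * (allocation and error handling elided; new_mask would then be applied
 * via set_cpus_allowed_ptr() or similar.)
 */
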
/**
 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
 * @tsk: pointer to task_struct with which the scheduler is struggling
 *
 * Description: In the case that the scheduler cannot find an allowed cpu in
 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
 * mode, however, this value is the same as task_cs(tsk)->effective_cpus,
 * which will not contain a sane cpumask during cases such as cpu hotplugging.
 * This is the absolute last resort for the scheduler and it is only used if
 * _every_ other avenue has been traveled.
 *
 * Returns true if the affinity of @tsk was changed, false otherwise.
 **/

bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	const struct cpumask *cs_mask;
	bool changed = false;

	rcu_read_lock();
	cs_mask = task_cs(tsk)->cpus_allowed;
	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
		do_set_cpus_allowed(tsk, cs_mask);
		changed = true;
	}
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed locklessly and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
	return changed;
}
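
/*
 * Illustrative sketch (simplified from its assumed caller): the
 * scheduler's select_fallback_rq() tries this as one step of its
 * fallback state machine, roughly:
 *
 *	case cpuset:
 *		if (cpuset_cpus_allowed_fallback(p)) {
 *			state = possible;
 *			break;
 *		}
 *		fallthrough;
 *	case possible:
 *		...widen further, e.g. to task_cpu_possible_mask(p)...
 */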

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}
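
/*
 * Example (illustrative): with current->mems_allowed = {0,1}, a
 * nodemask of {1,3} intersects and is therefore valid, while {2,3} is
 * not:
 *
 *	nodemask_t mask = NODE_MASK_NONE;
 *	int ok;
 *
 *	node_set(2, mask);
 *	node_set(3, mask);
 *	ok = cpuset_nodemask_valid_mems_allowed(&mask);
 *
 * ok is 0 here because no node in the mask is allowed.
 */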

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}

/*
 * __cpuset_node_allowed - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If @node is set in
 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes.  If current has access to memory reserves as an oom victim, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock.  The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	tsk_is_oom_victim   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 */
bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs;		/* current cpuset ancestors */
	bool allowed;			/* is allocation in zone z allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(tsk_is_oom_victim(current)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);
	rcu_read_unlock();

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}
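
/*
 * Illustrative sketch (simplified from the allocator-side caller
 * referenced above): get_page_from_freelist() consults this check
 * while walking the zonelist, skipping zones on disallowed nodes:
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *		if (cpusets_enabled() &&
 *		    (alloc_flags & ALLOC_CPUSET) &&
 *		    !__cpuset_node_allowed(zone_to_nid(zone), gfp_mask))
 *			continue;
 *		...try to allocate from this zone...
 *	}
 */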

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}

int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
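
/*
 * Illustrative sketch (an assumed page-cache-side caller, simplified):
 * a page cache allocation can honor spreading along these lines:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *
 *		return __alloc_pages_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */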

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine whether
 * one task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_cont(",cpuset=");
	pr_cont_cgroup_name(cgrp);
	pr_cont(",mems_allowed=%*pbl",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/*
 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 */

void __cpuset_memory_pressure_bump(void)
{
	rcu_read_lock();
	fmeter_markevent(&task_cs(current)->fmeter);
	rcu_read_unlock();
}
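
/*
 * For reference (from include/linux/cpuset.h, approximately): the
 * reclaim path calls through a wrapper so the fmeter is only touched
 * when collection is enabled:
 *
 *	#define cpuset_memory_pressure_bump()				\
 *		do {							\
 *			if (cpuset_memory_pressure_enabled)		\
 *				__cpuset_memory_pressure_bump();	\
 *		} while (0)
 */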

#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	css = task_get_css(tsk, cpuset_cgrp_id);
	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
				current->nsproxy->cgroup_ns);
	css_put(css);
	if (retval >= PATH_MAX)
		retval = -ENAMETOOLONG;
	if (retval < 0)
		goto out_free;
	seq_puts(m, buf);
	seq_putc(m, '\n');
	retval = 0;
out_free:
	kfree(buf);
out:
	return retval;
}
#endif /* CONFIG_PROC_PID_CPUSET */
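
/*
 * Example (userspace view; the path shown is hypothetical):
 *
 *	$ cat /proc/self/cpuset
 *	/background/batch
 */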

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}