1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/list.h>
37 #include <linux/mempolicy.h>
38 #include <linux/mm.h>
39 #include <linux/memory.h>
40 #include <linux/export.h>
41 #include <linux/mount.h>
42 #include <linux/namei.h>
43 #include <linux/pagemap.h>
44 #include <linux/proc_fs.h>
45 #include <linux/rcupdate.h>
46 #include <linux/sched.h>
47 #include <linux/seq_file.h>
48 #include <linux/security.h>
49 #include <linux/slab.h>
50 #include <linux/spinlock.h>
51 #include <linux/stat.h>
52 #include <linux/string.h>
53 #include <linux/time.h>
54 #include <linux/backing-dev.h>
55 #include <linux/sort.h>
56 
57 #include <asm/uaccess.h>
58 #include <linux/atomic.h>
59 #include <linux/mutex.h>
60 #include <linux/workqueue.h>
61 #include <linux/cgroup.h>
62 
63 /*
64  * Tracks how many cpusets are currently defined in the system.
65  * When there is only one cpuset (the root cpuset) we can
66  * short circuit some hooks.
67  */
68 int number_of_cpusets __read_mostly;
69 
70 /* Forward declare cgroup structures */
71 struct cgroup_subsys cpuset_subsys;
72 struct cpuset;
73 
74 /* See "Frequency meter" comments, below. */
75 
76 struct fmeter {
77 	int cnt;		/* unprocessed events count */
78 	int val;		/* most recent output value */
79 	time_t time;		/* clock (secs) when val computed */
80 	spinlock_t lock;	/* guards read or write of above */
81 };
82 
83 struct cpuset {
84 	struct cgroup_subsys_state css;
85 
86 	unsigned long flags;		/* "unsigned long" so bitops work */
87 	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
88 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
89 
90 	struct fmeter fmeter;		/* memory_pressure filter */
91 
92 	/*
93 	 * Tasks are being attached to this cpuset.  Used to prevent
94 	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
95 	 */
96 	int attach_in_progress;
97 
98 	/* partition number for rebuild_sched_domains() */
99 	int pn;
100 
101 	/* for custom sched domain */
102 	int relax_domain_level;
103 
104 	struct work_struct hotplug_work;
105 };
106 
107 /* Retrieve the cpuset for a cgroup */
108 static inline struct cpuset *cgroup_cs(struct cgroup *cont)
109 {
110 	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
111 			    struct cpuset, css);
112 }
113 
114 /* Retrieve the cpuset for a task */
115 static inline struct cpuset *task_cs(struct task_struct *task)
116 {
117 	return container_of(task_subsys_state(task, cpuset_subsys_id),
118 			    struct cpuset, css);
119 }
120 
121 static inline struct cpuset *parent_cs(const struct cpuset *cs)
122 {
123 	struct cgroup *pcgrp = cs->css.cgroup->parent;
124 
125 	if (pcgrp)
126 		return cgroup_cs(pcgrp);
127 	return NULL;
128 }
129 
130 #ifdef CONFIG_NUMA
131 static inline bool task_has_mempolicy(struct task_struct *task)
132 {
133 	return task->mempolicy;
134 }
135 #else
136 static inline bool task_has_mempolicy(struct task_struct *task)
137 {
138 	return false;
139 }
140 #endif
141 
142 
143 /* bits in struct cpuset flags field */
144 typedef enum {
145 	CS_ONLINE,
146 	CS_CPU_EXCLUSIVE,
147 	CS_MEM_EXCLUSIVE,
148 	CS_MEM_HARDWALL,
149 	CS_MEMORY_MIGRATE,
150 	CS_SCHED_LOAD_BALANCE,
151 	CS_SPREAD_PAGE,
152 	CS_SPREAD_SLAB,
153 } cpuset_flagbits_t;
154 
155 /* convenient tests for these bits */
156 static inline bool is_cpuset_online(const struct cpuset *cs)
157 {
158 	return test_bit(CS_ONLINE, &cs->flags);
159 }
160 
161 static inline int is_cpu_exclusive(const struct cpuset *cs)
162 {
163 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
164 }
165 
166 static inline int is_mem_exclusive(const struct cpuset *cs)
167 {
168 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
169 }
170 
171 static inline int is_mem_hardwall(const struct cpuset *cs)
172 {
173 	return test_bit(CS_MEM_HARDWALL, &cs->flags);
174 }
175 
176 static inline int is_sched_load_balance(const struct cpuset *cs)
177 {
178 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
179 }
180 
181 static inline int is_memory_migrate(const struct cpuset *cs)
182 {
183 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
184 }
185 
186 static inline int is_spread_page(const struct cpuset *cs)
187 {
188 	return test_bit(CS_SPREAD_PAGE, &cs->flags);
189 }
190 
191 static inline int is_spread_slab(const struct cpuset *cs)
192 {
193 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
194 }
195 
196 static struct cpuset top_cpuset = {
197 	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
198 		  (1 << CS_MEM_EXCLUSIVE)),
199 };
200 
201 /**
202  * cpuset_for_each_child - traverse online children of a cpuset
203  * @child_cs: loop cursor pointing to the current child
204  * @pos_cgrp: used for iteration
205  * @parent_cs: target cpuset to walk children of
206  *
207  * Walk @child_cs through the online children of @parent_cs.  Must be used
208  * with RCU read locked.
209  */
210 #define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
211 	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
212 		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
213 
214 /**
215  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
216  * @des_cs: loop cursor pointing to the current descendant
217  * @pos_cgrp: used for iteration
218  * @root_cs: target cpuset to walk descendants of
219  *
220  * Walk @des_cs through the online descendants of @root_cs.  Must be used
221  * with RCU read locked.  The caller may modify @pos_cgrp by calling
222  * cgroup_rightmost_descendant() to skip subtree.
223  */
224 #define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)	\
225 	cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
226 		if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
227 
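/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a hypothetical helper showing how the iterator above is meant to
 * be used -- under rcu_read_lock(), with cpuset_mutex assumed held by the
 * caller.  Guarded by #if 0 so it is never compiled.
 */
#if 0
static int count_balanced_descendants(struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;
	int nr = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs)
		if (is_sched_load_balance(cp))
			nr++;
	rcu_read_unlock();

	return nr;
}
#endif
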
228 /*
229  * There are two global mutexes guarding cpuset structures - cpuset_mutex
230  * and callback_mutex.  The latter may nest inside the former.  We also
231  * require taking task_lock() when dereferencing a task's cpuset pointer.
232  * See "The task_lock() exception", at the end of this comment.
233  *
234  * A task must hold both mutexes to modify cpusets.  If a task holds
235  * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
236  * is the only task able to also acquire callback_mutex and be able to
237  * modify cpusets.  It can perform various checks on the cpuset structure
238  * first, knowing nothing will change.  It can also allocate memory while
239  * just holding cpuset_mutex.  While it is performing these checks, various
240  * callback routines can briefly acquire callback_mutex to query cpusets.
241  * Once it is ready to make the changes, it takes callback_mutex, blocking
242  * everyone else.
243  *
244  * Calls to the kernel memory allocator can not be made while holding
245  * callback_mutex, as that would risk double tripping on callback_mutex
246  * from one of the callbacks into the cpuset code from within
247  * __alloc_pages().
248  *
249  * If a task is only holding callback_mutex, then it has read-only
250  * access to cpusets.
251  *
252  * Now, the task_struct fields mems_allowed and mempolicy may be changed
253  * by another task, so we use alloc_lock in the task_struct to protect
254  * them.
255  *
256  * The cpuset_common_file_read() handlers only hold callback_mutex across
257  * small pieces of code, such as when reading out possibly multi-word
258  * cpumasks and nodemasks.
259  *
260  * Accessing a task's cpuset should be done in accordance with the
261  * guidelines for accessing subsystem state in kernel/cgroup.c
262  */
263 
264 static DEFINE_MUTEX(cpuset_mutex);
265 static DEFINE_MUTEX(callback_mutex);
266 
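/*
 * Illustrative nesting sketch (editor's addition, not part of the original
 * file): per the rules above, a hypothetical writer holds cpuset_mutex for
 * the whole update and nests callback_mutex only around publishing the new
 * values, mirroring what update_cpumask() below actually does.  Guarded by
 * #if 0 so it is never compiled.
 */
#if 0
static void example_publish_cpus(struct cpuset *cs, const struct cpumask *newmask)
{
	mutex_lock(&cpuset_mutex);
	/* validate and allocate here; readers may still take callback_mutex */
	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, newmask);
	mutex_unlock(&callback_mutex);
	mutex_unlock(&cpuset_mutex);
}
#endif
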
267 /*
268  * CPU / memory hotplug is handled asynchronously.
269  */
270 static struct workqueue_struct *cpuset_propagate_hotplug_wq;
271 
272 static void cpuset_hotplug_workfn(struct work_struct *work);
273 static void cpuset_propagate_hotplug_workfn(struct work_struct *work);
274 static void schedule_cpuset_propagate_hotplug(struct cpuset *cs);
275 
276 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
277 
278 /*
279  * This is ugly, but preserves the userspace API for existing cpuset
280  * users. If someone tries to mount the "cpuset" filesystem, we
281  * silently switch it to mount "cgroup" instead
282  */
283 static struct dentry *cpuset_mount(struct file_system_type *fs_type,
284 			 int flags, const char *unused_dev_name, void *data)
285 {
286 	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
287 	struct dentry *ret = ERR_PTR(-ENODEV);
288 	if (cgroup_fs) {
289 		char mountopts[] =
290 			"cpuset,noprefix,"
291 			"release_agent=/sbin/cpuset_release_agent";
292 		ret = cgroup_fs->mount(cgroup_fs, flags,
293 					   unused_dev_name, mountopts);
294 		put_filesystem(cgroup_fs);
295 	}
296 	return ret;
297 }
298 
299 static struct file_system_type cpuset_fs_type = {
300 	.name = "cpuset",
301 	.mount = cpuset_mount,
302 };
303 
304 /*
305  * Return in pmask the portion of a cpusets's cpus_allowed that
306  * are online.  If none are online, walk up the cpuset hierarchy
307  * until we find one that does have some online cpus.  If we get
308  * all the way to the top and still haven't found any online cpus,
309  * return cpu_online_mask.  Or if passed a NULL cs from an exiting
310  * task, return cpu_online_mask.
311  *
312  * One way or another, we guarantee to return some non-empty subset
313  * of cpu_online_mask.
314  *
315  * Call with callback_mutex held.
316  */
317 
318 static void guarantee_online_cpus(const struct cpuset *cs,
319 				  struct cpumask *pmask)
320 {
321 	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
322 		cs = parent_cs(cs);
323 	if (cs)
324 		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
325 	else
326 		cpumask_copy(pmask, cpu_online_mask);
327 	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
328 }
329 
330 /*
331  * Return in *pmask the portion of a cpusets's mems_allowed that
332  * are online, with memory.  If none are online with memory, walk
333  * up the cpuset hierarchy until we find one that does have some
334  * online mems.  If we get all the way to the top and still haven't
335  * found any online mems, return node_states[N_MEMORY].
336  *
337  * One way or another, we guarantee to return some non-empty subset
338  * of node_states[N_MEMORY].
339  *
340  * Call with callback_mutex held.
341  */
342 
343 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
344 {
345 	while (cs && !nodes_intersects(cs->mems_allowed,
346 					node_states[N_MEMORY]))
347 		cs = parent_cs(cs);
348 	if (cs)
349 		nodes_and(*pmask, cs->mems_allowed,
350 					node_states[N_MEMORY]);
351 	else
352 		*pmask = node_states[N_MEMORY];
353 	BUG_ON(!nodes_intersects(*pmask, node_states[N_MEMORY]));
354 }
355 
356 /*
357  * update task's spread flag if cpuset's page/slab spread flag is set
358  *
359  * Called with callback_mutex/cpuset_mutex held
360  */
361 static void cpuset_update_task_spread_flag(struct cpuset *cs,
362 					struct task_struct *tsk)
363 {
364 	if (is_spread_page(cs))
365 		tsk->flags |= PF_SPREAD_PAGE;
366 	else
367 		tsk->flags &= ~PF_SPREAD_PAGE;
368 	if (is_spread_slab(cs))
369 		tsk->flags |= PF_SPREAD_SLAB;
370 	else
371 		tsk->flags &= ~PF_SPREAD_SLAB;
372 }
373 
374 /*
375  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
376  *
377  * One cpuset is a subset of another if all its allowed CPUs and
378  * Memory Nodes are a subset of the other, and its exclusive flags
379  * are only set if the other's are set.  Call holding cpuset_mutex.
380  */
381 
382 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
383 {
384 	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
385 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
386 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
387 		is_mem_exclusive(p) <= is_mem_exclusive(q);
388 }
389 
390 /**
391  * alloc_trial_cpuset - allocate a trial cpuset
392  * @cs: the cpuset that the trial cpuset duplicates
393  */
394 static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
395 {
396 	struct cpuset *trial;
397 
398 	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
399 	if (!trial)
400 		return NULL;
401 
402 	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
403 		kfree(trial);
404 		return NULL;
405 	}
406 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
407 
408 	return trial;
409 }
410 
411 /**
412  * free_trial_cpuset - free the trial cpuset
413  * @trial: the trial cpuset to be freed
414  */
415 static void free_trial_cpuset(struct cpuset *trial)
416 {
417 	free_cpumask_var(trial->cpus_allowed);
418 	kfree(trial);
419 }
420 
421 /*
422  * validate_change() - Used to validate that any proposed cpuset change
423  *		       follows the structural rules for cpusets.
424  *
425  * If we replaced the flag and mask values of the current cpuset
426  * (cur) with those values in the trial cpuset (trial), would
427  * our various subset and exclusive rules still be valid?  Presumes
428  * cpuset_mutex held.
429  *
430  * 'cur' is the address of an actual, in-use cpuset.  Operations
431  * such as list traversal that depend on the actual address of the
432  * cpuset in the list must use cur below, not trial.
433  *
434  * 'trial' is the address of bulk structure copy of cur, with
435  * perhaps one or more of the fields cpus_allowed, mems_allowed,
436  * or flags changed to new, trial values.
437  *
438  * Return 0 if valid, -errno if not.
439  */
440 
441 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
442 {
443 	struct cgroup *cont;
444 	struct cpuset *c, *par;
445 	int ret;
446 
447 	rcu_read_lock();
448 
449 	/* Each of our child cpusets must be a subset of us */
450 	ret = -EBUSY;
451 	cpuset_for_each_child(c, cont, cur)
452 		if (!is_cpuset_subset(c, trial))
453 			goto out;
454 
455 	/* Remaining checks don't apply to root cpuset */
456 	ret = 0;
457 	if (cur == &top_cpuset)
458 		goto out;
459 
460 	par = parent_cs(cur);
461 
462 	/* We must be a subset of our parent cpuset */
463 	ret = -EACCES;
464 	if (!is_cpuset_subset(trial, par))
465 		goto out;
466 
467 	/*
468 	 * If either I or some sibling (!= me) is exclusive, we can't
469 	 * overlap
470 	 */
471 	ret = -EINVAL;
472 	cpuset_for_each_child(c, cont, par) {
473 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
474 		    c != cur &&
475 		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
476 			goto out;
477 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
478 		    c != cur &&
479 		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
480 			goto out;
481 	}
482 
483 	/*
484 	 * Cpusets with tasks - existing or newly being attached - can't
485 	 * have empty cpus_allowed or mems_allowed.
486 	 */
487 	ret = -ENOSPC;
488 	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
489 	    (cpumask_empty(trial->cpus_allowed) ||
490 	     nodes_empty(trial->mems_allowed)))
491 		goto out;
492 
493 	ret = 0;
494 out:
495 	rcu_read_unlock();
496 	return ret;
497 }
498 
499 #ifdef CONFIG_SMP
500 /*
501  * Helper routine for generate_sched_domains().
502  * Do cpusets a, b have overlapping cpus_allowed masks?
503  */
504 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
505 {
506 	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
507 }
508 
509 static void
510 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
511 {
512 	if (dattr->relax_domain_level < c->relax_domain_level)
513 		dattr->relax_domain_level = c->relax_domain_level;
514 	return;
515 }
516 
517 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
518 				    struct cpuset *root_cs)
519 {
520 	struct cpuset *cp;
521 	struct cgroup *pos_cgrp;
522 
523 	rcu_read_lock();
524 	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
525 		/* skip the whole subtree if @cp doesn't have any CPU */
526 		if (cpumask_empty(cp->cpus_allowed)) {
527 			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
528 			continue;
529 		}
530 
531 		if (is_sched_load_balance(cp))
532 			update_domain_attr(dattr, cp);
533 	}
534 	rcu_read_unlock();
535 }
536 
537 /*
538  * generate_sched_domains()
539  *
540  * This function builds a partial partition of the system's CPUs.
541  * A 'partial partition' is a set of non-overlapping subsets whose
542  * union is a subset of the system's CPUs.
543  * The output of this function needs to be passed to kernel/sched.c
544  * partition_sched_domains() routine, which will rebuild the scheduler's
545  * load balancing domains (sched domains) as specified by that partial
546  * partition.
547  *
548  * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
549  * for a background explanation of this.
550  *
551  * Does not return errors, on the theory that the callers of this
552  * routine would rather not worry about failures to rebuild sched
553  * domains when operating in the severe memory shortage situations
554  * that could cause allocation failures below.
555  *
556  * Must be called with cpuset_mutex held.
557  *
558  * The three key local variables below are:
559  *    q  - a linked-list queue of cpuset pointers, used to implement a
560  *	   top-down scan of all cpusets.  This scan loads a pointer
561  *	   to each cpuset marked is_sched_load_balance into the
562  *	   array 'csa'.  For our purposes, rebuilding the schedulers
563  *	   sched domains, we can ignore !is_sched_load_balance cpusets.
564  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
565  *	   that need to be load balanced, for convenient iterative
566  *	   access by the subsequent code that finds the best partition,
567  *	   i.e the set of domains (subsets) of CPUs such that the
568  *	   cpus_allowed of every cpuset marked is_sched_load_balance
569  *	   is a subset of one of these domains, while there are as
570  *	   many such domains as possible, each as small as possible.
571  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
572  *	   the kernel/sched.c routine partition_sched_domains() in a
573  *	   convenient format, that can be easily compared to the prior
574  *	   value to determine what partition elements (sched domains)
575  *	   were changed (added or removed.)
576  *
577  * Finding the best partition (set of domains):
578  *	The triple nested loops below over i, j, k scan over the
579  *	load balanced cpusets (using the array of cpuset pointers in
580  *	csa[]) looking for pairs of cpusets that have overlapping
581  *	cpus_allowed, but which don't have the same 'pn' partition
582  *	number, and gives them the same partition number.  It keeps
583  *	looping on the 'restart' label until it can no longer find
584  *	any such pairs.
585  *
586  *	The union of the cpus_allowed masks from the set of
587  *	all cpusets having the same 'pn' value then form the one
588  *	element of the partition (one sched domain) to be passed to
589  *	partition_sched_domains().
590  */
591 static int generate_sched_domains(cpumask_var_t **domains,
592 			struct sched_domain_attr **attributes)
593 {
594 	struct cpuset *cp;	/* scans q */
595 	struct cpuset **csa;	/* array of all cpuset ptrs */
596 	int csn;		/* how many cpuset ptrs in csa so far */
597 	int i, j, k;		/* indices for partition finding loops */
598 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
599 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
600 	int ndoms = 0;		/* number of sched domains in result */
601 	int nslot;		/* next empty doms[] struct cpumask slot */
602 	struct cgroup *pos_cgrp;
603 
604 	doms = NULL;
605 	dattr = NULL;
606 	csa = NULL;
607 
608 	/* Special case for the 99% of systems with one, full, sched domain */
609 	if (is_sched_load_balance(&top_cpuset)) {
610 		ndoms = 1;
611 		doms = alloc_sched_domains(ndoms);
612 		if (!doms)
613 			goto done;
614 
615 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
616 		if (dattr) {
617 			*dattr = SD_ATTR_INIT;
618 			update_domain_attr_tree(dattr, &top_cpuset);
619 		}
620 		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
621 
622 		goto done;
623 	}
624 
625 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
626 	if (!csa)
627 		goto done;
628 	csn = 0;
629 
630 	rcu_read_lock();
631 	cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
632 		/*
633 		 * Continue traversing beyond @cp iff @cp has some CPUs and
634 		 * isn't load balancing.  The former is obvious.  The
635 		 * latter: All child cpusets contain a subset of the
636 		 * parent's cpus, so just skip them, and then we call
637 		 * update_domain_attr_tree() to calc relax_domain_level of
638 		 * the corresponding sched domain.
639 		 */
640 		if (!cpumask_empty(cp->cpus_allowed) &&
641 		    !is_sched_load_balance(cp))
642 			continue;
643 
644 		if (is_sched_load_balance(cp))
645 			csa[csn++] = cp;
646 
647 		/* skip @cp's subtree */
648 		pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
649 	}
650 	rcu_read_unlock();
651 
652 	for (i = 0; i < csn; i++)
653 		csa[i]->pn = i;
654 	ndoms = csn;
655 
656 restart:
657 	/* Find the best partition (set of sched domains) */
658 	for (i = 0; i < csn; i++) {
659 		struct cpuset *a = csa[i];
660 		int apn = a->pn;
661 
662 		for (j = 0; j < csn; j++) {
663 			struct cpuset *b = csa[j];
664 			int bpn = b->pn;
665 
666 			if (apn != bpn && cpusets_overlap(a, b)) {
667 				for (k = 0; k < csn; k++) {
668 					struct cpuset *c = csa[k];
669 
670 					if (c->pn == bpn)
671 						c->pn = apn;
672 				}
673 				ndoms--;	/* one less element */
674 				goto restart;
675 			}
676 		}
677 	}
678 
679 	/*
680 	 * Now we know how many domains to create.
681 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
682 	 */
683 	doms = alloc_sched_domains(ndoms);
684 	if (!doms)
685 		goto done;
686 
687 	/*
688 	 * The rest of the code, including the scheduler, can deal with
689 	 * dattr==NULL case. No need to abort if alloc fails.
690 	 */
691 	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
692 
693 	for (nslot = 0, i = 0; i < csn; i++) {
694 		struct cpuset *a = csa[i];
695 		struct cpumask *dp;
696 		int apn = a->pn;
697 
698 		if (apn < 0) {
699 			/* Skip completed partitions */
700 			continue;
701 		}
702 
703 		dp = doms[nslot];
704 
705 		if (nslot == ndoms) {
706 			static int warnings = 10;
707 			if (warnings) {
708 				printk(KERN_WARNING
709 				 "rebuild_sched_domains confused:"
710 				  " nslot %d, ndoms %d, csn %d, i %d,"
711 				  " apn %d\n",
712 				  nslot, ndoms, csn, i, apn);
713 				warnings--;
714 			}
715 			continue;
716 		}
717 
718 		cpumask_clear(dp);
719 		if (dattr)
720 			*(dattr + nslot) = SD_ATTR_INIT;
721 		for (j = i; j < csn; j++) {
722 			struct cpuset *b = csa[j];
723 
724 			if (apn == b->pn) {
725 				cpumask_or(dp, dp, b->cpus_allowed);
726 				if (dattr)
727 					update_domain_attr_tree(dattr + nslot, b);
728 
729 				/* Done with this partition */
730 				b->pn = -1;
731 			}
732 		}
733 		nslot++;
734 	}
735 	BUG_ON(nslot != ndoms);
736 
737 done:
738 	kfree(csa);
739 
740 	/*
741 	 * Fallback to the default domain if kmalloc() failed.
742 	 * See comments in partition_sched_domains().
743 	 */
744 	if (doms == NULL)
745 		ndoms = 1;
746 
747 	*domains    = doms;
748 	*attributes = dattr;
749 	return ndoms;
750 }
751 
752 /*
753  * Rebuild scheduler domains.
754  *
755  * If the flag 'sched_load_balance' of any cpuset with non-empty
756  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
757  * which has that flag enabled, or if any cpuset with a non-empty
758  * 'cpus' is removed, then call this routine to rebuild the
759  * scheduler's dynamic sched domains.
760  *
761  * Call with cpuset_mutex held.  Takes get_online_cpus().
762  */
763 static void rebuild_sched_domains_locked(void)
764 {
765 	struct sched_domain_attr *attr;
766 	cpumask_var_t *doms;
767 	int ndoms;
768 
769 	lockdep_assert_held(&cpuset_mutex);
770 	get_online_cpus();
771 
772 	/*
773 	 * We have raced with CPU hotplug.  Don't do anything, to avoid
774 	 * passing doms with an offlined cpu to partition_sched_domains().
775 	 * The hotplug work item will rebuild the sched domains anyway.
776 	 */
777 	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
778 		goto out;
779 
780 	/* Generate domain masks and attrs */
781 	ndoms = generate_sched_domains(&doms, &attr);
782 
783 	/* Have scheduler rebuild the domains */
784 	partition_sched_domains(ndoms, doms, attr);
785 out:
786 	put_online_cpus();
787 }
788 #else /* !CONFIG_SMP */
789 static void rebuild_sched_domains_locked(void)
790 {
791 }
792 #endif /* CONFIG_SMP */
793 
794 void rebuild_sched_domains(void)
795 {
796 	mutex_lock(&cpuset_mutex);
797 	rebuild_sched_domains_locked();
798 	mutex_unlock(&cpuset_mutex);
799 }
800 
801 /**
802  * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
803  * @tsk: task to test
804  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
805  *
806  * Call with cpuset_mutex held.  May take callback_mutex during call.
807  * Called for each task in a cgroup by cgroup_scan_tasks().
808  * Return nonzero if this task's cpus_allowed mask should be changed (in other
809  * words, if its mask is not equal to its cpuset's mask).
810  */
811 static int cpuset_test_cpumask(struct task_struct *tsk,
812 			       struct cgroup_scanner *scan)
813 {
814 	return !cpumask_equal(&tsk->cpus_allowed,
815 			(cgroup_cs(scan->cg))->cpus_allowed);
816 }
817 
818 /**
819  * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
820  * @tsk: task to test
821  * @scan: struct cgroup_scanner containing the cgroup of the task
822  *
823  * Called by cgroup_scan_tasks() for each task in a cgroup whose
824  * cpus_allowed mask needs to be changed.
825  *
826  * We don't need to re-check for the cgroup/cpuset membership, since we're
827  * holding cpuset_mutex at this point.
828  */
829 static void cpuset_change_cpumask(struct task_struct *tsk,
830 				  struct cgroup_scanner *scan)
831 {
832 	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
833 }
834 
835 /**
836  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
837  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
838  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
839  *
840  * Called with cpuset_mutex held
841  *
842  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
843  * calling callback functions for each.
844  *
845  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
846  * if @heap != NULL.
847  */
848 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
849 {
850 	struct cgroup_scanner scan;
851 
852 	scan.cg = cs->css.cgroup;
853 	scan.test_task = cpuset_test_cpumask;
854 	scan.process_task = cpuset_change_cpumask;
855 	scan.heap = heap;
856 	cgroup_scan_tasks(&scan);
857 }
858 
859 /**
860  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
861  * @cs: the cpuset to consider
862  * @buf: buffer of cpu numbers written to this cpuset
863  */
864 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
865 			  const char *buf)
866 {
867 	struct ptr_heap heap;
868 	int retval;
869 	int is_load_balanced;
870 
871 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
872 	if (cs == &top_cpuset)
873 		return -EACCES;
874 
875 	/*
876 	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
877 	 * Since cpulist_parse() fails on an empty mask, we special case
878 	 * that parsing.  The validate_change() call ensures that cpusets
879 	 * with tasks have cpus.
880 	 */
881 	if (!*buf) {
882 		cpumask_clear(trialcs->cpus_allowed);
883 	} else {
884 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
885 		if (retval < 0)
886 			return retval;
887 
888 		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
889 			return -EINVAL;
890 	}
891 	retval = validate_change(cs, trialcs);
892 	if (retval < 0)
893 		return retval;
894 
895 	/* Nothing to do if the cpus didn't change */
896 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
897 		return 0;
898 
899 	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
900 	if (retval)
901 		return retval;
902 
903 	is_load_balanced = is_sched_load_balance(trialcs);
904 
905 	mutex_lock(&callback_mutex);
906 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
907 	mutex_unlock(&callback_mutex);
908 
909 	/*
910 	 * Scan tasks in the cpuset, and update the cpumasks of any
911 	 * that need an update.
912 	 */
913 	update_tasks_cpumask(cs, &heap);
914 
915 	heap_free(&heap);
916 
917 	if (is_load_balanced)
918 		rebuild_sched_domains_locked();
919 	return 0;
920 }
921 
922 /*
923  * cpuset_migrate_mm
924  *
925  *    Migrate memory region from one set of nodes to another.
926  *
927  *    Temporarily set the task's mems_allowed to the target nodes of migration,
928  *    so that the migration code can allocate pages on these nodes.
929  *
930  *    Call holding cpuset_mutex, so current's cpuset won't change
931  *    during this call, as cpuset_mutex holds off any cpuset_attach()
932  *    calls.  Therefore we don't need to take task_lock around the
933  *    call to guarantee_online_mems(), as we know no one is changing
934  *    our task's cpuset.
935  *
936  *    While the mm_struct we are migrating is typically from some
937  *    other task, the task_struct mems_allowed that we are hacking
938  *    is for our current task, which must allocate new pages for that
939  *    migrating memory region.
940  */
941 
942 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
943 							const nodemask_t *to)
944 {
945 	struct task_struct *tsk = current;
946 
947 	tsk->mems_allowed = *to;
948 
949 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
950 
951 	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
952 }
953 
954 /*
955  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
956  * @tsk: the task to change
957  * @newmems: new nodes that the task will be set
958  *
959  * In order to avoid seeing no nodes if the old and new nodes are disjoint,
960  * we structure updates as setting all new allowed nodes, then clearing newly
961  * disallowed ones.
962  */
963 static void cpuset_change_task_nodemask(struct task_struct *tsk,
964 					nodemask_t *newmems)
965 {
966 	bool need_loop;
967 
968 	/*
969 	 * Allow tasks that have access to memory reserves because they have
970 	 * been OOM killed to get memory anywhere.
971 	 */
972 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
973 		return;
974 	if (current->flags & PF_EXITING) /* Let dying task have memory */
975 		return;
976 
977 	task_lock(tsk);
978 	/*
979 	 * Determine if a loop is necessary if another thread is doing
980 	 * get_mems_allowed().  If at least one node remains unchanged and
981 	 * tsk does not have a mempolicy, then an empty nodemask will not be
982 	 * possible when mems_allowed is larger than a word.
983 	 */
984 	need_loop = task_has_mempolicy(tsk) ||
985 			!nodes_intersects(*newmems, tsk->mems_allowed);
986 
987 	if (need_loop)
988 		write_seqcount_begin(&tsk->mems_allowed_seq);
989 
990 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
991 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
992 
993 	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
994 	tsk->mems_allowed = *newmems;
995 
996 	if (need_loop)
997 		write_seqcount_end(&tsk->mems_allowed_seq);
998 
999 	task_unlock(tsk);
1000 }
1001 
1002 /*
1003  * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1004  * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1005  * memory_migrate flag is set. Called with cpuset_mutex held.
1006  */
1007 static void cpuset_change_nodemask(struct task_struct *p,
1008 				   struct cgroup_scanner *scan)
1009 {
1010 	struct mm_struct *mm;
1011 	struct cpuset *cs;
1012 	int migrate;
1013 	const nodemask_t *oldmem = scan->data;
1014 	static nodemask_t newmems;	/* protected by cpuset_mutex */
1015 
1016 	cs = cgroup_cs(scan->cg);
1017 	guarantee_online_mems(cs, &newmems);
1018 
1019 	cpuset_change_task_nodemask(p, &newmems);
1020 
1021 	mm = get_task_mm(p);
1022 	if (!mm)
1023 		return;
1024 
1025 	migrate = is_memory_migrate(cs);
1026 
1027 	mpol_rebind_mm(mm, &cs->mems_allowed);
1028 	if (migrate)
1029 		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1030 	mmput(mm);
1031 }
1032 
1033 static void *cpuset_being_rebound;
1034 
1035 /**
1036  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1037  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1038  * @oldmem: old mems_allowed of cpuset cs
1039  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1040  *
1041  * Called with cpuset_mutex held
1042  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1043  * if @heap != NULL.
1044  */
1045 static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1046 				 struct ptr_heap *heap)
1047 {
1048 	struct cgroup_scanner scan;
1049 
1050 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1051 
1052 	scan.cg = cs->css.cgroup;
1053 	scan.test_task = NULL;
1054 	scan.process_task = cpuset_change_nodemask;
1055 	scan.heap = heap;
1056 	scan.data = (nodemask_t *)oldmem;
1057 
1058 	/*
1059 	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1060 	 * take while holding tasklist_lock.  Forks can happen - the
1061 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1062 	 * and rebind their vma mempolicies too.  Because we still hold
1063 	 * the global cpuset_mutex, we know that no other rebind effort
1064 	 * will be contending for the global variable cpuset_being_rebound.
1065 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1066 	 * is idempotent.  Also migrate pages in each mm to new nodes.
1067 	 */
1068 	cgroup_scan_tasks(&scan);
1069 
1070 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1071 	cpuset_being_rebound = NULL;
1072 }
1073 
1074 /*
1075  * Handle user request to change the 'mems' memory placement
1076  * of a cpuset.  Needs to validate the request, update the
1077  * cpuset's mems_allowed, and for each task in the cpuset,
1078  * update mems_allowed and rebind task's mempolicy and any vma
1079  * mempolicies and, if the cpuset is marked 'memory_migrate',
1080  * migrate the task's pages to the new memory.
1081  *
1082  * Call with cpuset_mutex held.  May take callback_mutex during call.
1083  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1084  * lock each such task's mm->mmap_sem, scan its vmas and rebind
1085  * their mempolicies to the cpuset's new mems_allowed.
1086  */
1087 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1088 			   const char *buf)
1089 {
1090 	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
1091 	int retval;
1092 	struct ptr_heap heap;
1093 
1094 	if (!oldmem)
1095 		return -ENOMEM;
1096 
1097 	/*
1098 	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1099 	 * it's read-only
1100 	 */
1101 	if (cs == &top_cpuset) {
1102 		retval = -EACCES;
1103 		goto done;
1104 	}
1105 
1106 	/*
1107 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1108 	 * Since nodelist_parse() fails on an empty mask, we special case
1109 	 * that parsing.  The validate_change() call ensures that cpusets
1110 	 * with tasks have memory.
1111 	 */
1112 	if (!*buf) {
1113 		nodes_clear(trialcs->mems_allowed);
1114 	} else {
1115 		retval = nodelist_parse(buf, trialcs->mems_allowed);
1116 		if (retval < 0)
1117 			goto done;
1118 
1119 		if (!nodes_subset(trialcs->mems_allowed,
1120 				node_states[N_MEMORY])) {
1121 			retval =  -EINVAL;
1122 			goto done;
1123 		}
1124 	}
1125 	*oldmem = cs->mems_allowed;
1126 	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
1127 		retval = 0;		/* Too easy - nothing to do */
1128 		goto done;
1129 	}
1130 	retval = validate_change(cs, trialcs);
1131 	if (retval < 0)
1132 		goto done;
1133 
1134 	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1135 	if (retval < 0)
1136 		goto done;
1137 
1138 	mutex_lock(&callback_mutex);
1139 	cs->mems_allowed = trialcs->mems_allowed;
1140 	mutex_unlock(&callback_mutex);
1141 
1142 	update_tasks_nodemask(cs, oldmem, &heap);
1143 
1144 	heap_free(&heap);
1145 done:
1146 	NODEMASK_FREE(oldmem);
1147 	return retval;
1148 }
1149 
1150 int current_cpuset_is_being_rebound(void)
1151 {
1152 	return task_cs(current) == cpuset_being_rebound;
1153 }
1154 
1155 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1156 {
1157 #ifdef CONFIG_SMP
1158 	if (val < -1 || val >= sched_domain_level_max)
1159 		return -EINVAL;
1160 #endif
1161 
1162 	if (val != cs->relax_domain_level) {
1163 		cs->relax_domain_level = val;
1164 		if (!cpumask_empty(cs->cpus_allowed) &&
1165 		    is_sched_load_balance(cs))
1166 			rebuild_sched_domains_locked();
1167 	}
1168 
1169 	return 0;
1170 }
1171 
1172 /*
1173  * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1174  * @tsk: task to be updated
1175  * @scan: struct cgroup_scanner containing the cgroup of the task
1176  *
1177  * Called by cgroup_scan_tasks() for each task in a cgroup.
1178  *
1179  * We don't need to re-check for the cgroup/cpuset membership, since we're
1180  * holding cpuset_mutex at this point.
1181  */
1182 static void cpuset_change_flag(struct task_struct *tsk,
1183 				struct cgroup_scanner *scan)
1184 {
1185 	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1186 }
1187 
1188 /*
1189  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1190  * @cs: the cpuset in which each task's spread flags needs to be changed
1191  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1192  *
1193  * Called with cpuset_mutex held
1194  *
1195  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1196  * calling callback functions for each.
1197  *
1198  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1199  * if @heap != NULL.
1200  */
1201 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1202 {
1203 	struct cgroup_scanner scan;
1204 
1205 	scan.cg = cs->css.cgroup;
1206 	scan.test_task = NULL;
1207 	scan.process_task = cpuset_change_flag;
1208 	scan.heap = heap;
1209 	cgroup_scan_tasks(&scan);
1210 }
1211 
1212 /*
1213  * update_flag - read a 0 or a 1 in a file and update associated flag
1214  * bit:		the bit to update (see cpuset_flagbits_t)
1215  * cs:		the cpuset to update
1216  * turning_on: 	whether the flag is being set or cleared
1217  *
1218  * Call with cpuset_mutex held.
1219  */
1220 
1221 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1222 		       int turning_on)
1223 {
1224 	struct cpuset *trialcs;
1225 	int balance_flag_changed;
1226 	int spread_flag_changed;
1227 	struct ptr_heap heap;
1228 	int err;
1229 
1230 	trialcs = alloc_trial_cpuset(cs);
1231 	if (!trialcs)
1232 		return -ENOMEM;
1233 
1234 	if (turning_on)
1235 		set_bit(bit, &trialcs->flags);
1236 	else
1237 		clear_bit(bit, &trialcs->flags);
1238 
1239 	err = validate_change(cs, trialcs);
1240 	if (err < 0)
1241 		goto out;
1242 
1243 	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1244 	if (err < 0)
1245 		goto out;
1246 
1247 	balance_flag_changed = (is_sched_load_balance(cs) !=
1248 				is_sched_load_balance(trialcs));
1249 
1250 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1251 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1252 
1253 	mutex_lock(&callback_mutex);
1254 	cs->flags = trialcs->flags;
1255 	mutex_unlock(&callback_mutex);
1256 
1257 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1258 		rebuild_sched_domains_locked();
1259 
1260 	if (spread_flag_changed)
1261 		update_tasks_flags(cs, &heap);
1262 	heap_free(&heap);
1263 out:
1264 	free_trial_cpuset(trialcs);
1265 	return err;
1266 }
1267 
1268 /*
1269  * Frequency meter - How fast is some event occurring?
1270  *
1271  * These routines manage a digitally filtered, constant time based,
1272  * event frequency meter.  There are four routines:
1273  *   fmeter_init() - initialize a frequency meter.
1274  *   fmeter_markevent() - called each time the event happens.
1275  *   fmeter_getrate() - returns the recent rate of such events.
1276  *   fmeter_update() - internal routine used to update fmeter.
1277  *
1278  * A common data structure is passed to each of these routines,
1279  * which is used to keep track of the state required to manage the
1280  * frequency meter and its digital filter.
1281  *
1282  * The filter works on the number of events marked per unit time.
1283  * The filter is single-pole low-pass recursive (IIR).  The time unit
1284  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1285  * simulate 3 decimal digits of precision (multiplied by 1000).
1286  *
1287  * With an FM_COEF of 933, and a time base of 1 second, the filter
1288  * has a half-life of 10 seconds, meaning that if the events quit
1289  * happening, then the rate returned from the fmeter_getrate()
1290  * will be cut in half each 10 seconds, until it converges to zero.
1291  *
1292  * It is not worth doing a real infinitely recursive filter.  If more
1293  * than FM_MAXTICKS ticks have elapsed since the last filter event,
1294  * just compute FM_MAXTICKS ticks worth, by which point the level
1295  * will be stable.
1296  *
1297  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1298  * arithmetic overflow in the fmeter_update() routine.
1299  *
1300  * Given the simple 32 bit integer arithmetic used, this meter works
1301  * best for reporting rates between one per millisecond (msec) and
1302  * one per 32 (approx) seconds.  At constant rates faster than one
1303  * per msec it maxes out at values just under 1,000,000.  At constant
1304  * rates between one per msec, and one per second it will stabilize
1305  * to a value N*1000, where N is the rate of events per second.
1306  * At constant rates between one per second and one per 32 seconds,
1307  * it will be choppy, moving up on the seconds that have an event,
1308  * and then decaying until the next event.  At rates slower than
1309  * about one in 32 seconds, it decays all the way back to zero between
1310  * each event.
1311  */
1312 
1313 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
1314 #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1315 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
1316 #define FM_SCALE 1000		/* faux fixed point scale */
1317 
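/*
 * Worked example (editor's addition, not part of the original file): with
 * FM_COEF == 933 and FM_SCALE == 1000, each elapsed second multiplies val
 * by 933/1000, so ten seconds scale it by 0.933^10 ~= 0.5 -- the ten
 * second half-life described above.  Each fmeter_markevent() adds FM_SCALE
 * to cnt, and fmeter_update() folds (FM_SCALE - FM_COEF)/FM_SCALE of cnt
 * into val, so a steady rate of N events per second settles near N * 1000.
 */
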
1318 /* Initialize a frequency meter */
1319 static void fmeter_init(struct fmeter *fmp)
1320 {
1321 	fmp->cnt = 0;
1322 	fmp->val = 0;
1323 	fmp->time = 0;
1324 	spin_lock_init(&fmp->lock);
1325 }
1326 
1327 /* Internal meter update - process cnt events and update value */
1328 static void fmeter_update(struct fmeter *fmp)
1329 {
1330 	time_t now = get_seconds();
1331 	time_t ticks = now - fmp->time;
1332 
1333 	if (ticks == 0)
1334 		return;
1335 
1336 	ticks = min(FM_MAXTICKS, ticks);
1337 	while (ticks-- > 0)
1338 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1339 	fmp->time = now;
1340 
1341 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1342 	fmp->cnt = 0;
1343 }
1344 
1345 /* Process any previous ticks, then bump cnt by one (times scale). */
1346 static void fmeter_markevent(struct fmeter *fmp)
1347 {
1348 	spin_lock(&fmp->lock);
1349 	fmeter_update(fmp);
1350 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1351 	spin_unlock(&fmp->lock);
1352 }
1353 
1354 /* Process any previous ticks, then return current value. */
1355 static int fmeter_getrate(struct fmeter *fmp)
1356 {
1357 	int val;
1358 
1359 	spin_lock(&fmp->lock);
1360 	fmeter_update(fmp);
1361 	val = fmp->val;
1362 	spin_unlock(&fmp->lock);
1363 	return val;
1364 }
1365 
1366 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1367 static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1368 {
1369 	struct cpuset *cs = cgroup_cs(cgrp);
1370 	struct task_struct *task;
1371 	int ret;
1372 
1373 	mutex_lock(&cpuset_mutex);
1374 
1375 	ret = -ENOSPC;
1376 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1377 		goto out_unlock;
1378 
1379 	cgroup_taskset_for_each(task, cgrp, tset) {
1380 		/*
1381 		 * Kthreads which disallow setaffinity shouldn't be moved
1382 		 * to a new cpuset; we don't want to change their cpu
1383 		 * affinity and isolating such threads by their set of
1384 		 * allowed nodes is unnecessary.  Thus, cpusets are not
1385 		 * applicable for such threads.  This prevents checking for
1386 		 * success of set_cpus_allowed_ptr() on all attached tasks
1387 		 * before cpus_allowed may be changed.
1388 		 */
1389 		ret = -EINVAL;
1390 		if (task->flags & PF_NO_SETAFFINITY)
1391 			goto out_unlock;
1392 		ret = security_task_setscheduler(task);
1393 		if (ret)
1394 			goto out_unlock;
1395 	}
1396 
1397 	/*
1398 	 * Mark attach is in progress.  This makes validate_change() fail
1399 	 * changes which zero cpus/mems_allowed.
1400 	 */
1401 	cs->attach_in_progress++;
1402 	ret = 0;
1403 out_unlock:
1404 	mutex_unlock(&cpuset_mutex);
1405 	return ret;
1406 }
1407 
1408 static void cpuset_cancel_attach(struct cgroup *cgrp,
1409 				 struct cgroup_taskset *tset)
1410 {
1411 	mutex_lock(&cpuset_mutex);
1412 	cgroup_cs(cgrp)->attach_in_progress--;
1413 	mutex_unlock(&cpuset_mutex);
1414 }
1415 
1416 /*
1417  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1418  * but we can't allocate it dynamically there.  Define it global and
1419  * allocate from cpuset_init().
1420  */
1421 static cpumask_var_t cpus_attach;
1422 
1423 static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1424 {
1425 	/* static bufs protected by cpuset_mutex */
1426 	static nodemask_t cpuset_attach_nodemask_from;
1427 	static nodemask_t cpuset_attach_nodemask_to;
1428 	struct mm_struct *mm;
1429 	struct task_struct *task;
1430 	struct task_struct *leader = cgroup_taskset_first(tset);
1431 	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1432 	struct cpuset *cs = cgroup_cs(cgrp);
1433 	struct cpuset *oldcs = cgroup_cs(oldcgrp);
1434 
1435 	mutex_lock(&cpuset_mutex);
1436 
1437 	/* prepare for attach */
1438 	if (cs == &top_cpuset)
1439 		cpumask_copy(cpus_attach, cpu_possible_mask);
1440 	else
1441 		guarantee_online_cpus(cs, cpus_attach);
1442 
1443 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1444 
1445 	cgroup_taskset_for_each(task, cgrp, tset) {
1446 		/*
1447 		 * can_attach beforehand should guarantee that this doesn't
1448 		 * fail.  TODO: have a better way to handle failure here
1449 		 */
1450 		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1451 
1452 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1453 		cpuset_update_task_spread_flag(cs, task);
1454 	}
1455 
1456 	/*
1457 	 * Change mm, possibly for multiple threads in a threadgroup. This is
1458 	 * expensive and may sleep.
1459 	 */
1460 	cpuset_attach_nodemask_from = oldcs->mems_allowed;
1461 	cpuset_attach_nodemask_to = cs->mems_allowed;
1462 	mm = get_task_mm(leader);
1463 	if (mm) {
1464 		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1465 		if (is_memory_migrate(cs))
1466 			cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1467 					  &cpuset_attach_nodemask_to);
1468 		mmput(mm);
1469 	}
1470 
1471 	cs->attach_in_progress--;
1472 
1473 	/*
1474 	 * We may have raced with CPU/memory hotunplug.  Trigger hotplug
1475 	 * propagation if @cs doesn't have any CPU or memory.  It will move
1476 	 * the newly added tasks to the nearest parent which can execute.
1477 	 */
1478 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1479 		schedule_cpuset_propagate_hotplug(cs);
1480 
1481 	mutex_unlock(&cpuset_mutex);
1482 }
1483 
1484 /* The various types of files and directories in a cpuset file system */
1485 
1486 typedef enum {
1487 	FILE_MEMORY_MIGRATE,
1488 	FILE_CPULIST,
1489 	FILE_MEMLIST,
1490 	FILE_CPU_EXCLUSIVE,
1491 	FILE_MEM_EXCLUSIVE,
1492 	FILE_MEM_HARDWALL,
1493 	FILE_SCHED_LOAD_BALANCE,
1494 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1495 	FILE_MEMORY_PRESSURE_ENABLED,
1496 	FILE_MEMORY_PRESSURE,
1497 	FILE_SPREAD_PAGE,
1498 	FILE_SPREAD_SLAB,
1499 } cpuset_filetype_t;
1500 
1501 static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1502 {
1503 	struct cpuset *cs = cgroup_cs(cgrp);
1504 	cpuset_filetype_t type = cft->private;
1505 	int retval = -ENODEV;
1506 
1507 	mutex_lock(&cpuset_mutex);
1508 	if (!is_cpuset_online(cs))
1509 		goto out_unlock;
1510 
1511 	switch (type) {
1512 	case FILE_CPU_EXCLUSIVE:
1513 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1514 		break;
1515 	case FILE_MEM_EXCLUSIVE:
1516 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1517 		break;
1518 	case FILE_MEM_HARDWALL:
1519 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
1520 		break;
1521 	case FILE_SCHED_LOAD_BALANCE:
1522 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1523 		break;
1524 	case FILE_MEMORY_MIGRATE:
1525 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1526 		break;
1527 	case FILE_MEMORY_PRESSURE_ENABLED:
1528 		cpuset_memory_pressure_enabled = !!val;
1529 		break;
1530 	case FILE_MEMORY_PRESSURE:
1531 		retval = -EACCES;
1532 		break;
1533 	case FILE_SPREAD_PAGE:
1534 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1535 		break;
1536 	case FILE_SPREAD_SLAB:
1537 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1538 		break;
1539 	default:
1540 		retval = -EINVAL;
1541 		break;
1542 	}
1543 out_unlock:
1544 	mutex_unlock(&cpuset_mutex);
1545 	return retval;
1546 }
1547 
1548 static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1549 {
1550 	struct cpuset *cs = cgroup_cs(cgrp);
1551 	cpuset_filetype_t type = cft->private;
1552 	int retval = -ENODEV;
1553 
1554 	mutex_lock(&cpuset_mutex);
1555 	if (!is_cpuset_online(cs))
1556 		goto out_unlock;
1557 
1558 	switch (type) {
1559 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1560 		retval = update_relax_domain_level(cs, val);
1561 		break;
1562 	default:
1563 		retval = -EINVAL;
1564 		break;
1565 	}
1566 out_unlock:
1567 	mutex_unlock(&cpuset_mutex);
1568 	return retval;
1569 }
1570 
1571 /*
1572  * Common handling for a write to a "cpus" or "mems" file.
1573  */
1574 static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1575 				const char *buf)
1576 {
1577 	struct cpuset *cs = cgroup_cs(cgrp);
1578 	struct cpuset *trialcs;
1579 	int retval = -ENODEV;
1580 
1581 	/*
1582 	 * CPU or memory hotunplug may leave @cs w/o any execution
1583 	 * resources, in which case the hotplug code asynchronously updates
1584 	 * configuration and transfers all tasks to the nearest ancestor
1585 	 * which can execute.
1586 	 *
1587 	 * As writes to "cpus" or "mems" may restore @cs's execution
1588 	 * resources, wait for the previously scheduled operations before
1589 	 * proceeding, so that we don't keep removing tasks added
1590 	 * after execution capability is restored.
1591 	 *
1592 	 * Flushing cpuset_hotplug_work is enough to synchronize against
1593 	 * hotplug handling; however, cpuset_attach() may schedule
1594 	 * propagation work directly.  Flush the workqueue too.
1595 	 */
1596 	flush_work(&cpuset_hotplug_work);
1597 	flush_workqueue(cpuset_propagate_hotplug_wq);
1598 
1599 	mutex_lock(&cpuset_mutex);
1600 	if (!is_cpuset_online(cs))
1601 		goto out_unlock;
1602 
1603 	trialcs = alloc_trial_cpuset(cs);
1604 	if (!trialcs) {
1605 		retval = -ENOMEM;
1606 		goto out_unlock;
1607 	}
1608 
1609 	switch (cft->private) {
1610 	case FILE_CPULIST:
1611 		retval = update_cpumask(cs, trialcs, buf);
1612 		break;
1613 	case FILE_MEMLIST:
1614 		retval = update_nodemask(cs, trialcs, buf);
1615 		break;
1616 	default:
1617 		retval = -EINVAL;
1618 		break;
1619 	}
1620 
1621 	free_trial_cpuset(trialcs);
1622 out_unlock:
1623 	mutex_unlock(&cpuset_mutex);
1624 	return retval;
1625 }
1626 
1627 /*
1628  * These ascii lists should be read in a single call, by using a user
1629  * buffer large enough to hold the entire map.  If read in smaller
1630  * chunks, there is no guarantee of atomicity.  Since the display format
1631  * used, list of ranges of sequential numbers, is variable length,
1632  * and since these maps can change value dynamically, one could read
1633  * gibberish by doing partial reads while a list was changing.
1634  * A single large read to a buffer that crosses a page boundary is
1635  * ok, because the result being copied to user land is not recomputed
1636  * across a page fault.
1637  */
1638 
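/*
 * Example (illustrative only, not part of this file's build): per the
 * comment above, read these maps with a single read() into a buffer large
 * enough for the whole list so the snapshot is consistent.  The path is an
 * assumption about where the cpuset hierarchy is mounted.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void example_read_cpus(void)
{
	char buf[4096];		/* large enough for the entire map */
	int fd = open("/sys/fs/cgroup/cpuset/mygroup/cpuset.cpus", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);	/* one read, not a loop of chunks */
	if (n > 0) {
		buf[n] = '\0';
		printf("cpus: %s", buf);
	}
	close(fd);
}
#endif
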
1639 static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1640 {
1641 	size_t count;
1642 
1643 	mutex_lock(&callback_mutex);
1644 	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1645 	mutex_unlock(&callback_mutex);
1646 
1647 	return count;
1648 }
1649 
1650 static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1651 {
1652 	size_t count;
1653 
1654 	mutex_lock(&callback_mutex);
1655 	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
1656 	mutex_unlock(&callback_mutex);
1657 
1658 	return count;
1659 }
1660 
1661 static ssize_t cpuset_common_file_read(struct cgroup *cont,
1662 				       struct cftype *cft,
1663 				       struct file *file,
1664 				       char __user *buf,
1665 				       size_t nbytes, loff_t *ppos)
1666 {
1667 	struct cpuset *cs = cgroup_cs(cont);
1668 	cpuset_filetype_t type = cft->private;
1669 	char *page;
1670 	ssize_t retval = 0;
1671 	char *s;
1672 
1673 	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1674 		return -ENOMEM;
1675 
1676 	s = page;
1677 
1678 	switch (type) {
1679 	case FILE_CPULIST:
1680 		s += cpuset_sprintf_cpulist(s, cs);
1681 		break;
1682 	case FILE_MEMLIST:
1683 		s += cpuset_sprintf_memlist(s, cs);
1684 		break;
1685 	default:
1686 		retval = -EINVAL;
1687 		goto out;
1688 	}
1689 	*s++ = '\n';
1690 
1691 	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1692 out:
1693 	free_page((unsigned long)page);
1694 	return retval;
1695 }
1696 
1697 static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1698 {
1699 	struct cpuset *cs = cgroup_cs(cont);
1700 	cpuset_filetype_t type = cft->private;
1701 	switch (type) {
1702 	case FILE_CPU_EXCLUSIVE:
1703 		return is_cpu_exclusive(cs);
1704 	case FILE_MEM_EXCLUSIVE:
1705 		return is_mem_exclusive(cs);
1706 	case FILE_MEM_HARDWALL:
1707 		return is_mem_hardwall(cs);
1708 	case FILE_SCHED_LOAD_BALANCE:
1709 		return is_sched_load_balance(cs);
1710 	case FILE_MEMORY_MIGRATE:
1711 		return is_memory_migrate(cs);
1712 	case FILE_MEMORY_PRESSURE_ENABLED:
1713 		return cpuset_memory_pressure_enabled;
1714 	case FILE_MEMORY_PRESSURE:
1715 		return fmeter_getrate(&cs->fmeter);
1716 	case FILE_SPREAD_PAGE:
1717 		return is_spread_page(cs);
1718 	case FILE_SPREAD_SLAB:
1719 		return is_spread_slab(cs);
1720 	default:
1721 		BUG();
1722 	}
1723 
1724 	/* Unreachable but makes gcc happy */
1725 	return 0;
1726 }
1727 
1728 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1729 {
1730 	struct cpuset *cs = cgroup_cs(cont);
1731 	cpuset_filetype_t type = cft->private;
1732 	switch (type) {
1733 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1734 		return cs->relax_domain_level;
1735 	default:
1736 		BUG();
1737 	}
1738 
1739 	/* Unreachable but makes gcc happy */
1740 	return 0;
1741 }
1742 
1743 
1744 /*
1745  * for the common functions, 'private' gives the type of file
1746  */
1747 
1748 static struct cftype files[] = {
1749 	{
1750 		.name = "cpus",
1751 		.read = cpuset_common_file_read,
1752 		.write_string = cpuset_write_resmask,
1753 		.max_write_len = (100U + 6 * NR_CPUS),
1754 		.private = FILE_CPULIST,
1755 	},
1756 
1757 	{
1758 		.name = "mems",
1759 		.read = cpuset_common_file_read,
1760 		.write_string = cpuset_write_resmask,
1761 		.max_write_len = (100U + 6 * MAX_NUMNODES),
1762 		.private = FILE_MEMLIST,
1763 	},
1764 
1765 	{
1766 		.name = "cpu_exclusive",
1767 		.read_u64 = cpuset_read_u64,
1768 		.write_u64 = cpuset_write_u64,
1769 		.private = FILE_CPU_EXCLUSIVE,
1770 	},
1771 
1772 	{
1773 		.name = "mem_exclusive",
1774 		.read_u64 = cpuset_read_u64,
1775 		.write_u64 = cpuset_write_u64,
1776 		.private = FILE_MEM_EXCLUSIVE,
1777 	},
1778 
1779 	{
1780 		.name = "mem_hardwall",
1781 		.read_u64 = cpuset_read_u64,
1782 		.write_u64 = cpuset_write_u64,
1783 		.private = FILE_MEM_HARDWALL,
1784 	},
1785 
1786 	{
1787 		.name = "sched_load_balance",
1788 		.read_u64 = cpuset_read_u64,
1789 		.write_u64 = cpuset_write_u64,
1790 		.private = FILE_SCHED_LOAD_BALANCE,
1791 	},
1792 
1793 	{
1794 		.name = "sched_relax_domain_level",
1795 		.read_s64 = cpuset_read_s64,
1796 		.write_s64 = cpuset_write_s64,
1797 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1798 	},
1799 
1800 	{
1801 		.name = "memory_migrate",
1802 		.read_u64 = cpuset_read_u64,
1803 		.write_u64 = cpuset_write_u64,
1804 		.private = FILE_MEMORY_MIGRATE,
1805 	},
1806 
1807 	{
1808 		.name = "memory_pressure",
1809 		.read_u64 = cpuset_read_u64,
1810 		.write_u64 = cpuset_write_u64,
1811 		.private = FILE_MEMORY_PRESSURE,
1812 		.mode = S_IRUGO,
1813 	},
1814 
1815 	{
1816 		.name = "memory_spread_page",
1817 		.read_u64 = cpuset_read_u64,
1818 		.write_u64 = cpuset_write_u64,
1819 		.private = FILE_SPREAD_PAGE,
1820 	},
1821 
1822 	{
1823 		.name = "memory_spread_slab",
1824 		.read_u64 = cpuset_read_u64,
1825 		.write_u64 = cpuset_write_u64,
1826 		.private = FILE_SPREAD_SLAB,
1827 	},
1828 
1829 	{
1830 		.name = "memory_pressure_enabled",
1831 		.flags = CFTYPE_ONLY_ON_ROOT,
1832 		.read_u64 = cpuset_read_u64,
1833 		.write_u64 = cpuset_write_u64,
1834 		.private = FILE_MEMORY_PRESSURE_ENABLED,
1835 	},
1836 
1837 	{ }	/* terminate */
1838 };
1839 
1840 /*
1841  *	cpuset_css_alloc - allocate a cpuset css
1842  *	cont:	control group that the new cpuset will be part of
1843  */
1844 
1845 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
1846 {
1847 	struct cpuset *cs;
1848 
1849 	if (!cont->parent)
1850 		return &top_cpuset.css;
1851 
1852 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1853 	if (!cs)
1854 		return ERR_PTR(-ENOMEM);
1855 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1856 		kfree(cs);
1857 		return ERR_PTR(-ENOMEM);
1858 	}
1859 
1860 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1861 	cpumask_clear(cs->cpus_allowed);
1862 	nodes_clear(cs->mems_allowed);
1863 	fmeter_init(&cs->fmeter);
1864 	INIT_WORK(&cs->hotplug_work, cpuset_propagate_hotplug_workfn);
1865 	cs->relax_domain_level = -1;
1866 
1867 	return &cs->css;
1868 }
1869 
1870 static int cpuset_css_online(struct cgroup *cgrp)
1871 {
1872 	struct cpuset *cs = cgroup_cs(cgrp);
1873 	struct cpuset *parent = parent_cs(cs);
1874 	struct cpuset *tmp_cs;
1875 	struct cgroup *pos_cg;
1876 
1877 	if (!parent)
1878 		return 0;
1879 
1880 	mutex_lock(&cpuset_mutex);
1881 
1882 	set_bit(CS_ONLINE, &cs->flags);
1883 	if (is_spread_page(parent))
1884 		set_bit(CS_SPREAD_PAGE, &cs->flags);
1885 	if (is_spread_slab(parent))
1886 		set_bit(CS_SPREAD_SLAB, &cs->flags);
1887 
1888 	number_of_cpusets++;
1889 
1890 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
1891 		goto out_unlock;
1892 
1893 	/*
1894 	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1895 	 * set.  This flag handling is implemented in cgroup core for
1896 	 * historical reasons - the flag may be specified during mount.
1897 	 *
1898 	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
1899 	 * refuse to clone the configuration - thereby refusing to let the
1900 	 * task be entered, and as a result refusing the sys_unshare() or
1901 	 * clone() which initiated it.  If this becomes a problem for some
1902 	 * users who wish to allow that scenario, then this could be
1903 	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1904 	 * (and likewise for mems) to the new cgroup.
1905 	 */
1906 	rcu_read_lock();
1907 	cpuset_for_each_child(tmp_cs, pos_cg, parent) {
1908 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
1909 			rcu_read_unlock();
1910 			goto out_unlock;
1911 		}
1912 	}
1913 	rcu_read_unlock();
1914 
1915 	mutex_lock(&callback_mutex);
1916 	cs->mems_allowed = parent->mems_allowed;
1917 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1918 	mutex_unlock(&callback_mutex);
1919 out_unlock:
1920 	mutex_unlock(&cpuset_mutex);
1921 	return 0;
1922 }
1923 
1924 static void cpuset_css_offline(struct cgroup *cgrp)
1925 {
1926 	struct cpuset *cs = cgroup_cs(cgrp);
1927 
1928 	mutex_lock(&cpuset_mutex);
1929 
1930 	if (is_sched_load_balance(cs))
1931 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1932 
1933 	number_of_cpusets--;
1934 	clear_bit(CS_ONLINE, &cs->flags);
1935 
1936 	mutex_unlock(&cpuset_mutex);
1937 }
1938 
1939 /*
1940  * If the cpuset being removed has its flag 'sched_load_balance'
1941  * enabled, then simulate turning sched_load_balance off, which
1942  * will call rebuild_sched_domains_locked().
1943  */
1944 
1945 static void cpuset_css_free(struct cgroup *cont)
1946 {
1947 	struct cpuset *cs = cgroup_cs(cont);
1948 
1949 	free_cpumask_var(cs->cpus_allowed);
1950 	kfree(cs);
1951 }
1952 
1953 struct cgroup_subsys cpuset_subsys = {
1954 	.name = "cpuset",
1955 	.css_alloc = cpuset_css_alloc,
1956 	.css_online = cpuset_css_online,
1957 	.css_offline = cpuset_css_offline,
1958 	.css_free = cpuset_css_free,
1959 	.can_attach = cpuset_can_attach,
1960 	.cancel_attach = cpuset_cancel_attach,
1961 	.attach = cpuset_attach,
1962 	.subsys_id = cpuset_subsys_id,
1963 	.base_cftypes = files,
1964 	.early_init = 1,
1965 };
1966 
1967 /**
1968  * cpuset_init - initialize cpusets at system boot
1969  *
1970  * Description: Initialize top_cpuset and the cpuset internal file system.
1971  **/
1972 
1973 int __init cpuset_init(void)
1974 {
1975 	int err = 0;
1976 
1977 	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1978 		BUG();
1979 
1980 	cpumask_setall(top_cpuset.cpus_allowed);
1981 	nodes_setall(top_cpuset.mems_allowed);
1982 
1983 	fmeter_init(&top_cpuset.fmeter);
1984 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1985 	top_cpuset.relax_domain_level = -1;
1986 
1987 	err = register_filesystem(&cpuset_fs_type);
1988 	if (err < 0)
1989 		return err;
1990 
1991 	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1992 		BUG();
1993 
1994 	number_of_cpusets = 1;
1995 	return 0;
1996 }
1997 
1998 /*
1999  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2000  * or memory nodes, we need to walk over the cpuset hierarchy,
2001  * removing that CPU or node from all cpusets.  If this removes the
2002  * last CPU or node from a cpuset, then move the tasks in the empty
2003  * cpuset to its next-highest non-empty parent.
2004  */
2005 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2006 {
2007 	struct cpuset *parent;
2008 
2009 	/*
2010 	 * Find its next-highest non-empty parent, (top cpuset
2011 	 * has online cpus, so can't be empty).
2012 	 */
2013 	parent = parent_cs(cs);
2014 	while (cpumask_empty(parent->cpus_allowed) ||
2015 			nodes_empty(parent->mems_allowed))
2016 		parent = parent_cs(parent);
2017 
2018 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2019 		rcu_read_lock();
2020 		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
2021 		       cgroup_name(cs->css.cgroup));
2022 		rcu_read_unlock();
2023 	}
2024 }
2025 
2026 /**
2027  * cpuset_propagate_hotplug_workfn - propagate CPU/memory hotplug to a cpuset
2028  * @cs: cpuset of interest
2029  *
2030  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2031  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
2032  * all its tasks are moved to the nearest ancestor with both resources.
2033  */
2034 static void cpuset_propagate_hotplug_workfn(struct work_struct *work)
2035 {
2036 	static cpumask_t off_cpus;
2037 	static nodemask_t off_mems, tmp_mems;
2038 	struct cpuset *cs = container_of(work, struct cpuset, hotplug_work);
2039 	bool is_empty;
2040 
2041 	mutex_lock(&cpuset_mutex);
2042 
2043 	cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2044 	nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
2045 
2046 	/* remove offline cpus from @cs */
2047 	if (!cpumask_empty(&off_cpus)) {
2048 		mutex_lock(&callback_mutex);
2049 		cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2050 		mutex_unlock(&callback_mutex);
2051 		update_tasks_cpumask(cs, NULL);
2052 	}
2053 
2054 	/* remove offline mems from @cs */
2055 	if (!nodes_empty(off_mems)) {
2056 		tmp_mems = cs->mems_allowed;
2057 		mutex_lock(&callback_mutex);
2058 		nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2059 		mutex_unlock(&callback_mutex);
2060 		update_tasks_nodemask(cs, &tmp_mems, NULL);
2061 	}
2062 
2063 	is_empty = cpumask_empty(cs->cpus_allowed) ||
2064 		nodes_empty(cs->mems_allowed);
2065 
2066 	mutex_unlock(&cpuset_mutex);
2067 
2068 	/*
2069 	 * If @cs became empty, move tasks to the nearest ancestor with
2070 	 * execution resources.  This is a full cgroup operation which will
2071 	 * also call back into cpuset.  Should be done outside any lock.
2072 	 */
2073 	if (is_empty)
2074 		remove_tasks_in_empty_cpuset(cs);
2075 
2076 	/* the following may free @cs, should be the last operation */
2077 	css_put(&cs->css);
2078 }
2079 
2080 /**
2081  * schedule_cpuset_propagate_hotplug - schedule hotplug propagation to a cpuset
2082  * @cs: cpuset of interest
2083  *
2084  * Schedule cpuset_propagate_hotplug_workfn() which will update CPU and
2085  * memory masks according to top_cpuset.
2086  */
2087 static void schedule_cpuset_propagate_hotplug(struct cpuset *cs)
2088 {
2089 	/*
2090 	 * Pin @cs.  The refcnt will be released when the work item
2091 	 * finishes executing.
2092 	 */
2093 	if (!css_tryget(&cs->css))
2094 		return;
2095 
2096 	/*
2097 	 * Queue @cs->hotplug_work.  If already pending, lose the css ref.
2098 	 * cpuset_propagate_hotplug_wq is ordered and propagation will
2099 	 * happen in the order this function is called.
2100 	 */
2101 	if (!queue_work(cpuset_propagate_hotplug_wq, &cs->hotplug_work))
2102 		css_put(&cs->css);
2103 }
2104 
2105 /**
2106  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2107  *
2108  * This function is called after either CPU or memory configuration has
2109  * changed and updates cpuset accordingly.  The top_cpuset is always
2110  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2111  * order to make cpusets transparent (of no effect) on systems that are
2112  * actively using CPU hotplug but making no active use of cpusets.
2113  *
2114  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
2115  * nodes have been taken down, schedule_cpuset_propagate_hotplug() is invoked on all
2116  * descendants.
2117  *
2118  * Note that CPU offlining during suspend is ignored.  We don't modify
2119  * cpusets across suspend/resume cycles at all.
2120  */
2121 static void cpuset_hotplug_workfn(struct work_struct *work)
2122 {
2123 	static cpumask_t new_cpus, tmp_cpus;
2124 	static nodemask_t new_mems, tmp_mems;
2125 	bool cpus_updated, mems_updated;
2126 	bool cpus_offlined, mems_offlined;
2127 
2128 	mutex_lock(&cpuset_mutex);
2129 
2130 	/* fetch the available cpus/mems and find out which changed how */
2131 	cpumask_copy(&new_cpus, cpu_active_mask);
2132 	new_mems = node_states[N_MEMORY];
2133 
2134 	cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2135 	cpus_offlined = cpumask_andnot(&tmp_cpus, top_cpuset.cpus_allowed,
2136 				       &new_cpus);
2137 
2138 	mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
2139 	nodes_andnot(tmp_mems, top_cpuset.mems_allowed, new_mems);
2140 	mems_offlined = !nodes_empty(tmp_mems);
2141 
2142 	/* synchronize cpus_allowed to cpu_active_mask */
2143 	if (cpus_updated) {
2144 		mutex_lock(&callback_mutex);
2145 		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2146 		mutex_unlock(&callback_mutex);
2147 		/* we don't mess with cpumasks of tasks in top_cpuset */
2148 	}
2149 
2150 	/* synchronize mems_allowed to N_MEMORY */
2151 	if (mems_updated) {
2152 		tmp_mems = top_cpuset.mems_allowed;
2153 		mutex_lock(&callback_mutex);
2154 		top_cpuset.mems_allowed = new_mems;
2155 		mutex_unlock(&callback_mutex);
2156 		update_tasks_nodemask(&top_cpuset, &tmp_mems, NULL);
2157 	}
2158 
2159 	/* if cpus or mems went down, we need to propagate to descendants */
2160 	if (cpus_offlined || mems_offlined) {
2161 		struct cpuset *cs;
2162 		struct cgroup *pos_cgrp;
2163 
2164 		rcu_read_lock();
2165 		cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset)
2166 			schedule_cpuset_propagate_hotplug(cs);
2167 		rcu_read_unlock();
2168 	}
2169 
2170 	mutex_unlock(&cpuset_mutex);
2171 
2172 	/* wait for propagations to finish */
2173 	flush_workqueue(cpuset_propagate_hotplug_wq);
2174 
2175 	/* rebuild sched domains if cpus_allowed has changed */
2176 	if (cpus_updated)
2177 		rebuild_sched_domains();
2178 }
2179 
2180 void cpuset_update_active_cpus(bool cpu_online)
2181 {
2182 	/*
2183 	 * We're inside a cpu hotplug critical region which usually nests
2184 	 * inside cgroup synchronization.  Bounce actual hotplug processing
2185 	 * to a work item to avoid reverse locking order.
2186 	 *
2187 	 * We still need to do partition_sched_domains() synchronously;
2188 	 * otherwise, the scheduler will get confused and put tasks on the
2189 	 * dead CPU.  Fall back to the default single domain.
2190 	 * cpuset_hotplug_workfn() will rebuild it as necessary.
2191 	 */
2192 	partition_sched_domains(1, NULL, NULL);
2193 	schedule_work(&cpuset_hotplug_work);
2194 }
2195 
2196 /*
2197  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2198  * Call this routine anytime after node_states[N_MEMORY] changes.
2199  * See cpuset_update_active_cpus() for CPU hotplug handling.
2200  */
2201 static int cpuset_track_online_nodes(struct notifier_block *self,
2202 				unsigned long action, void *arg)
2203 {
2204 	schedule_work(&cpuset_hotplug_work);
2205 	return NOTIFY_OK;
2206 }
2207 
2208 static struct notifier_block cpuset_track_online_nodes_nb = {
2209 	.notifier_call = cpuset_track_online_nodes,
2210 	.priority = 10,		/* ??! */
2211 };
2212 
2213 /**
2214  * cpuset_init_smp - initialize cpus_allowed
2215  *
2216  * Description: Finish top cpuset after cpu, node maps are initialized
2217  */
2218 void __init cpuset_init_smp(void)
2219 {
2220 	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2221 	top_cpuset.mems_allowed = node_states[N_MEMORY];
2222 
2223 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2224 
2225 	cpuset_propagate_hotplug_wq =
2226 		alloc_ordered_workqueue("cpuset_hotplug", 0);
2227 	BUG_ON(!cpuset_propagate_hotplug_wq);
2228 }
2229 
2230 /**
2231  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2232  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2233  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2234  *
2235  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2236  * attached to the specified @tsk.  Guaranteed to return some non-empty
2237  * subset of cpu_online_mask, even if this means going outside the
2238  * task's cpuset.
2239  **/
2240 
2241 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2242 {
2243 	mutex_lock(&callback_mutex);
2244 	task_lock(tsk);
2245 	guarantee_online_cpus(task_cs(tsk), pmask);
2246 	task_unlock(tsk);
2247 	mutex_unlock(&callback_mutex);
2248 }
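
/*
 * Example (illustrative only, not part of this file's build): a hedged
 * sketch, loosely modeled on how sched_setaffinity() consults this helper,
 * of clamping a requested affinity mask to the CPUs allowed by the task's
 * cpuset.  The function name is made up for illustration.
 */
#if 0
static int example_clamp_affinity(struct task_struct *p,
				  const struct cpumask *requested,
				  struct cpumask *effective)
{
	cpumask_var_t cpus_allowed;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(p, cpus_allowed);		/* guaranteed non-empty */
	cpumask_and(effective, requested, cpus_allowed);

	free_cpumask_var(cpus_allowed);
	return cpumask_empty(effective) ? -EINVAL : 0;
}
#endif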
2249 
2250 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2251 {
2252 	const struct cpuset *cs;
2253 
2254 	rcu_read_lock();
2255 	cs = task_cs(tsk);
2256 	if (cs)
2257 		do_set_cpus_allowed(tsk, cs->cpus_allowed);
2258 	rcu_read_unlock();
2259 
2260 	/*
2261 	 * We own tsk->cpus_allowed, nobody can change it under us.
2262 	 *
2263 	 * But we used cs && cs->cpus_allowed lockless and thus can
2264 	 * race with cgroup_attach_task() or update_cpumask() and get
2265 	 * the wrong tsk->cpus_allowed. However, both cases imply the
2266 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2267 	 * which takes task_rq_lock().
2268 	 *
2269 	 * If we are called after it dropped the lock we must see all
2270 	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2271 	 * set any mask even if it is not right from task_cs() pov,
2272 	 * the pending set_cpus_allowed_ptr() will fix things.
2273 	 *
2274 	 * select_fallback_rq() will fix things up and set cpu_possible_mask
2275 	 * if required.
2276 	 */
2277 }
2278 
2279 void cpuset_init_current_mems_allowed(void)
2280 {
2281 	nodes_setall(current->mems_allowed);
2282 }
2283 
2284 /**
2285  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2286  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2287  *
2288  * Description: Returns the nodemask_t mems_allowed of the cpuset
2289  * attached to the specified @tsk.  Guaranteed to return some non-empty
2290  * subset of node_states[N_MEMORY], even if this means going outside the
2291  * task's cpuset.
2292  **/
2293 
2294 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2295 {
2296 	nodemask_t mask;
2297 
2298 	mutex_lock(&callback_mutex);
2299 	task_lock(tsk);
2300 	guarantee_online_mems(task_cs(tsk), &mask);
2301 	task_unlock(tsk);
2302 	mutex_unlock(&callback_mutex);
2303 
2304 	return mask;
2305 }
2306 
2307 /**
2308  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2309  * @nodemask: the nodemask to be checked
2310  *
2311  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2312  */
2313 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2314 {
2315 	return nodes_intersects(*nodemask, current->mems_allowed);
2316 }
2317 
2318 /*
2319  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2320  * mem_hardwall ancestor to the specified cpuset.  Call holding
2321  * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2322  * (an unusual configuration), then returns the root cpuset.
2323  */
2324 static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2325 {
2326 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2327 		cs = parent_cs(cs);
2328 	return cs;
2329 }
2330 
2331 /**
2332  * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2333  * @node: is this an allowed node?
2334  * @gfp_mask: memory allocation flags
2335  *
2336  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2337  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2338  * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2339  * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2340  * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2341  * flag, yes.
2342  * Otherwise, no.
2343  *
2344  * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2345  * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2346  * might sleep, and might allow a node from an enclosing cpuset.
2347  *
2348  * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2349  * cpusets, and never sleeps.
2350  *
2351  * The __GFP_THISNODE placement logic is really handled elsewhere,
2352  * by forcibly using a zonelist starting at a specified node, and by
2353  * (in get_page_from_freelist()) refusing to consider the zones for
2354  * any node on the zonelist except the first.  By the time any such
2355  * calls get to this routine, we should just shut up and say 'yes'.
2356  *
2357  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2358  * and do not allow allocations outside the current task's cpuset
2359  * unless the task has been OOM killed and is marked TIF_MEMDIE.
2360  * GFP_KERNEL allocations are not so marked, so can escape to the
2361  * nearest enclosing hardwalled ancestor cpuset.
2362  *
2363  * Scanning up parent cpusets requires callback_mutex.  The
2364  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2365  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2366  * current task's mems_allowed came up empty on the first pass over
2367  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2368  * cpuset are short of memory, might require taking the callback_mutex
2369  * mutex.
2370  *
2371  * The first call here from mm/page_alloc:get_page_from_freelist()
2372  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2373  * so no allocation on a node outside the cpuset is allowed (unless
2374  * in interrupt, of course).
2375  *
2376  * The second pass through get_page_from_freelist() doesn't even call
2377  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2378  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2379  * in alloc_flags.  That logic and the checks below have the combined
2380  * effect that:
2381  *	in_interrupt - any node ok (current task context irrelevant)
2382  *	GFP_ATOMIC   - any node ok
2383  *	TIF_MEMDIE   - any node ok
2384  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2385  *	GFP_USER     - only nodes in current task's mems_allowed ok.
2386  *
2387  * Rule:
2388  *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2389  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2390  *    the code that might scan up ancestor cpusets and sleep.
2391  */
2392 int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2393 {
2394 	const struct cpuset *cs;	/* current cpuset ancestors */
2395 	int allowed;			/* is allocation in zone z allowed? */
2396 
2397 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2398 		return 1;
2399 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2400 	if (node_isset(node, current->mems_allowed))
2401 		return 1;
2402 	/*
2403 	 * Allow tasks that have access to memory reserves because they have
2404 	 * been OOM killed to get memory anywhere.
2405 	 */
2406 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2407 		return 1;
2408 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
2409 		return 0;
2410 
2411 	if (current->flags & PF_EXITING) /* Let dying task have memory */
2412 		return 1;
2413 
2414 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
2415 	mutex_lock(&callback_mutex);
2416 
2417 	task_lock(current);
2418 	cs = nearest_hardwall_ancestor(task_cs(current));
2419 	task_unlock(current);
2420 
2421 	allowed = node_isset(node, cs->mems_allowed);
2422 	mutex_unlock(&callback_mutex);
2423 	return allowed;
2424 }
2425 
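/*
 * Example (illustrative only, not part of this file's build): a sketch of
 * how an allocator-style caller might consult the softwall check above when
 * picking a node; the real page allocator does this through the ALLOC_CPUSET
 * logic in get_page_from_freelist().  The helper name is made up for
 * illustration.
 */
#if 0
static int example_pick_node(gfp_t gfp_mask)
{
	int nid;

	/* Walk memory nodes and return the first one the cpuset permits. */
	for_each_node_state(nid, N_MEMORY)
		if (cpuset_node_allowed_softwall(nid, gfp_mask))
			return nid;

	return NUMA_NO_NODE;
}
#endif
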
2426 /*
2427  * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2428  * @node: is this an allowed node?
2429  * @gfp_mask: memory allocation flags
2430  *
2431  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2432  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2433  * yes.  If the task has been OOM killed and has access to memory reserves as
2434  * specified by the TIF_MEMDIE flag, yes.
2435  * Otherwise, no.
2436  *
2437  * The __GFP_THISNODE placement logic is really handled elsewhere,
2438  * by forcibly using a zonelist starting at a specified node, and by
2439  * (in get_page_from_freelist()) refusing to consider the zones for
2440  * any node on the zonelist except the first.  By the time any such
2441  * calls get to this routine, we should just shut up and say 'yes'.
2442  *
2443  * Unlike the cpuset_node_allowed_softwall() variant, above,
2444  * this variant requires that the node be in the current task's
2445  * mems_allowed or that we're in interrupt.  It does not scan up the
2446  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2447  * It never sleeps.
2448  */
2449 int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2450 {
2451 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2452 		return 1;
2453 	if (node_isset(node, current->mems_allowed))
2454 		return 1;
2455 	/*
2456 	 * Allow tasks that have access to memory reserves because they have
2457 	 * been OOM killed to get memory anywhere.
2458 	 */
2459 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2460 		return 1;
2461 	return 0;
2462 }
2463 
2464 /**
2465  * cpuset_mem_spread_node() - On which node to begin search for a file page
2466  * cpuset_slab_spread_node() - On which node to begin search for a slab page
2467  *
2468  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2469  * tasks in a cpuset with is_spread_page or is_spread_slab set),
2470  * and if the memory allocation used cpuset_mem_spread_node()
2471  * to determine on which node to start looking, as it will for
2472  * certain page cache or slab cache pages such as used for file
2473  * system buffers and inode caches, then instead of starting on the
2474  * local node to look for a free page, the starting node is spread
2475  * around the task's mems_allowed nodes.
2476  *
2477  * We don't have to worry about the returned node being offline
2478  * because "it can't happen", and even if it did, it would be ok.
2479  *
2480  * The routines calling guarantee_online_mems() are careful to
2481  * only set nodes in task->mems_allowed that are online.  So it
2482  * should not be possible for the following code to return an
2483  * offline node.  But if it did, that would be ok, as this routine
2484  * is not returning the node where the allocation must be, only
2485  * the node where the search should start.  The zonelist passed to
2486  * __alloc_pages() will include all nodes.  If the slab allocator
2487  * is passed an offline node, it will fall back to the local node.
2488  * See kmem_cache_alloc_node().
2489  */
2490 
2491 static int cpuset_spread_node(int *rotor)
2492 {
2493 	int node;
2494 
2495 	node = next_node(*rotor, current->mems_allowed);
2496 	if (node == MAX_NUMNODES)
2497 		node = first_node(current->mems_allowed);
2498 	*rotor = node;
2499 	return node;
2500 }
2501 
2502 int cpuset_mem_spread_node(void)
2503 {
2504 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2505 		current->cpuset_mem_spread_rotor =
2506 			node_random(&current->mems_allowed);
2507 
2508 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2509 }
2510 
2511 int cpuset_slab_spread_node(void)
2512 {
2513 	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2514 		current->cpuset_slab_spread_rotor =
2515 			node_random(&current->mems_allowed);
2516 
2517 	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2518 }
2519 
2520 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
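
/*
 * Example (illustrative only, not part of this file's build): a simplified
 * sketch, loosely modeled on how the page cache allocator uses the spread
 * helpers above, of starting a page-cache allocation on the rotor node
 * instead of the local node.  The function name is made up; error handling
 * and the mems_allowed retry loop are omitted.
 */
#if 0
static struct page *example_spread_page_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		int nid = cpuset_mem_spread_node();	/* rotate over mems_allowed */

		return alloc_pages_exact_node(nid, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
#endif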
2521 
2522 /**
2523  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2524  * @tsk1: pointer to task_struct of some task.
2525  * @tsk2: pointer to task_struct of some other task.
2526  *
2527  * Description: Return true if @tsk1's mems_allowed intersects the
2528  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2529  * one task's memory usage might impact the memory available
2530  * to the other.
2531  **/
2532 
2533 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2534 				   const struct task_struct *tsk2)
2535 {
2536 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2537 }
2538 
2539 #define CPUSET_NODELIST_LEN	(256)
2540 
2541 /**
2542  * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2543  * @task: pointer to task_struct of some task.
2544  *
2545  * Description: Prints @task's name, cpuset name, and cached copy of its
2546  * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
2547  * dereferencing task_cs(task).
2548  */
2549 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2550 {
2551 	 /* Statically allocated to prevent using excess stack. */
2552 	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2553 	static DEFINE_SPINLOCK(cpuset_buffer_lock);
2554 
2555 	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
2556 
2557 	rcu_read_lock();
2558 	spin_lock(&cpuset_buffer_lock);
2559 
2560 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2561 			   tsk->mems_allowed);
2562 	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2563 	       tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
2564 
2565 	spin_unlock(&cpuset_buffer_lock);
2566 	rcu_read_unlock();
2567 }
2568 
2569 /*
2570  * Collection of memory_pressure is suppressed unless
2571  * this flag is enabled by writing "1" to the special
2572  * cpuset file 'memory_pressure_enabled' in the root cpuset.
2573  */
2574 
2575 int cpuset_memory_pressure_enabled __read_mostly;
2576 
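/*
 * Example (illustrative only, not part of this file's build): a userspace
 * sketch of enabling collection via the root-only 'memory_pressure_enabled'
 * file and then sampling a cpuset's read-only 'memory_pressure' value.  The
 * mount point and group name are assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void example_sample_memory_pressure(void)
{
	int fd;
	char buf[32];
	ssize_t n;

	/* Enable collection globally (root cpuset only). */
	fd = open("/sys/fs/cgroup/cpuset/cpuset.memory_pressure_enabled", O_WRONLY);
	if (fd >= 0) {
		write(fd, "1", 1);
		close(fd);
	}

	/* Read the per-cpuset reclaim rate (writes to this file fail with EACCES). */
	fd = open("/sys/fs/cgroup/cpuset/mygroup/cpuset.memory_pressure", O_RDONLY);
	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("memory_pressure: %s", buf);
	}
	close(fd);
}
#endif
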
2577 /**
2578  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2579  *
2580  * Keep a running average of the rate of synchronous (direct)
2581  * page reclaim efforts initiated by tasks in each cpuset.
2582  *
2583  * This represents the rate at which some task in the cpuset
2584  * ran low on memory on all nodes it was allowed to use, and
2585  * had to enter the kernel's page reclaim code in an effort to
2586  * create more free memory by tossing clean pages or swapping
2587  * or writing dirty pages.
2588  *
2589  * Display to user space in the per-cpuset read-only file
2590  * "memory_pressure".  Value displayed is an integer
2591  * representing the recent rate of entry into the synchronous
2592  * (direct) page reclaim by any task attached to the cpuset.
2593  **/
2594 
2595 void __cpuset_memory_pressure_bump(void)
2596 {
2597 	task_lock(current);
2598 	fmeter_markevent(&task_cs(current)->fmeter);
2599 	task_unlock(current);
2600 }
2601 
2602 #ifdef CONFIG_PROC_PID_CPUSET
2603 /*
2604  * proc_cpuset_show()
2605  *  - Print task's cpuset path into seq_file.
2606  *  - Used for /proc/<pid>/cpuset.
2607  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2608  *    doesn't really matter if tsk->cpuset changes after we read it,
2609  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
2610  *    anyway.
2611  */
2612 int proc_cpuset_show(struct seq_file *m, void *unused_v)
2613 {
2614 	struct pid *pid;
2615 	struct task_struct *tsk;
2616 	char *buf;
2617 	struct cgroup_subsys_state *css;
2618 	int retval;
2619 
2620 	retval = -ENOMEM;
2621 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2622 	if (!buf)
2623 		goto out;
2624 
2625 	retval = -ESRCH;
2626 	pid = m->private;
2627 	tsk = get_pid_task(pid, PIDTYPE_PID);
2628 	if (!tsk)
2629 		goto out_free;
2630 
2631 	rcu_read_lock();
2632 	css = task_subsys_state(tsk, cpuset_subsys_id);
2633 	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2634 	rcu_read_unlock();
2635 	if (retval < 0)
2636 		goto out_put_task;
2637 	seq_puts(m, buf);
2638 	seq_putc(m, '\n');
2639 out_put_task:
2640 	put_task_struct(tsk);
2641 out_free:
2642 	kfree(buf);
2643 out:
2644 	return retval;
2645 }
2646 #endif /* CONFIG_PROC_PID_CPUSET */
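
/*
 * Example (illustrative only, not part of this file's build): a userspace
 * sketch of reading the cgroup path that proc_cpuset_show() emits for a
 * task via /proc/<pid>/cpuset.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>

static void example_print_cpuset_path(pid_t pid)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/cpuset", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(line, sizeof(line), f))	/* e.g. "/mygroup\n" */
		printf("pid %d cpuset: %s", (int)pid, line);
	fclose(f);
}
#endif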
2647 
2648 /* Display task mems_allowed in /proc/<pid>/status file. */
2649 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2650 {
2651 	seq_printf(m, "Mems_allowed:\t");
2652 	seq_nodemask(m, &task->mems_allowed);
2653 	seq_printf(m, "\n");
2654 	seq_printf(m, "Mems_allowed_list:\t");
2655 	seq_nodemask_list(m, &task->mems_allowed);
2656 	seq_printf(m, "\n");
2657 }
2658