1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/list.h>
37 #include <linux/mempolicy.h>
38 #include <linux/mm.h>
39 #include <linux/memory.h>
40 #include <linux/module.h>
41 #include <linux/mount.h>
42 #include <linux/namei.h>
43 #include <linux/pagemap.h>
44 #include <linux/proc_fs.h>
45 #include <linux/rcupdate.h>
46 #include <linux/sched.h>
47 #include <linux/seq_file.h>
48 #include <linux/security.h>
49 #include <linux/slab.h>
50 #include <linux/spinlock.h>
51 #include <linux/stat.h>
52 #include <linux/string.h>
53 #include <linux/time.h>
54 #include <linux/backing-dev.h>
55 #include <linux/sort.h>
56 
57 #include <asm/uaccess.h>
58 #include <asm/atomic.h>
59 #include <linux/mutex.h>
60 #include <linux/workqueue.h>
61 #include <linux/cgroup.h>
62 
63 /*
64  * Workqueue for cpuset related tasks.
65  *
66  * Using kevent workqueue may cause deadlock when memory_migrate
67  * is set. So we create a separate workqueue thread for cpuset.
68  */
69 static struct workqueue_struct *cpuset_wq;
70 
71 /*
72  * Tracks how many cpusets are currently defined in the system.
73  * When there is only one cpuset (the root cpuset) we can
74  * short circuit some hooks.
75  */
76 int number_of_cpusets __read_mostly;
77 
78 /* Forward declare cgroup structures */
79 struct cgroup_subsys cpuset_subsys;
80 struct cpuset;
81 
82 /* See "Frequency meter" comments, below. */
83 
84 struct fmeter {
85 	int cnt;		/* unprocessed events count */
86 	int val;		/* most recent output value */
87 	time_t time;		/* clock (secs) when val computed */
88 	spinlock_t lock;	/* guards read or write of above */
89 };
90 
91 struct cpuset {
92 	struct cgroup_subsys_state css;
93 
94 	unsigned long flags;		/* "unsigned long" so bitops work */
95 	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
96 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
97 
98 	struct cpuset *parent;		/* my parent */
99 
100 	/*
101 	 * Copy of global cpuset_mems_generation as of the most
102 	 * recent time this cpuset changed its mems_allowed.
103 	 */
104 	int mems_generation;
105 
106 	struct fmeter fmeter;		/* memory_pressure filter */
107 
108 	/* partition number for rebuild_sched_domains() */
109 	int pn;
110 
111 	/* for custom sched domain */
112 	int relax_domain_level;
113 
114 	/* used for walking a cpuset hierarchy */
115 	struct list_head stack_list;
116 };
117 
118 /* Retrieve the cpuset for a cgroup */
119 static inline struct cpuset *cgroup_cs(struct cgroup *cont)
120 {
121 	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
122 			    struct cpuset, css);
123 }
124 
125 /* Retrieve the cpuset for a task */
126 static inline struct cpuset *task_cs(struct task_struct *task)
127 {
128 	return container_of(task_subsys_state(task, cpuset_subsys_id),
129 			    struct cpuset, css);
130 }
131 struct cpuset_hotplug_scanner {
132 	struct cgroup_scanner scan;
133 	struct cgroup *to;
134 };
135 
136 /* bits in struct cpuset flags field */
137 typedef enum {
138 	CS_CPU_EXCLUSIVE,
139 	CS_MEM_EXCLUSIVE,
140 	CS_MEM_HARDWALL,
141 	CS_MEMORY_MIGRATE,
142 	CS_SCHED_LOAD_BALANCE,
143 	CS_SPREAD_PAGE,
144 	CS_SPREAD_SLAB,
145 } cpuset_flagbits_t;
146 
147 /* convenient tests for these bits */
148 static inline int is_cpu_exclusive(const struct cpuset *cs)
149 {
150 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
151 }
152 
153 static inline int is_mem_exclusive(const struct cpuset *cs)
154 {
155 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
156 }
157 
158 static inline int is_mem_hardwall(const struct cpuset *cs)
159 {
160 	return test_bit(CS_MEM_HARDWALL, &cs->flags);
161 }
162 
163 static inline int is_sched_load_balance(const struct cpuset *cs)
164 {
165 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
166 }
167 
168 static inline int is_memory_migrate(const struct cpuset *cs)
169 {
170 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
171 }
172 
173 static inline int is_spread_page(const struct cpuset *cs)
174 {
175 	return test_bit(CS_SPREAD_PAGE, &cs->flags);
176 }
177 
178 static inline int is_spread_slab(const struct cpuset *cs)
179 {
180 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
181 }
182 
183 /*
184  * Increment this integer every time any cpuset changes its
185  * mems_allowed value.  Users of cpusets can track this generation
186  * number, and avoid having to lock and reload mems_allowed unless
187  * the cpuset they're using changes generation.
188  *
189  * A single, global generation is needed because cpuset_attach_task() could
190  * reattach a task to a different cpuset, which must not have its
191  * generation numbers aliased with those of that task's previous cpuset.
192  *
193  * Generations are needed for mems_allowed because one task cannot
194  * modify another's memory placement.  So we must enable every task,
195  * on every visit to __alloc_pages(), to efficiently check whether
196  * its current->cpuset->mems_allowed has changed, requiring an update
197  * of its current->mems_allowed.
198  *
199  * Since writes to cpuset_mems_generation are guarded by the cgroup lock
200  * there is no need to mark it atomic.
201  */
202 static int cpuset_mems_generation;
203 
204 static struct cpuset top_cpuset = {
205 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
206 };
207 
208 /*
209  * There are two global mutexes guarding cpuset structures.  The first
210  * is the main control groups cgroup_mutex, accessed via
211  * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
212  * callback_mutex, below. They can nest.  It is ok to first take
213  * cgroup_mutex, then nest callback_mutex.  We also require taking
214  * task_lock() when dereferencing a task's cpuset pointer.  See "The
215  * task_lock() exception", at the end of this comment.
216  *
217  * A task must hold both mutexes to modify cpusets.  If a task
218  * holds cgroup_mutex, then it blocks others wanting that mutex,
219  * ensuring that it is the only task able to also acquire callback_mutex
220  * and be able to modify cpusets.  It can perform various checks on
221  * the cpuset structure first, knowing nothing will change.  It can
222  * also allocate memory while just holding cgroup_mutex.  While it is
223  * performing these checks, various callback routines can briefly
224  * acquire callback_mutex to query cpusets.  Once it is ready to make
225  * the changes, it takes callback_mutex, blocking everyone else.
226  *
227  * Calls to the kernel memory allocator can not be made while holding
228  * callback_mutex, as that would risk double tripping on callback_mutex
229  * from one of the callbacks into the cpuset code from within
230  * __alloc_pages().
231  *
232  * If a task is only holding callback_mutex, then it has read-only
233  * access to cpusets.
234  *
235  * The task_struct fields mems_allowed and mems_generation may only
236  * be accessed in the context of that task, so require no locks.
237  *
238  * The cpuset_common_file_read() handlers only hold callback_mutex across
239  * small pieces of code, such as when reading out possibly multi-word
240  * cpumasks and nodemasks.
241  *
242  * Accessing a task's cpuset should be done in accordance with the
243  * guidelines for accessing subsystem state in kernel/cgroup.c
244  */
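/*
 * A condensed view of the ordering described above, as used in this file
 * (see e.g. cpuset_update_task_memory_state(), which nests task_lock()
 * inside callback_mutex):
 *
 *	cgroup_mutex  -->  callback_mutex  -->  task_lock()
 */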
245 
246 static DEFINE_MUTEX(callback_mutex);
247 
248 /*
249  * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
250  * buffers.  They are statically allocated to prevent using excess stack
251  * when calling cpuset_print_task_mems_allowed().
252  */
253 #define CPUSET_NAME_LEN		(128)
254 #define	CPUSET_NODELIST_LEN	(256)
255 static char cpuset_name[CPUSET_NAME_LEN];
256 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
257 static DEFINE_SPINLOCK(cpuset_buffer_lock);
258 
259 /*
260  * This is ugly, but preserves the userspace API for existing cpuset
261  * users. If someone tries to mount the "cpuset" filesystem, we
262  * silently switch it to mount "cgroup" instead
263  */
264 static int cpuset_get_sb(struct file_system_type *fs_type,
265 			 int flags, const char *unused_dev_name,
266 			 void *data, struct vfsmount *mnt)
267 {
268 	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
269 	int ret = -ENODEV;
270 	if (cgroup_fs) {
271 		char mountopts[] =
272 			"cpuset,noprefix,"
273 			"release_agent=/sbin/cpuset_release_agent";
274 		ret = cgroup_fs->get_sb(cgroup_fs, flags,
275 					   unused_dev_name, mountopts, mnt);
276 		put_filesystem(cgroup_fs);
277 	}
278 	return ret;
279 }
280 
281 static struct file_system_type cpuset_fs_type = {
282 	.name = "cpuset",
283 	.get_sb = cpuset_get_sb,
284 };
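/*
 * Sketch of the user-visible effect of cpuset_get_sb() above (the mount
 * point below is only an example):
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves like
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent \
 *		none /dev/cpuset
 */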
285 
286 /*
287  * Return in pmask the portion of a cpuset's cpus_allowed that
288  * are online.  If none are online, walk up the cpuset hierarchy
289  * until we find one that does have some online cpus.  If we get
290  * all the way to the top and still haven't found any online cpus,
291  * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
292  * task, return cpu_online_map.
293  *
294  * One way or another, we guarantee to return some non-empty subset
295  * of cpu_online_map.
296  *
297  * Call with callback_mutex held.
298  */
299 
300 static void guarantee_online_cpus(const struct cpuset *cs,
301 				  struct cpumask *pmask)
302 {
303 	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
304 		cs = cs->parent;
305 	if (cs)
306 		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
307 	else
308 		cpumask_copy(pmask, cpu_online_mask);
309 	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
310 }
311 
312 /*
313  * Return in *pmask the portion of a cpuset's mems_allowed that
314  * are online, with memory.  If none are online with memory, walk
315  * up the cpuset hierarchy until we find one that does have some
316  * online mems.  If we get all the way to the top and still haven't
317  * found any online mems, return node_states[N_HIGH_MEMORY].
318  *
319  * One way or another, we guarantee to return some non-empty subset
320  * of node_states[N_HIGH_MEMORY].
321  *
322  * Call with callback_mutex held.
323  */
324 
325 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
326 {
327 	while (cs && !nodes_intersects(cs->mems_allowed,
328 					node_states[N_HIGH_MEMORY]))
329 		cs = cs->parent;
330 	if (cs)
331 		nodes_and(*pmask, cs->mems_allowed,
332 					node_states[N_HIGH_MEMORY]);
333 	else
334 		*pmask = node_states[N_HIGH_MEMORY];
335 	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
336 }
337 
338 /**
339  * cpuset_update_task_memory_state - update task memory placement
340  *
341  * If the current task's cpuset's mems_allowed changed behind our
342  * backs, update current->mems_allowed, mems_generation and task NUMA
343  * mempolicy to the new value.
344  *
345  * Task mempolicy is updated by rebinding it relative to the
346  * current->cpuset if a task has its memory placement changed.
347  * Do not call this routine if in_interrupt().
348  *
349  * Call without callback_mutex or task_lock() held.  May be
350  * called with or without cgroup_mutex held.  Thanks in part to
351  * 'the_top_cpuset_hack', the task's cpuset pointer will never
352  * be NULL.  This routine also might acquire callback_mutex during
353  * call.
354  *
355  * Reading current->cpuset->mems_generation doesn't need task_lock
356  * to guard the current->cpuset dereference, because it is guarded
357  * from concurrent freeing of current->cpuset using RCU.
358  *
359  * The rcu_dereference() is technically probably not needed,
360  * as I don't actually mind if I see a new cpuset pointer but
361  * an old value of mems_generation.  However this really only
362  * matters on alpha systems using cpusets heavily.  If I dropped
363  * that rcu_dereference(), it would save them a memory barrier.
364  * For all other arch's, rcu_dereference is a no-op anyway, and for
365  * alpha systems not using cpusets, another planned optimization,
366  * avoiding the rcu critical section for tasks in the root cpuset
367  * which is statically allocated, so can't vanish, will make this
368  * irrelevant.  Better to use RCU as intended, than to engage in
369  * some cute trick to save a memory barrier that is impossible to
370  * test, for alpha systems using cpusets heavily, which might not
371  * even exist.
372  *
373  * This routine is needed to update the per-task mems_allowed data,
374  * within the tasks context, when it is trying to allocate memory
375  * (in various mm/mempolicy.c routines) and notices that some other
376  * task has been modifying its cpuset.
377  */
378 
379 void cpuset_update_task_memory_state(void)
380 {
381 	int my_cpusets_mem_gen;
382 	struct task_struct *tsk = current;
383 	struct cpuset *cs;
384 
385 	rcu_read_lock();
386 	my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
387 	rcu_read_unlock();
388 
389 	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
390 		mutex_lock(&callback_mutex);
391 		task_lock(tsk);
392 		cs = task_cs(tsk); /* Maybe changed when task not locked */
393 		guarantee_online_mems(cs, &tsk->mems_allowed);
394 		tsk->cpuset_mems_generation = cs->mems_generation;
395 		if (is_spread_page(cs))
396 			tsk->flags |= PF_SPREAD_PAGE;
397 		else
398 			tsk->flags &= ~PF_SPREAD_PAGE;
399 		if (is_spread_slab(cs))
400 			tsk->flags |= PF_SPREAD_SLAB;
401 		else
402 			tsk->flags &= ~PF_SPREAD_SLAB;
403 		task_unlock(tsk);
404 		mutex_unlock(&callback_mutex);
405 		mpol_rebind_task(tsk, &tsk->mems_allowed);
406 	}
407 }
408 
409 /*
410  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
411  *
412  * One cpuset is a subset of another if all its allowed CPUs and
413  * Memory Nodes are a subset of the other, and its exclusive flags
414  * are only set if the other's are set.  Call holding cgroup_mutex.
415  */
416 
417 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
418 {
419 	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
420 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
421 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
422 		is_mem_exclusive(p) <= is_mem_exclusive(q);
423 }
424 
425 /**
426  * alloc_trial_cpuset - allocate a trial cpuset
427  * @cs: the cpuset that the trial cpuset duplicates
428  */
429 static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
430 {
431 	struct cpuset *trial;
432 
433 	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
434 	if (!trial)
435 		return NULL;
436 
437 	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
438 		kfree(trial);
439 		return NULL;
440 	}
441 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
442 
443 	return trial;
444 }
445 
446 /**
447  * free_trial_cpuset - free the trial cpuset
448  * @trial: the trial cpuset to be freed
449  */
450 static void free_trial_cpuset(struct cpuset *trial)
451 {
452 	free_cpumask_var(trial->cpus_allowed);
453 	kfree(trial);
454 }
455 
456 /*
457  * validate_change() - Used to validate that any proposed cpuset change
458  *		       follows the structural rules for cpusets.
459  *
460  * If we replaced the flag and mask values of the current cpuset
461  * (cur) with those values in the trial cpuset (trial), would
462  * our various subset and exclusive rules still be valid?  Presumes
463  * cgroup_mutex held.
464  *
465  * 'cur' is the address of an actual, in-use cpuset.  Operations
466  * such as list traversal that depend on the actual address of the
467  * cpuset in the list must use cur below, not trial.
468  *
469  * 'trial' is the address of bulk structure copy of cur, with
470  * perhaps one or more of the fields cpus_allowed, mems_allowed,
471  * or flags changed to new, trial values.
472  *
473  * Return 0 if valid, -errno if not.
474  */
475 
476 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
477 {
478 	struct cgroup *cont;
479 	struct cpuset *c, *par;
480 
481 	/* Each of our child cpusets must be a subset of us */
482 	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
483 		if (!is_cpuset_subset(cgroup_cs(cont), trial))
484 			return -EBUSY;
485 	}
486 
487 	/* Remaining checks don't apply to root cpuset */
488 	if (cur == &top_cpuset)
489 		return 0;
490 
491 	par = cur->parent;
492 
493 	/* We must be a subset of our parent cpuset */
494 	if (!is_cpuset_subset(trial, par))
495 		return -EACCES;
496 
497 	/*
498 	 * If either I or some sibling (!= me) is exclusive, we can't
499 	 * overlap
500 	 */
501 	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
502 		c = cgroup_cs(cont);
503 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
504 		    c != cur &&
505 		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
506 			return -EINVAL;
507 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
508 		    c != cur &&
509 		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
510 			return -EINVAL;
511 	}
512 
513 	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
514 	if (cgroup_task_count(cur->css.cgroup)) {
515 		if (cpumask_empty(trial->cpus_allowed) ||
516 		    nodes_empty(trial->mems_allowed)) {
517 			return -ENOSPC;
518 		}
519 	}
520 
521 	return 0;
522 }
523 
524 /*
525  * Helper routine for generate_sched_domains().
526  * Do cpusets a, b have overlapping cpus_allowed masks?
527  */
528 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
529 {
530 	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
531 }
532 
533 static void
534 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
535 {
536 	if (dattr->relax_domain_level < c->relax_domain_level)
537 		dattr->relax_domain_level = c->relax_domain_level;
538 	return;
539 }
540 
541 static void
542 update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
543 {
544 	LIST_HEAD(q);
545 
546 	list_add(&c->stack_list, &q);
547 	while (!list_empty(&q)) {
548 		struct cpuset *cp;
549 		struct cgroup *cont;
550 		struct cpuset *child;
551 
552 		cp = list_first_entry(&q, struct cpuset, stack_list);
553 		list_del(q.next);
554 
555 		if (cpumask_empty(cp->cpus_allowed))
556 			continue;
557 
558 		if (is_sched_load_balance(cp))
559 			update_domain_attr(dattr, cp);
560 
561 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
562 			child = cgroup_cs(cont);
563 			list_add_tail(&child->stack_list, &q);
564 		}
565 	}
566 }
567 
568 /*
569  * generate_sched_domains()
570  *
571  * This function builds a partial partition of the system's CPUs.
572  * A 'partial partition' is a set of non-overlapping subsets whose
573  * union is a subset of that set.
574  * The output of this function needs to be passed to kernel/sched.c
575  * partition_sched_domains() routine, which will rebuild the scheduler's
576  * load balancing domains (sched domains) as specified by that partial
577  * partition.
578  *
579  * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
580  * for a background explanation of this.
581  *
582  * Does not return errors, on the theory that the callers of this
583  * routine would rather not worry about failures to rebuild sched
584  * domains when operating in the severe memory shortage situations
585  * that could cause allocation failures below.
586  *
587  * Must be called with cgroup_lock held.
588  *
589  * The three key local variables below are:
590  *    q  - a linked-list queue of cpuset pointers, used to implement a
591  *	   top-down scan of all cpusets.  This scan loads a pointer
592  *	   to each cpuset marked is_sched_load_balance into the
593  *	   array 'csa'.  For our purposes, rebuilding the scheduler's
594  *	   sched domains, we can ignore !is_sched_load_balance cpusets.
595  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
596  *	   that need to be load balanced, for convenient iterative
597  *	   access by the subsequent code that finds the best partition,
598  *	   i.e the set of domains (subsets) of CPUs such that the
599  *	   cpus_allowed of every cpuset marked is_sched_load_balance
600  *	   is a subset of one of these domains, while there are as
601  *	   many such domains as possible, each as small as possible.
602  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
603  *	   the kernel/sched.c routine partition_sched_domains() in a
604  *	   convenient format, that can be easily compared to the prior
605  *	   value to determine what partition elements (sched domains)
606  *	   were changed (added or removed.)
607  *
608  * Finding the best partition (set of domains):
609  *	The triple nested loops below over i, j, k scan over the
610  *	load balanced cpusets (using the array of cpuset pointers in
611  *	csa[]) looking for pairs of cpusets that have overlapping
612  *	cpus_allowed, but which don't have the same 'pn' partition
613  *	number, and merges them into the same partition number.  It keeps
614  *	looping on the 'restart' label until it can no longer find
615  *	any such pairs.
616  *
617  *	The union of the cpus_allowed masks from the set of
618  *	all cpusets having the same 'pn' value then form the one
619  *	element of the partition (one sched domain) to be passed to
620  *	partition_sched_domains().
621  */
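/*
 * Small worked example of the partition finding described above (the
 * cpusets and masks are hypothetical): three load-balanced cpusets with
 * cpus_allowed A={0,1}, B={1,2} and C={4,5}.  A and B overlap, so they
 * end up with the same 'pn' and their union forms one sched domain
 * {0,1,2}; C forms a second domain {4,5}.  generate_sched_domains()
 * then returns ndoms == 2.
 */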
622 /* FIXME: see the FIXME in partition_sched_domains() */
623 static int generate_sched_domains(struct cpumask **domains,
624 			struct sched_domain_attr **attributes)
625 {
626 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
627 	struct cpuset *cp;	/* scans q */
628 	struct cpuset **csa;	/* array of all cpuset ptrs */
629 	int csn;		/* how many cpuset ptrs in csa so far */
630 	int i, j, k;		/* indices for partition finding loops */
631 	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
632 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
633 	int ndoms = 0;		/* number of sched domains in result */
634 	int nslot;		/* next empty doms[] struct cpumask slot */
635 
636 	doms = NULL;
637 	dattr = NULL;
638 	csa = NULL;
639 
640 	/* Special case for the 99% of systems with one, full, sched domain */
641 	if (is_sched_load_balance(&top_cpuset)) {
642 		doms = kmalloc(cpumask_size(), GFP_KERNEL);
643 		if (!doms)
644 			goto done;
645 
646 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
647 		if (dattr) {
648 			*dattr = SD_ATTR_INIT;
649 			update_domain_attr_tree(dattr, &top_cpuset);
650 		}
651 		cpumask_copy(doms, top_cpuset.cpus_allowed);
652 
653 		ndoms = 1;
654 		goto done;
655 	}
656 
657 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
658 	if (!csa)
659 		goto done;
660 	csn = 0;
661 
662 	list_add(&top_cpuset.stack_list, &q);
663 	while (!list_empty(&q)) {
664 		struct cgroup *cont;
665 		struct cpuset *child;   /* scans child cpusets of cp */
666 
667 		cp = list_first_entry(&q, struct cpuset, stack_list);
668 		list_del(q.next);
669 
670 		if (cpumask_empty(cp->cpus_allowed))
671 			continue;
672 
673 		/*
674 		 * All child cpusets contain a subset of the parent's cpus, so
675 		 * just skip them, and then we call update_domain_attr_tree()
676 		 * to calc relax_domain_level of the corresponding sched
677 		 * domain.
678 		 */
679 		if (is_sched_load_balance(cp)) {
680 			csa[csn++] = cp;
681 			continue;
682 		}
683 
684 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
685 			child = cgroup_cs(cont);
686 			list_add_tail(&child->stack_list, &q);
687 		}
688   	}
689 
690 	for (i = 0; i < csn; i++)
691 		csa[i]->pn = i;
692 	ndoms = csn;
693 
694 restart:
695 	/* Find the best partition (set of sched domains) */
696 	for (i = 0; i < csn; i++) {
697 		struct cpuset *a = csa[i];
698 		int apn = a->pn;
699 
700 		for (j = 0; j < csn; j++) {
701 			struct cpuset *b = csa[j];
702 			int bpn = b->pn;
703 
704 			if (apn != bpn && cpusets_overlap(a, b)) {
705 				for (k = 0; k < csn; k++) {
706 					struct cpuset *c = csa[k];
707 
708 					if (c->pn == bpn)
709 						c->pn = apn;
710 				}
711 				ndoms--;	/* one less element */
712 				goto restart;
713 			}
714 		}
715 	}
716 
717 	/*
718 	 * Now we know how many domains to create.
719 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
720 	 */
721 	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
722 	if (!doms)
723 		goto done;
724 
725 	/*
726 	 * The rest of the code, including the scheduler, can deal with
727 	 * dattr==NULL case. No need to abort if alloc fails.
728 	 */
729 	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
730 
731 	for (nslot = 0, i = 0; i < csn; i++) {
732 		struct cpuset *a = csa[i];
733 		struct cpumask *dp;
734 		int apn = a->pn;
735 
736 		if (apn < 0) {
737 			/* Skip completed partitions */
738 			continue;
739 		}
740 
741 		dp = doms + nslot;
742 
743 		if (nslot == ndoms) {
744 			static int warnings = 10;
745 			if (warnings) {
746 				printk(KERN_WARNING
747 				 "rebuild_sched_domains confused:"
748 				  " nslot %d, ndoms %d, csn %d, i %d,"
749 				  " apn %d\n",
750 				  nslot, ndoms, csn, i, apn);
751 				warnings--;
752 			}
753 			continue;
754 		}
755 
756 		cpumask_clear(dp);
757 		if (dattr)
758 			*(dattr + nslot) = SD_ATTR_INIT;
759 		for (j = i; j < csn; j++) {
760 			struct cpuset *b = csa[j];
761 
762 			if (apn == b->pn) {
763 				cpumask_or(dp, dp, b->cpus_allowed);
764 				if (dattr)
765 					update_domain_attr_tree(dattr + nslot, b);
766 
767 				/* Done with this partition */
768 				b->pn = -1;
769 			}
770 		}
771 		nslot++;
772 	}
773 	BUG_ON(nslot != ndoms);
774 
775 done:
776 	kfree(csa);
777 
778 	/*
779 	 * Fallback to the default domain if kmalloc() failed.
780 	 * See comments in partition_sched_domains().
781 	 */
782 	if (doms == NULL)
783 		ndoms = 1;
784 
785 	*domains    = doms;
786 	*attributes = dattr;
787 	return ndoms;
788 }
789 
790 /*
791  * Rebuild scheduler domains.
792  *
793  * Call with neither cgroup_mutex held nor within get_online_cpus().
794  * Takes both cgroup_mutex and get_online_cpus().
795  *
796  * Cannot be directly called from cpuset code handling changes
797  * to the cpuset pseudo-filesystem, because it cannot be called
798  * from code that already holds cgroup_mutex.
799  */
800 static void do_rebuild_sched_domains(struct work_struct *unused)
801 {
802 	struct sched_domain_attr *attr;
803 	struct cpumask *doms;
804 	int ndoms;
805 
806 	get_online_cpus();
807 
808 	/* Generate domain masks and attrs */
809 	cgroup_lock();
810 	ndoms = generate_sched_domains(&doms, &attr);
811 	cgroup_unlock();
812 
813 	/* Have scheduler rebuild the domains */
814 	partition_sched_domains(ndoms, doms, attr);
815 
816 	put_online_cpus();
817 }
818 
819 static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
820 
821 /*
822  * Rebuild scheduler domains, asynchronously via workqueue.
823  *
824  * If the flag 'sched_load_balance' of any cpuset with non-empty
825  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
826  * which has that flag enabled, or if any cpuset with a non-empty
827  * 'cpus' is removed, then call this routine to rebuild the
828  * scheduler's dynamic sched domains.
829  *
830  * The rebuild_sched_domains() and partition_sched_domains()
831  * routines must nest cgroup_lock() inside get_online_cpus(),
832  * but such cpuset changes as these must nest that locking the
833  * other way, holding cgroup_lock() for much of the code.
834  *
835  * So in order to avoid an ABBA deadlock, the cpuset code handling
836  * these user changes delegates the actual sched domain rebuilding
837  * to a separate workqueue thread, which ends up processing the
838  * above do_rebuild_sched_domains() function.
839  */
840 static void async_rebuild_sched_domains(void)
841 {
842 	queue_work(cpuset_wq, &rebuild_sched_domains_work);
843 }
844 
845 /*
846  * Accomplishes the same scheduler domain rebuild as the above
847  * async_rebuild_sched_domains(), however it directly calls the
848  * rebuild routine synchronously rather than calling it via an
849  * asynchronous work thread.
850  *
851  * This can only be called from code that is not holding
852  * cgroup_mutex (not nested in a cgroup_lock() call.)
853  */
854 void rebuild_sched_domains(void)
855 {
856 	do_rebuild_sched_domains(NULL);
857 }
858 
859 /**
860  * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
861  * @tsk: task to test
862  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
863  *
864  * Call with cgroup_mutex held.  May take callback_mutex during call.
865  * Called for each task in a cgroup by cgroup_scan_tasks().
866  * Return nonzero if this task's cpus_allowed mask should be changed (in other
867  * words, if its mask is not equal to its cpuset's mask).
868  */
869 static int cpuset_test_cpumask(struct task_struct *tsk,
870 			       struct cgroup_scanner *scan)
871 {
872 	return !cpumask_equal(&tsk->cpus_allowed,
873 			(cgroup_cs(scan->cg))->cpus_allowed);
874 }
875 
876 /**
877  * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
878  * @tsk: task to test
879  * @scan: struct cgroup_scanner containing the cgroup of the task
880  *
881  * Called by cgroup_scan_tasks() for each task in a cgroup whose
882  * cpus_allowed mask needs to be changed.
883  *
884  * We don't need to re-check for the cgroup/cpuset membership, since we're
885  * holding cgroup_lock() at this point.
886  */
887 static void cpuset_change_cpumask(struct task_struct *tsk,
888 				  struct cgroup_scanner *scan)
889 {
890 	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
891 }
892 
893 /**
894  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
895  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
896  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
897  *
898  * Called with cgroup_mutex held
899  *
900  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
901  * calling callback functions for each.
902  *
903  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
904  * if @heap != NULL.
905  */
906 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
907 {
908 	struct cgroup_scanner scan;
909 
910 	scan.cg = cs->css.cgroup;
911 	scan.test_task = cpuset_test_cpumask;
912 	scan.process_task = cpuset_change_cpumask;
913 	scan.heap = heap;
914 	cgroup_scan_tasks(&scan);
915 }
916 
917 /**
918  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
919  * @cs: the cpuset to consider
920  * @buf: buffer of cpu numbers written to this cpuset
921  */
922 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
923 			  const char *buf)
924 {
925 	struct ptr_heap heap;
926 	int retval;
927 	int is_load_balanced;
928 
929 	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
930 	if (cs == &top_cpuset)
931 		return -EACCES;
932 
933 	/*
934 	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
935 	 * Since cpulist_parse() fails on an empty mask, we special case
936 	 * that parsing.  The validate_change() call ensures that cpusets
937 	 * with tasks have cpus.
938 	 */
939 	if (!*buf) {
940 		cpumask_clear(trialcs->cpus_allowed);
941 	} else {
942 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
943 		if (retval < 0)
944 			return retval;
945 
946 		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
947 			return -EINVAL;
948 	}
949 	retval = validate_change(cs, trialcs);
950 	if (retval < 0)
951 		return retval;
952 
953 	/* Nothing to do if the cpus didn't change */
954 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
955 		return 0;
956 
957 	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
958 	if (retval)
959 		return retval;
960 
961 	is_load_balanced = is_sched_load_balance(trialcs);
962 
963 	mutex_lock(&callback_mutex);
964 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
965 	mutex_unlock(&callback_mutex);
966 
967 	/*
968 	 * Scan tasks in the cpuset, and update the cpumasks of any
969 	 * that need an update.
970 	 */
971 	update_tasks_cpumask(cs, &heap);
972 
973 	heap_free(&heap);
974 
975 	if (is_load_balanced)
976 		async_rebuild_sched_domains();
977 	return 0;
978 }
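/*
 * For reference, update_cpumask() is reached from userspace via
 * cpuset_write_resmask() below (the FILE_CPULIST case), e.g. when a
 * cpulist such as "0-2,4" is written to a cpuset's "cpus" file;
 * cpulist_parse() fills trialcs->cpus_allowed from that string.
 */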
979 
980 /*
981  * cpuset_migrate_mm
982  *
983  *    Migrate memory region from one set of nodes to another.
984  *
985  *    Temporarily set the task's mems_allowed to the target nodes of migration,
986  *    so that the migration code can allocate pages on these nodes.
987  *
988  *    Call holding cgroup_mutex, so current's cpuset won't change
989  *    during this call, as manage_mutex holds off any cpuset_attach()
990  *    calls.  Therefore we don't need to take task_lock around the
991  *    call to guarantee_online_mems(), as we know no one is changing
992  *    our task's cpuset.
993  *
994  *    Hold callback_mutex around the two modifications of our task's
995  *    mems_allowed to synchronize with cpuset_mems_allowed().
996  *
997  *    While the mm_struct we are migrating is typically from some
998  *    other task, the task_struct mems_allowed that we are hacking
999  *    is for our current task, which must allocate new pages for that
1000  *    migrating memory region.
1001  *
1002  *    We call cpuset_update_task_memory_state() before hacking
1003  *    our task's mems_allowed, so that we are assured of being in
1004  *    sync with our task's cpuset, and in particular, callbacks to
1005  *    cpuset_update_task_memory_state() from nested page allocations
1006  *    won't see any mismatch of our cpuset and task mems_generation
1007  *    values, so won't overwrite our hacked task's mems_allowed
1008  *    nodemask.
1009  */
1010 
1011 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1012 							const nodemask_t *to)
1013 {
1014 	struct task_struct *tsk = current;
1015 
1016 	cpuset_update_task_memory_state();
1017 
1018 	mutex_lock(&callback_mutex);
1019 	tsk->mems_allowed = *to;
1020 	mutex_unlock(&callback_mutex);
1021 
1022 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
1023 
1024 	mutex_lock(&callback_mutex);
1025 	guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
1026 	mutex_unlock(&callback_mutex);
1027 }
1028 
1029 static void *cpuset_being_rebound;
1030 
1031 /**
1032  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1033  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1034  * @oldmem: old mems_allowed of cpuset cs
1035  *
1036  * Called with cgroup_mutex held
1037  * Return 0 if successful, -errno if not.
1038  */
1039 static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
1040 {
1041 	struct task_struct *p;
1042 	struct mm_struct **mmarray;
1043 	int i, n, ntasks;
1044 	int migrate;
1045 	int fudge;
1046 	struct cgroup_iter it;
1047 	int retval;
1048 
1049 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1050 
1051 	fudge = 10;				/* spare mmarray[] slots */
1052 	fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
1053 	retval = -ENOMEM;
1054 
1055 	/*
1056 	 * Allocate mmarray[] to hold mm reference for each task
1057 	 * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
1058 	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
1059 	 * few more lines of code, we can retry until we get a big
1060 	 * enough mmarray[] w/o using GFP_ATOMIC.
1061 	 */
1062 	while (1) {
1063 		ntasks = cgroup_task_count(cs->css.cgroup);  /* guess */
1064 		ntasks += fudge;
1065 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
1066 		if (!mmarray)
1067 			goto done;
1068 		read_lock(&tasklist_lock);		/* block fork */
1069 		if (cgroup_task_count(cs->css.cgroup) <= ntasks)
1070 			break;				/* got enough */
1071 		read_unlock(&tasklist_lock);		/* try again */
1072 		kfree(mmarray);
1073 	}
1074 
1075 	n = 0;
1076 
1077 	/* Load up mmarray[] with mm reference for each task in cpuset. */
1078 	cgroup_iter_start(cs->css.cgroup, &it);
1079 	while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
1080 		struct mm_struct *mm;
1081 
1082 		if (n >= ntasks) {
1083 			printk(KERN_WARNING
1084 				"Cpuset mempolicy rebind incomplete.\n");
1085 			break;
1086 		}
1087 		mm = get_task_mm(p);
1088 		if (!mm)
1089 			continue;
1090 		mmarray[n++] = mm;
1091 	}
1092 	cgroup_iter_end(cs->css.cgroup, &it);
1093 	read_unlock(&tasklist_lock);
1094 
1095 	/*
1096 	 * Now that we've dropped the tasklist spinlock, we can
1097 	 * rebind the vma mempolicies of each mm in mmarray[] to their
1098 	 * new cpuset, and release that mm.  The mpol_rebind_mm()
1099 	 * call takes mmap_sem, which we couldn't take while holding
1100 	 * tasklist_lock.  Forks can happen again now - the mpol_dup()
1101 	 * cpuset_being_rebound check will catch such forks, and rebind
1102 	 * their vma mempolicies too.  Because we still hold the global
1103 	 * cgroup_mutex, we know that no other rebind effort will
1104 	 * be contending for the global variable cpuset_being_rebound.
1105 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1106 	 * is idempotent.  Also migrate pages in each mm to new nodes.
1107 	 */
1108 	migrate = is_memory_migrate(cs);
1109 	for (i = 0; i < n; i++) {
1110 		struct mm_struct *mm = mmarray[i];
1111 
1112 		mpol_rebind_mm(mm, &cs->mems_allowed);
1113 		if (migrate)
1114 			cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1115 		mmput(mm);
1116 	}
1117 
1118 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1119 	kfree(mmarray);
1120 	cpuset_being_rebound = NULL;
1121 	retval = 0;
1122 done:
1123 	return retval;
1124 }
1125 
1126 /*
1127  * Handle user request to change the 'mems' memory placement
1128  * of a cpuset.  Needs to validate the request, update the
1129  * cpuset's mems_allowed and mems_generation, and for each
1130  * task in the cpuset, rebind any vma mempolicies and if
1131  * the cpuset is marked 'memory_migrate', migrate the task's
1132  * pages to the new memory.
1133  *
1134  * Call with cgroup_mutex held.  May take callback_mutex during call.
1135  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1136  * lock each such task's mm->mmap_sem, scan its vma's and rebind
1137  * their mempolicies to the cpuset's new mems_allowed.
1138  */
1139 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1140 			   const char *buf)
1141 {
1142 	nodemask_t oldmem;
1143 	int retval;
1144 
1145 	/*
1146 	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1147 	 * it's read-only
1148 	 */
1149 	if (cs == &top_cpuset)
1150 		return -EACCES;
1151 
1152 	/*
1153 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1154 	 * Since nodelist_parse() fails on an empty mask, we special case
1155 	 * that parsing.  The validate_change() call ensures that cpusets
1156 	 * with tasks have memory.
1157 	 */
1158 	if (!*buf) {
1159 		nodes_clear(trialcs->mems_allowed);
1160 	} else {
1161 		retval = nodelist_parse(buf, trialcs->mems_allowed);
1162 		if (retval < 0)
1163 			goto done;
1164 
1165 		if (!nodes_subset(trialcs->mems_allowed,
1166 				node_states[N_HIGH_MEMORY]))
1167 			return -EINVAL;
1168 	}
1169 	oldmem = cs->mems_allowed;
1170 	if (nodes_equal(oldmem, trialcs->mems_allowed)) {
1171 		retval = 0;		/* Too easy - nothing to do */
1172 		goto done;
1173 	}
1174 	retval = validate_change(cs, trialcs);
1175 	if (retval < 0)
1176 		goto done;
1177 
1178 	mutex_lock(&callback_mutex);
1179 	cs->mems_allowed = trialcs->mems_allowed;
1180 	cs->mems_generation = cpuset_mems_generation++;
1181 	mutex_unlock(&callback_mutex);
1182 
1183 	retval = update_tasks_nodemask(cs, &oldmem);
1184 done:
1185 	return retval;
1186 }
1187 
1188 int current_cpuset_is_being_rebound(void)
1189 {
1190 	return task_cs(current) == cpuset_being_rebound;
1191 }
1192 
1193 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1194 {
1195 	if (val < -1 || val >= SD_LV_MAX)
1196 		return -EINVAL;
1197 
1198 	if (val != cs->relax_domain_level) {
1199 		cs->relax_domain_level = val;
1200 		if (!cpumask_empty(cs->cpus_allowed) &&
1201 		    is_sched_load_balance(cs))
1202 			async_rebuild_sched_domains();
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 /*
1209  * update_flag - read a 0 or a 1 in a file and update associated flag
1210  * bit:		the bit to update (see cpuset_flagbits_t)
1211  * cs:		the cpuset to update
1212  * turning_on: 	whether the flag is being set or cleared
1213  *
1214  * Call with cgroup_mutex held.
1215  */
1216 
1217 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1218 		       int turning_on)
1219 {
1220 	struct cpuset *trialcs;
1221 	int err;
1222 	int balance_flag_changed;
1223 
1224 	trialcs = alloc_trial_cpuset(cs);
1225 	if (!trialcs)
1226 		return -ENOMEM;
1227 
1228 	if (turning_on)
1229 		set_bit(bit, &trialcs->flags);
1230 	else
1231 		clear_bit(bit, &trialcs->flags);
1232 
1233 	err = validate_change(cs, trialcs);
1234 	if (err < 0)
1235 		goto out;
1236 
1237 	balance_flag_changed = (is_sched_load_balance(cs) !=
1238 				is_sched_load_balance(trialcs));
1239 
1240 	mutex_lock(&callback_mutex);
1241 	cs->flags = trialcs->flags;
1242 	mutex_unlock(&callback_mutex);
1243 
1244 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1245 		async_rebuild_sched_domains();
1246 
1247 out:
1248 	free_trial_cpuset(trialcs);
1249 	return err;
1250 }
1251 
1252 /*
1253  * Frequency meter - How fast is some event occurring?
1254  *
1255  * These routines manage a digitally filtered, constant time based,
1256  * event frequency meter.  There are four routines:
1257  *   fmeter_init() - initialize a frequency meter.
1258  *   fmeter_markevent() - called each time the event happens.
1259  *   fmeter_getrate() - returns the recent rate of such events.
1260  *   fmeter_update() - internal routine used to update fmeter.
1261  *
1262  * A common data structure is passed to each of these routines,
1263  * which is used to keep track of the state required to manage the
1264  * frequency meter and its digital filter.
1265  *
1266  * The filter works on the number of events marked per unit time.
1267  * The filter is single-pole low-pass recursive (IIR).  The time unit
1268  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1269  * simulate 3 decimal digits of precision (multiplied by 1000).
1270  *
1271  * With an FM_COEF of 933, and a time base of 1 second, the filter
1272  * has a half-life of 10 seconds, meaning that if the events quit
1273  * happening, then the rate returned from the fmeter_getrate()
1274  * will be cut in half each 10 seconds, until it converges to zero.
1275  *
1276  * It is not worth doing a real infinitely recursive filter.  If more
1277  * than FM_MAXTICKS ticks have elapsed since the last filter event,
1278  * just compute FM_MAXTICKS ticks worth, by which point the level
1279  * will be stable.
1280  *
1281  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1282  * arithmetic overflow in the fmeter_update() routine.
1283  *
1284  * Given the simple 32 bit integer arithmetic used, this meter works
1285  * best for reporting rates between one per millisecond (msec) and
1286  * one per 32 (approx) seconds.  At constant rates faster than one
1287  * per msec it maxes out at values just under 1,000,000.  At constant
1288  * rates between one per msec, and one per second it will stabilize
1289  * to a value N*1000, where N is the rate of events per second.
1290  * At constant rates between one per second and one per 32 seconds,
1291  * it will be choppy, moving up on the seconds that have an event,
1292  * and then decaying until the next event.  At rates slower than
1293  * about one in 32 seconds, it decays all the way back to zero between
1294  * each event.
1295  */
1296 
1297 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
1298 #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1299 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
1300 #define FM_SCALE 1000		/* faux fixed point scale */
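/*
 * Rough arithmetic behind these constants: each one-second tick scales
 * the stored value by FM_COEF/FM_SCALE = 0.933, and 0.933^10 ~= 0.5,
 * which is where the 10 second half-life quoted above comes from.  At a
 * steady rate of N events per second the fixed point of
 *	val = (FM_COEF*val + (FM_SCALE - FM_COEF)*N*FM_SCALE) / FM_SCALE
 * is val = N*FM_SCALE, i.e. the "N*1000" value mentioned above.
 */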
1301 
1302 /* Initialize a frequency meter */
1303 static void fmeter_init(struct fmeter *fmp)
1304 {
1305 	fmp->cnt = 0;
1306 	fmp->val = 0;
1307 	fmp->time = 0;
1308 	spin_lock_init(&fmp->lock);
1309 }
1310 
1311 /* Internal meter update - process cnt events and update value */
1312 static void fmeter_update(struct fmeter *fmp)
1313 {
1314 	time_t now = get_seconds();
1315 	time_t ticks = now - fmp->time;
1316 
1317 	if (ticks == 0)
1318 		return;
1319 
1320 	ticks = min(FM_MAXTICKS, ticks);
1321 	while (ticks-- > 0)
1322 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1323 	fmp->time = now;
1324 
1325 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1326 	fmp->cnt = 0;
1327 }
1328 
1329 /* Process any previous ticks, then bump cnt by one (times scale). */
1330 static void fmeter_markevent(struct fmeter *fmp)
1331 {
1332 	spin_lock(&fmp->lock);
1333 	fmeter_update(fmp);
1334 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1335 	spin_unlock(&fmp->lock);
1336 }
1337 
1338 /* Process any previous ticks, then return current value. */
1339 static int fmeter_getrate(struct fmeter *fmp)
1340 {
1341 	int val;
1342 
1343 	spin_lock(&fmp->lock);
1344 	fmeter_update(fmp);
1345 	val = fmp->val;
1346 	spin_unlock(&fmp->lock);
1347 	return val;
1348 }
1349 
1350 /* Protected by cgroup_lock */
1351 static cpumask_var_t cpus_attach;
1352 
1353 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1354 static int cpuset_can_attach(struct cgroup_subsys *ss,
1355 			     struct cgroup *cont, struct task_struct *tsk)
1356 {
1357 	struct cpuset *cs = cgroup_cs(cont);
1358 	int ret = 0;
1359 
1360 	if ((current != tsk) && (!capable(CAP_SYS_ADMIN))) {
1361 		const struct cred *cred = current_cred(), *tcred;
1362 		tcred = __task_cred(tsk);	/* tcred was never set; use the target task's creds */
1363 		if (cred->euid != tcred->uid && cred->euid != tcred->suid)
1364 			return -EPERM;
1365 	}
1366 
1367 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1368 		return -ENOSPC;
1369 
1370 	if (tsk->flags & PF_THREAD_BOUND) {
1371 		mutex_lock(&callback_mutex);
1372 		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
1373 			ret = -EINVAL;
1374 		mutex_unlock(&callback_mutex);
1375 	}
1376 
1377 	return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
1378 }
1379 
1380 static void cpuset_attach(struct cgroup_subsys *ss,
1381 			  struct cgroup *cont, struct cgroup *oldcont,
1382 			  struct task_struct *tsk)
1383 {
1384 	nodemask_t from, to;
1385 	struct mm_struct *mm;
1386 	struct cpuset *cs = cgroup_cs(cont);
1387 	struct cpuset *oldcs = cgroup_cs(oldcont);
1388 	int err;
1389 
1390 	if (cs == &top_cpuset) {
1391 		cpumask_copy(cpus_attach, cpu_possible_mask);
1392 	} else {
1393 		mutex_lock(&callback_mutex);
1394 		guarantee_online_cpus(cs, cpus_attach);
1395 		mutex_unlock(&callback_mutex);
1396 	}
1397 	err = set_cpus_allowed_ptr(tsk, cpus_attach);
1398 	if (err)
1399 		return;
1400 
1401 	from = oldcs->mems_allowed;
1402 	to = cs->mems_allowed;
1403 	mm = get_task_mm(tsk);
1404 	if (mm) {
1405 		mpol_rebind_mm(mm, &to);
1406 		if (is_memory_migrate(cs))
1407 			cpuset_migrate_mm(mm, &from, &to);
1408 		mmput(mm);
1409 	}
1410 }
1411 
1412 /* The various types of files and directories in a cpuset file system */
1413 
1414 typedef enum {
1415 	FILE_MEMORY_MIGRATE,
1416 	FILE_CPULIST,
1417 	FILE_MEMLIST,
1418 	FILE_CPU_EXCLUSIVE,
1419 	FILE_MEM_EXCLUSIVE,
1420 	FILE_MEM_HARDWALL,
1421 	FILE_SCHED_LOAD_BALANCE,
1422 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1423 	FILE_MEMORY_PRESSURE_ENABLED,
1424 	FILE_MEMORY_PRESSURE,
1425 	FILE_SPREAD_PAGE,
1426 	FILE_SPREAD_SLAB,
1427 } cpuset_filetype_t;
1428 
1429 static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1430 {
1431 	int retval = 0;
1432 	struct cpuset *cs = cgroup_cs(cgrp);
1433 	cpuset_filetype_t type = cft->private;
1434 
1435 	if (!cgroup_lock_live_group(cgrp))
1436 		return -ENODEV;
1437 
1438 	switch (type) {
1439 	case FILE_CPU_EXCLUSIVE:
1440 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1441 		break;
1442 	case FILE_MEM_EXCLUSIVE:
1443 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1444 		break;
1445 	case FILE_MEM_HARDWALL:
1446 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
1447 		break;
1448 	case FILE_SCHED_LOAD_BALANCE:
1449 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1450 		break;
1451 	case FILE_MEMORY_MIGRATE:
1452 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1453 		break;
1454 	case FILE_MEMORY_PRESSURE_ENABLED:
1455 		cpuset_memory_pressure_enabled = !!val;
1456 		break;
1457 	case FILE_MEMORY_PRESSURE:
1458 		retval = -EACCES;
1459 		break;
1460 	case FILE_SPREAD_PAGE:
1461 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1462 		cs->mems_generation = cpuset_mems_generation++;
1463 		break;
1464 	case FILE_SPREAD_SLAB:
1465 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1466 		cs->mems_generation = cpuset_mems_generation++;
1467 		break;
1468 	default:
1469 		retval = -EINVAL;
1470 		break;
1471 	}
1472 	cgroup_unlock();
1473 	return retval;
1474 }
1475 
1476 static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1477 {
1478 	int retval = 0;
1479 	struct cpuset *cs = cgroup_cs(cgrp);
1480 	cpuset_filetype_t type = cft->private;
1481 
1482 	if (!cgroup_lock_live_group(cgrp))
1483 		return -ENODEV;
1484 
1485 	switch (type) {
1486 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1487 		retval = update_relax_domain_level(cs, val);
1488 		break;
1489 	default:
1490 		retval = -EINVAL;
1491 		break;
1492 	}
1493 	cgroup_unlock();
1494 	return retval;
1495 }
1496 
1497 /*
1498  * Common handling for a write to a "cpus" or "mems" file.
1499  */
1500 static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1501 				const char *buf)
1502 {
1503 	int retval = 0;
1504 	struct cpuset *cs = cgroup_cs(cgrp);
1505 	struct cpuset *trialcs;
1506 
1507 	if (!cgroup_lock_live_group(cgrp))
1508 		return -ENODEV;
1509 
1510 	trialcs = alloc_trial_cpuset(cs);
1511 	if (!trialcs)
1512 		return -ENOMEM;
1513 
1514 	switch (cft->private) {
1515 	case FILE_CPULIST:
1516 		retval = update_cpumask(cs, trialcs, buf);
1517 		break;
1518 	case FILE_MEMLIST:
1519 		retval = update_nodemask(cs, trialcs, buf);
1520 		break;
1521 	default:
1522 		retval = -EINVAL;
1523 		break;
1524 	}
1525 
1526 	free_trial_cpuset(trialcs);
1527 	cgroup_unlock();
1528 	return retval;
1529 }
1530 
1531 /*
1532  * These ascii lists should be read in a single call, by using a user
1533  * buffer large enough to hold the entire map.  If read in smaller
1534  * chunks, there is no guarantee of atomicity.  Since the display format
1535  * used, list of ranges of sequential numbers, is variable length,
1536  * and since these maps can change value dynamically, one could read
1537  * gibberish by doing partial reads while a list was changing.
1538  * A single large read to a buffer that crosses a page boundary is
1539  * ok, because the result being copied to user land is not recomputed
1540  * across a page fault.
1541  */
1542 
1543 static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1544 {
1545 	int ret;
1546 
1547 	mutex_lock(&callback_mutex);
1548 	ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1549 	mutex_unlock(&callback_mutex);
1550 
1551 	return ret;
1552 }
1553 
1554 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1555 {
1556 	nodemask_t mask;
1557 
1558 	mutex_lock(&callback_mutex);
1559 	mask = cs->mems_allowed;
1560 	mutex_unlock(&callback_mutex);
1561 
1562 	return nodelist_scnprintf(page, PAGE_SIZE, mask);
1563 }
1564 
1565 static ssize_t cpuset_common_file_read(struct cgroup *cont,
1566 				       struct cftype *cft,
1567 				       struct file *file,
1568 				       char __user *buf,
1569 				       size_t nbytes, loff_t *ppos)
1570 {
1571 	struct cpuset *cs = cgroup_cs(cont);
1572 	cpuset_filetype_t type = cft->private;
1573 	char *page;
1574 	ssize_t retval = 0;
1575 	char *s;
1576 
1577 	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1578 		return -ENOMEM;
1579 
1580 	s = page;
1581 
1582 	switch (type) {
1583 	case FILE_CPULIST:
1584 		s += cpuset_sprintf_cpulist(s, cs);
1585 		break;
1586 	case FILE_MEMLIST:
1587 		s += cpuset_sprintf_memlist(s, cs);
1588 		break;
1589 	default:
1590 		retval = -EINVAL;
1591 		goto out;
1592 	}
1593 	*s++ = '\n';
1594 
1595 	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1596 out:
1597 	free_page((unsigned long)page);
1598 	return retval;
1599 }
1600 
1601 static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1602 {
1603 	struct cpuset *cs = cgroup_cs(cont);
1604 	cpuset_filetype_t type = cft->private;
1605 	switch (type) {
1606 	case FILE_CPU_EXCLUSIVE:
1607 		return is_cpu_exclusive(cs);
1608 	case FILE_MEM_EXCLUSIVE:
1609 		return is_mem_exclusive(cs);
1610 	case FILE_MEM_HARDWALL:
1611 		return is_mem_hardwall(cs);
1612 	case FILE_SCHED_LOAD_BALANCE:
1613 		return is_sched_load_balance(cs);
1614 	case FILE_MEMORY_MIGRATE:
1615 		return is_memory_migrate(cs);
1616 	case FILE_MEMORY_PRESSURE_ENABLED:
1617 		return cpuset_memory_pressure_enabled;
1618 	case FILE_MEMORY_PRESSURE:
1619 		return fmeter_getrate(&cs->fmeter);
1620 	case FILE_SPREAD_PAGE:
1621 		return is_spread_page(cs);
1622 	case FILE_SPREAD_SLAB:
1623 		return is_spread_slab(cs);
1624 	default:
1625 		BUG();
1626 	}
1627 
1628 	/* Unreachable but makes gcc happy */
1629 	return 0;
1630 }
1631 
1632 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1633 {
1634 	struct cpuset *cs = cgroup_cs(cont);
1635 	cpuset_filetype_t type = cft->private;
1636 	switch (type) {
1637 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1638 		return cs->relax_domain_level;
1639 	default:
1640 		BUG();
1641 	}
1642 
1643 	/* Unreachable but makes gcc happy */
1644 	return 0;
1645 }
1646 
1647 
1648 /*
1649  * for the common functions, 'private' gives the type of file
1650  */
1651 
1652 static struct cftype files[] = {
1653 	{
1654 		.name = "cpus",
1655 		.read = cpuset_common_file_read,
1656 		.write_string = cpuset_write_resmask,
1657 		.max_write_len = (100U + 6 * NR_CPUS),
1658 		.private = FILE_CPULIST,
1659 	},
1660 
1661 	{
1662 		.name = "mems",
1663 		.read = cpuset_common_file_read,
1664 		.write_string = cpuset_write_resmask,
1665 		.max_write_len = (100U + 6 * MAX_NUMNODES),
1666 		.private = FILE_MEMLIST,
1667 	},
1668 
1669 	{
1670 		.name = "cpu_exclusive",
1671 		.read_u64 = cpuset_read_u64,
1672 		.write_u64 = cpuset_write_u64,
1673 		.private = FILE_CPU_EXCLUSIVE,
1674 	},
1675 
1676 	{
1677 		.name = "mem_exclusive",
1678 		.read_u64 = cpuset_read_u64,
1679 		.write_u64 = cpuset_write_u64,
1680 		.private = FILE_MEM_EXCLUSIVE,
1681 	},
1682 
1683 	{
1684 		.name = "mem_hardwall",
1685 		.read_u64 = cpuset_read_u64,
1686 		.write_u64 = cpuset_write_u64,
1687 		.private = FILE_MEM_HARDWALL,
1688 	},
1689 
1690 	{
1691 		.name = "sched_load_balance",
1692 		.read_u64 = cpuset_read_u64,
1693 		.write_u64 = cpuset_write_u64,
1694 		.private = FILE_SCHED_LOAD_BALANCE,
1695 	},
1696 
1697 	{
1698 		.name = "sched_relax_domain_level",
1699 		.read_s64 = cpuset_read_s64,
1700 		.write_s64 = cpuset_write_s64,
1701 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1702 	},
1703 
1704 	{
1705 		.name = "memory_migrate",
1706 		.read_u64 = cpuset_read_u64,
1707 		.write_u64 = cpuset_write_u64,
1708 		.private = FILE_MEMORY_MIGRATE,
1709 	},
1710 
1711 	{
1712 		.name = "memory_pressure",
1713 		.read_u64 = cpuset_read_u64,
1714 		.write_u64 = cpuset_write_u64,
1715 		.private = FILE_MEMORY_PRESSURE,
1716 	},
1717 
1718 	{
1719 		.name = "memory_spread_page",
1720 		.read_u64 = cpuset_read_u64,
1721 		.write_u64 = cpuset_write_u64,
1722 		.private = FILE_SPREAD_PAGE,
1723 	},
1724 
1725 	{
1726 		.name = "memory_spread_slab",
1727 		.read_u64 = cpuset_read_u64,
1728 		.write_u64 = cpuset_write_u64,
1729 		.private = FILE_SPREAD_SLAB,
1730 	},
1731 };
1732 
1733 static struct cftype cft_memory_pressure_enabled = {
1734 	.name = "memory_pressure_enabled",
1735 	.read_u64 = cpuset_read_u64,
1736 	.write_u64 = cpuset_write_u64,
1737 	.private = FILE_MEMORY_PRESSURE_ENABLED,
1738 };
1739 
1740 static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1741 {
1742 	int err;
1743 
1744 	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
1745 	if (err)
1746 		return err;
1747 	/* memory_pressure_enabled is in root cpuset only */
1748 	if (!cont->parent)
1749 		err = cgroup_add_file(cont, ss,
1750 				      &cft_memory_pressure_enabled);
1751 	return err;
1752 }
1753 
1754 /*
1755  * post_clone() is called at the end of cgroup_clone().
1756  * 'cgroup' was just created automatically as a result of
1757  * a cgroup_clone(), and the current task is about to
1758  * be moved into 'cgroup'.
1759  *
1760  * Currently we refuse to set up the cgroup - thereby
1761  * refusing to let the task enter it, and as a result refusing
1762  * the sys_unshare() or clone() which initiated it - if any
1763  * sibling cpusets have exclusive cpus or mem.
1764  *
1765  * If this becomes a problem for some users who wish to
1766  * allow that scenario, then cpuset_post_clone() could be
1767  * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1768  * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
1769  * held.
1770  */
1771 static void cpuset_post_clone(struct cgroup_subsys *ss,
1772 			      struct cgroup *cgroup)
1773 {
1774 	struct cgroup *parent, *child;
1775 	struct cpuset *cs, *parent_cs;
1776 
1777 	parent = cgroup->parent;
1778 	list_for_each_entry(child, &parent->children, sibling) {
1779 		cs = cgroup_cs(child);
1780 		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
1781 			return;
1782 	}
1783 	cs = cgroup_cs(cgroup);
1784 	parent_cs = cgroup_cs(parent);
1785 
1786 	cs->mems_allowed = parent_cs->mems_allowed;
1787 	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
1788 	return;
1789 }
1790 
1791 /*
1792  *	cpuset_create - create a cpuset
1793  *	ss:	cpuset cgroup subsystem
1794  *	cont:	control group that the new cpuset will be part of
1795  */
1796 
1797 static struct cgroup_subsys_state *cpuset_create(
1798 	struct cgroup_subsys *ss,
1799 	struct cgroup *cont)
1800 {
1801 	struct cpuset *cs;
1802 	struct cpuset *parent;
1803 
1804 	if (!cont->parent) {
1805 		/* This is early initialization for the top cgroup */
1806 		top_cpuset.mems_generation = cpuset_mems_generation++;
1807 		return &top_cpuset.css;
1808 	}
1809 	parent = cgroup_cs(cont->parent);
1810 	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1811 	if (!cs)
1812 		return ERR_PTR(-ENOMEM);
1813 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1814 		kfree(cs);
1815 		return ERR_PTR(-ENOMEM);
1816 	}
1817 
1818 	cpuset_update_task_memory_state();
1819 	cs->flags = 0;
1820 	if (is_spread_page(parent))
1821 		set_bit(CS_SPREAD_PAGE, &cs->flags);
1822 	if (is_spread_slab(parent))
1823 		set_bit(CS_SPREAD_SLAB, &cs->flags);
1824 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1825 	cpumask_clear(cs->cpus_allowed);
1826 	nodes_clear(cs->mems_allowed);
1827 	cs->mems_generation = cpuset_mems_generation++;
1828 	fmeter_init(&cs->fmeter);
1829 	cs->relax_domain_level = -1;
1830 
1831 	cs->parent = parent;
1832 	number_of_cpusets++;
1833 	return &cs->css;
1834 }
1835 
1836 /*
1837  * If the cpuset being removed has its flag 'sched_load_balance'
1838  * enabled, then simulate turning sched_load_balance off, which
1839  * will call async_rebuild_sched_domains().
1840  */
1841 
1842 static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1843 {
1844 	struct cpuset *cs = cgroup_cs(cont);
1845 
1846 	cpuset_update_task_memory_state();
1847 
1848 	if (is_sched_load_balance(cs))
1849 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1850 
1851 	number_of_cpusets--;
1852 	free_cpumask_var(cs->cpus_allowed);
1853 	kfree(cs);
1854 }
1855 
1856 struct cgroup_subsys cpuset_subsys = {
1857 	.name = "cpuset",
1858 	.create = cpuset_create,
1859 	.destroy = cpuset_destroy,
1860 	.can_attach = cpuset_can_attach,
1861 	.attach = cpuset_attach,
1862 	.populate = cpuset_populate,
1863 	.post_clone = cpuset_post_clone,
1864 	.subsys_id = cpuset_subsys_id,
1865 	.early_init = 1,
1866 };
1867 
1868 /*
1869  * cpuset_init_early - just enough so that the calls to
1870  * cpuset_update_task_memory_state() in early init code
1871  * are harmless.
1872  */
1873 
1874 int __init cpuset_init_early(void)
1875 {
1876 	alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
1877 
1878 	top_cpuset.mems_generation = cpuset_mems_generation++;
1879 	return 0;
1880 }
1881 
1882 
1883 /**
1884  * cpuset_init - initialize cpusets at system boot
1885  *
1886  * Description: Initialize top_cpuset and the cpuset internal file system.
1887  **/
1888 
1889 int __init cpuset_init(void)
1890 {
1891 	int err = 0;
1892 
1893 	cpumask_setall(top_cpuset.cpus_allowed);
1894 	nodes_setall(top_cpuset.mems_allowed);
1895 
1896 	fmeter_init(&top_cpuset.fmeter);
1897 	top_cpuset.mems_generation = cpuset_mems_generation++;
1898 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1899 	top_cpuset.relax_domain_level = -1;
1900 
1901 	err = register_filesystem(&cpuset_fs_type);
1902 	if (err < 0)
1903 		return err;
1904 
1905 	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1906 		BUG();
1907 
1908 	number_of_cpusets = 1;
1909 	return 0;
1910 }
1911 
1912 /**
1913  * cpuset_do_move_task - move a given task to another cpuset
1914  * @tsk: pointer to task_struct of the task to move
1915  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
1916  *
1917  * Called by cgroup_scan_tasks() for each task in a cgroup.
1918  * Return nonzero to stop the walk through the tasks.
1919  */
1920 static void cpuset_do_move_task(struct task_struct *tsk,
1921 				struct cgroup_scanner *scan)
1922 {
1923 	struct cpuset_hotplug_scanner *chsp;
1924 
1925 	chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
1926 	cgroup_attach_task(chsp->to, tsk);
1927 }
1928 
1929 /**
1930  * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1931  * @from: cpuset in which the tasks currently reside
1932  * @to: cpuset to which the tasks will be moved
1933  *
1934  * Called with cgroup_mutex held
1935  * callback_mutex must not be held, as cpuset_attach() will take it.
1936  *
1937  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1938  * calling callback functions for each.
1939  */
1940 static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1941 {
1942 	struct cpuset_hotplug_scanner scan;
1943 
1944 	scan.scan.cg = from->css.cgroup;
1945 	scan.scan.test_task = NULL; /* select all tasks in cgroup */
1946 	scan.scan.process_task = cpuset_do_move_task;
1947 	scan.scan.heap = NULL;
1948 	scan.to = to->css.cgroup;
1949 
1950 	if (cgroup_scan_tasks(&scan.scan))
1951 		printk(KERN_ERR "move_member_tasks_to_cpuset: "
1952 				"cgroup_scan_tasks failed\n");
1953 }
1954 
1955 /*
1956  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
1957  * or memory nodes, we need to walk over the cpuset hierarchy,
1958  * removing that CPU or node from all cpusets.  If this removes the
1959  * last CPU or node from a cpuset, then move the tasks in the empty
1960  * cpuset to its next-highest non-empty parent.
1961  *
1962  * Called with cgroup_mutex held
1963  * callback_mutex must not be held, as cpuset_attach() will take it.
1964  */
1965 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1966 {
1967 	struct cpuset *parent;
1968 
1969 	/*
1970 	 * The cgroup's css_sets list is in use if there are tasks
1971 	 * in the cpuset; the list is empty if there are none;
1972 	 * the cs->css.refcnt appears to always be 0.
1973 	 */
1974 	if (list_empty(&cs->css.cgroup->css_sets))
1975 		return;
1976 
1977 	/*
1978 	 * Find its next-highest non-empty parent (the top cpuset
1979 	 * has online cpus, so it can't be empty).
1980 	 */
1981 	parent = cs->parent;
1982 	while (cpumask_empty(parent->cpus_allowed) ||
1983 			nodes_empty(parent->mems_allowed))
1984 		parent = parent->parent;
1985 
1986 	move_member_tasks_to_cpuset(cs, parent);
1987 }
1988 
1989 /*
1990  * Walk the specified cpuset subtree and look for empty cpusets.
1991  * The tasks of such cpuset must be moved to a parent cpuset.
1992  *
1993  * Called with cgroup_mutex held.  We take callback_mutex to modify
1994  * cpus_allowed and mems_allowed.
1995  *
1996  * This walk processes the tree from top to bottom, completing one layer
1997  * before dropping down to the next.  It always processes a node before
1998  * any of its children.
1999  *
2000  * For now, since we lack memory hot unplug, we'll never see a cpuset
2001  * that has tasks along with an empty 'mems'.  But if we did see such
2002  * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
2003  */
2004 static void scan_for_empty_cpusets(struct cpuset *root)
2005 {
2006 	LIST_HEAD(queue);
2007 	struct cpuset *cp;	/* scans cpusets being updated */
2008 	struct cpuset *child;	/* scans child cpusets of cp */
2009 	struct cgroup *cont;
2010 	nodemask_t oldmems;
2011 
2012 	list_add_tail((struct list_head *)&root->stack_list, &queue);
2013 
2014 	while (!list_empty(&queue)) {
2015 		cp = list_first_entry(&queue, struct cpuset, stack_list);
2016 		list_del(queue.next);
2017 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
2018 			child = cgroup_cs(cont);
2019 			list_add_tail(&child->stack_list, &queue);
2020 		}
2021 
2022 		/* Continue past cpusets with all cpus, mems online */
2023 		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
2024 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2025 			continue;
2026 
2027 		oldmems = cp->mems_allowed;
2028 
2029 		/* Remove offline cpus and mems from this cpuset. */
2030 		mutex_lock(&callback_mutex);
2031 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
2032 			    cpu_online_mask);
2033 		nodes_and(cp->mems_allowed, cp->mems_allowed,
2034 						node_states[N_HIGH_MEMORY]);
2035 		mutex_unlock(&callback_mutex);
2036 
2037 		/* Move tasks from the empty cpuset to a parent */
2038 		if (cpumask_empty(cp->cpus_allowed) ||
2039 		     nodes_empty(cp->mems_allowed))
2040 			remove_tasks_in_empty_cpuset(cp);
2041 		else {
2042 			update_tasks_cpumask(cp, NULL);
2043 			update_tasks_nodemask(cp, &oldmems);
2044 		}
2045 	}
2046 }
2047 
2048 /*
2049  * The top_cpuset tracks what CPUs and Memory Nodes are online,
2050  * period.  This is necessary in order to make cpusets transparent
2051  * (of no effect) on systems that are actively using CPU hotplug
2052  * but making no active use of cpusets.
2053  *
2054  * This routine ensures that top_cpuset.cpus_allowed tracks
2055  * cpu_online_map on each CPU hotplug (cpuhp) event.
2056  *
2057  * Called within get_online_cpus().  Needs to call cgroup_lock()
2058  * before calling generate_sched_domains().
2059  */
2060 static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
2061 				unsigned long phase, void *unused_cpu)
2062 {
2063 	struct sched_domain_attr *attr;
2064 	struct cpumask *doms;
2065 	int ndoms;
2066 
2067 	switch (phase) {
2068 	case CPU_ONLINE:
2069 	case CPU_ONLINE_FROZEN:
2070 	case CPU_DEAD:
2071 	case CPU_DEAD_FROZEN:
2072 		break;
2073 
2074 	default:
2075 		return NOTIFY_DONE;
2076 	}
2077 
2078 	cgroup_lock();
2079 	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
2080 	scan_for_empty_cpusets(&top_cpuset);
2081 	ndoms = generate_sched_domains(&doms, &attr);
2082 	cgroup_unlock();
2083 
2084 	/* Have scheduler rebuild the domains */
2085 	partition_sched_domains(ndoms, doms, attr);
2086 
2087 	return NOTIFY_OK;
2088 }
2089 
2090 #ifdef CONFIG_MEMORY_HOTPLUG
2091 /*
2092  * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
2093  * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
2094  * See also the previous routine cpuset_track_online_cpus().
2095  */
2096 static int cpuset_track_online_nodes(struct notifier_block *self,
2097 				unsigned long action, void *arg)
2098 {
2099 	cgroup_lock();
2100 	switch (action) {
2101 	case MEM_ONLINE:
2102 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2103 		break;
2104 	case MEM_OFFLINE:
2105 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2106 		scan_for_empty_cpusets(&top_cpuset);
2107 		break;
2108 	default:
2109 		break;
2110 	}
2111 	cgroup_unlock();
2112 	return NOTIFY_OK;
2113 }
2114 #endif
2115 
2116 /**
2117  * cpuset_init_smp - initialize cpus_allowed
2118  *
2119  * Description: Finish top cpuset after cpu, node maps are initialized
2120  **/
2121 
2122 void __init cpuset_init_smp(void)
2123 {
2124 	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
2125 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2126 
2127 	hotcpu_notifier(cpuset_track_online_cpus, 0);
2128 	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
2129 
2130 	cpuset_wq = create_singlethread_workqueue("cpuset");
2131 	BUG_ON(!cpuset_wq);
2132 }
2133 
2134 /**
2135  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2136  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2137  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2138  *
2139  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2140  * attached to the specified @tsk.  Guaranteed to return some non-empty
2141  * subset of cpu_online_map, even if this means going outside the
2142  * tasks cpuset.
2143  **/
2144 
2145 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2146 {
2147 	mutex_lock(&callback_mutex);
2148 	cpuset_cpus_allowed_locked(tsk, pmask);
2149 	mutex_unlock(&callback_mutex);
2150 }
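
/*
 * Illustrative sketch (not part of the original file): sched_setaffinity()
 * style usage, clamping a user-requested mask to the task's cpuset before
 * applying it.  Variable names are illustrative and cpumask allocation /
 * error handling is elided.
 *
 *	cpumask_var_t cpus_allowed, new_mask;
 *
 *	cpuset_cpus_allowed(p, cpus_allowed);
 *	cpumask_and(new_mask, in_mask, cpus_allowed);
 *	retval = set_cpus_allowed_ptr(p, new_mask);
 */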
2151 
2152 /**
2153  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
2154  * Must be called with callback_mutex held.
2155  **/
2156 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
2157 {
2158 	task_lock(tsk);
2159 	guarantee_online_cpus(task_cs(tsk), pmask);
2160 	task_unlock(tsk);
2161 }
2162 
2163 void cpuset_init_current_mems_allowed(void)
2164 {
2165 	nodes_setall(current->mems_allowed);
2166 }
2167 
2168 /**
2169  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2170  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2171  *
2172  * Description: Returns the nodemask_t mems_allowed of the cpuset
2173  * attached to the specified @tsk.  Guaranteed to return some non-empty
2174  * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
2175  * tasks cpuset.
2176  **/
2177 
2178 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2179 {
2180 	nodemask_t mask;
2181 
2182 	mutex_lock(&callback_mutex);
2183 	task_lock(tsk);
2184 	guarantee_online_mems(task_cs(tsk), &mask);
2185 	task_unlock(tsk);
2186 	mutex_unlock(&callback_mutex);
2187 
2188 	return mask;
2189 }
2190 
2191 /**
2192  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2193  * @nodemask: the nodemask to be checked
2194  *
2195  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2196  */
2197 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2198 {
2199 	return nodes_intersects(*nodemask, current->mems_allowed);
2200 }
2201 
2202 /*
2203  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2204  * mem_hardwall ancestor to the specified cpuset.  Call holding
2205  * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2206  * (an unusual configuration), then returns the root cpuset.
2207  */
2208 static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2209 {
2210 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
2211 		cs = cs->parent;
2212 	return cs;
2213 }
2214 
2215 /**
2216  * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
2217  * @z: is this zone on an allowed node?
2218  * @gfp_mask: memory allocation flags
2219  *
2220  * If we're in interrupt, yes, we can always allocate.  If
2221  * __GFP_THISNODE is set, yes, we can always allocate.  If zone
2222  * z's node is in our tasks mems_allowed, yes.  If it's not a
2223  * __GFP_HARDWALL request and this zone's node is in the nearest
2224  * hardwalled cpuset ancestor to this tasks cpuset, yes.
2225  * If the task has been OOM killed and has access to memory reserves
2226  * as specified by the TIF_MEMDIE flag, yes.
2227  * Otherwise, no.
2228  *
2229  * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
2230  * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
2231  * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
2232  * from an enclosing cpuset.
2233  *
2234  * cpuset_zone_allowed_hardwall() only handles the simpler case of
2235  * hardwall cpusets, and never sleeps.
2236  *
2237  * The __GFP_THISNODE placement logic is really handled elsewhere,
2238  * by forcibly using a zonelist starting at a specified node, and by
2239  * (in get_page_from_freelist()) refusing to consider the zones for
2240  * any node on the zonelist except the first.  By the time any such
2241  * calls get to this routine, we should just shut up and say 'yes'.
2242  *
2243  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2244  * and do not allow allocations outside the current tasks cpuset
2245  * unless the task has been OOM killed and is marked TIF_MEMDIE.
2246  * GFP_KERNEL allocations are not so marked, so can escape to the
2247  * nearest enclosing hardwalled ancestor cpuset.
2248  *
2249  * Scanning up parent cpusets requires callback_mutex.  The
2250  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2251  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2252  * current tasks mems_allowed came up empty on the first pass over
2253  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2254  * cpuset are short of memory, might require taking the callback_mutex
2255  * mutex.
2256  *
2257  * The first call here from mm/page_alloc:get_page_from_freelist()
2258  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2259  * so no allocation on a node outside the cpuset is allowed (unless
2260  * in interrupt, of course).
2261  *
2262  * The second pass through get_page_from_freelist() doesn't even call
2263  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2264  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2265  * in alloc_flags.  That logic and the checks below have the combined
2266  * effect that:
2267  *	in_interrupt - any node ok (current task context irrelevant)
2268  *	GFP_ATOMIC   - any node ok
2269  *	TIF_MEMDIE   - any node ok
2270  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2271  *	GFP_USER     - only nodes in current tasks mems allowed ok.
2272  *
2273  * Rule:
2274  *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
2275  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2276  *    the code that might scan up ancestor cpusets and sleep.
2277  */
2278 
2279 int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
2280 {
2281 	int node;			/* node that zone z is on */
2282 	const struct cpuset *cs;	/* current cpuset ancestors */
2283 	int allowed;			/* is allocation in zone z allowed? */
2284 
2285 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2286 		return 1;
2287 	node = zone_to_nid(z);
2288 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2289 	if (node_isset(node, current->mems_allowed))
2290 		return 1;
2291 	/*
2292 	 * Allow tasks that have access to memory reserves because they have
2293 	 * been OOM killed to get memory anywhere.
2294 	 */
2295 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2296 		return 1;
2297 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
2298 		return 0;
2299 
2300 	if (current->flags & PF_EXITING) /* Let dying task have memory */
2301 		return 1;
2302 
2303 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
2304 	mutex_lock(&callback_mutex);
2305 
2306 	task_lock(current);
2307 	cs = nearest_hardwall_ancestor(task_cs(current));
2308 	task_unlock(current);
2309 
2310 	allowed = node_isset(node, cs->mems_allowed);
2311 	mutex_unlock(&callback_mutex);
2312 	return allowed;
2313 }
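
/*
 * Illustrative caller sketch (not part of the original file): the page
 * allocator's zone scan consults the softwall test roughly like this.
 * cpuset_zone_allowed_softwall() is assumed to be the inline wrapper in
 * <linux/cpuset.h> that short-circuits when only the root cpuset exists
 * and otherwise calls __cpuset_zone_allowed_softwall() above; the loop
 * and flag names mirror mm/page_alloc.c of this era but are illustrative.
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if ((alloc_flags & ALLOC_CPUSET) &&
 *		    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;	// zone's node not allowed
 *		// ... try to take a page from this zone ...
 *	}
 */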
2314 
2315 /*
2316  * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
2317  * @z: is this zone on an allowed node?
2318  * @gfp_mask: memory allocation flags
2319  *
2320  * If we're in interrupt, yes, we can always allocate.
2321  * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
2322  * z's node is in our tasks mems_allowed, yes.   If the task has been
2323  * OOM killed and has access to memory reserves as specified by the
2324  * TIF_MEMDIE flag, yes.  Otherwise, no.
2325  *
2326  * The __GFP_THISNODE placement logic is really handled elsewhere,
2327  * by forcibly using a zonelist starting at a specified node, and by
2328  * (in get_page_from_freelist()) refusing to consider the zones for
2329  * any node on the zonelist except the first.  By the time any such
2330  * calls get to this routine, we should just shut up and say 'yes'.
2331  *
2332  * Unlike the cpuset_zone_allowed_softwall() variant, above,
2333  * this variant requires that the zone be in the current tasks
2334  * mems_allowed or that we're in interrupt.  It does not scan up the
2335  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2336  * It never sleeps.
2337  */
2338 
2339 int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
2340 {
2341 	int node;			/* node that zone z is on */
2342 
2343 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2344 		return 1;
2345 	node = zone_to_nid(z);
2346 	if (node_isset(node, current->mems_allowed))
2347 		return 1;
2348 	/*
2349 	 * Allow tasks that have access to memory reserves because they have
2350 	 * been OOM killed to get memory anywhere.
2351 	 */
2352 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2353 		return 1;
2354 	return 0;
2355 }
2356 
2357 /**
2358  * cpuset_lock - lock out any changes to cpuset structures
2359  *
2360  * The out of memory (oom) code needs to lock cpusets against
2361  * being changed while it scans the tasklist looking for a
2362  * task in an overlapping cpuset.  Expose callback_mutex via this
2363  * cpuset_lock() routine, so the oom code can lock it, before
2364  * locking the task list.  The tasklist_lock is a spinlock, so
2365  * must be taken inside callback_mutex.
2366  */
2367 
2368 void cpuset_lock(void)
2369 {
2370 	mutex_lock(&callback_mutex);
2371 }
2372 
2373 /**
2374  * cpuset_unlock - release lock on cpuset changes
2375  *
2376  * Undo the lock taken in a previous cpuset_lock() call.
2377  */
2378 
2379 void cpuset_unlock(void)
2380 {
2381 	mutex_unlock(&callback_mutex);
2382 }
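
/*
 * Illustrative sketch (not part of the original file): the locking order
 * described above, as the OOM path might use it.  The scan body is
 * elided; the point is that callback_mutex (via cpuset_lock()) is taken
 * before the tasklist_lock spinlock.
 *
 *	cpuset_lock();
 *	read_lock(&tasklist_lock);
 *	// ... scan tasks, e.g. skipping any whose mems do not
 *	//     intersect ours (see cpuset_mems_allowed_intersects()) ...
 *	read_unlock(&tasklist_lock);
 *	cpuset_unlock();
 */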
2383 
2384 /**
2385  * cpuset_mem_spread_node() - On which node to begin search for a page
2386  *
2387  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2388  * tasks in a cpuset with is_spread_page or is_spread_slab set),
2389  * and if the memory allocation used cpuset_mem_spread_node()
2390  * to determine on which node to start looking, as it will for
2391  * certain page cache or slab cache pages such as used for file
2392  * system buffers and inode caches, then instead of starting on the
2393  * local node to look for a free page, rather spread the starting
2394  * node around the tasks mems_allowed nodes.
2395  *
2396  * We don't have to worry about the returned node being offline
2397  * because "it can't happen", and even if it did, it would be ok.
2398  *
2399  * The routines calling guarantee_online_mems() are careful to
2400  * only set nodes in task->mems_allowed that are online.  So it
2401  * should not be possible for the following code to return an
2402  * offline node.  But if it did, that would be ok, as this routine
2403  * is not returning the node where the allocation must be, only
2404  * the node where the search should start.  The zonelist passed to
2405  * __alloc_pages() will include all nodes.  If the slab allocator
2406  * is passed an offline node, it will fall back to the local node.
2407  * See kmem_cache_alloc_node().
2408  */
2409 
2410 int cpuset_mem_spread_node(void)
2411 {
2412 	int node;
2413 
2414 	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
2415 	if (node == MAX_NUMNODES)
2416 		node = first_node(current->mems_allowed);
2417 	current->cpuset_mem_spread_rotor = node;
2418 	return node;
2419 }
2420 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
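
/*
 * Illustrative sketch (not part of the original file): how a page cache
 * allocation might use the spread rotor.  cpuset_do_page_mem_spread()
 * is assumed to be the PF_SPREAD_PAGE test provided by <linux/cpuset.h>;
 * the function name is invented for the example.
 *
 *	static struct page *example_page_cache_alloc(gfp_t gfp)
 *	{
 *		if (cpuset_do_page_mem_spread()) {
 *			int nid = cpuset_mem_spread_node();
 *			return alloc_pages_node(nid, gfp, 0);
 *		}
 *		return alloc_pages(gfp, 0);
 *	}
 */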
2421 
2422 /**
2423  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2424  * @tsk1: pointer to task_struct of some task.
2425  * @tsk2: pointer to task_struct of some other task.
2426  *
2427  * Description: Return true if @tsk1's mems_allowed intersects the
2428  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2429  * one of the task's memory usage might impact the memory available
2430  * to the other.
2431  **/
2432 
2433 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2434 				   const struct task_struct *tsk2)
2435 {
2436 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2437 }
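
/*
 * Illustrative sketch (not part of the original file): the OOM killer's
 * task scan can use this to skip victims whose memory could not help
 * the allocating task.  Variable names are illustrative.
 *
 *	for_each_process(p) {
 *		if (!cpuset_mems_allowed_intersects(current, p))
 *			continue;	// killing p frees no useful memory
 *		// ... otherwise consider p as an OOM victim ...
 *	}
 */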
2438 
2439 /**
2440  * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2441  * @task: pointer to task_struct of some task.
2442  *
2443  * Description: Prints @task's name, cpuset name, and cached copy of its
2444  * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
2445  * dereferencing task_cs(task).
2446  */
2447 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2448 {
2449 	struct dentry *dentry;
2450 
2451 	dentry = task_cs(tsk)->css.cgroup->dentry;
2452 	spin_lock(&cpuset_buffer_lock);
2453 	snprintf(cpuset_name, CPUSET_NAME_LEN,
2454 		 dentry ? (const char *)dentry->d_name.name : "/");
2455 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2456 			   tsk->mems_allowed);
2457 	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2458 	       tsk->comm, cpuset_name, cpuset_nodelist);
2459 	spin_unlock(&cpuset_buffer_lock);
2460 }
2461 
2462 /*
2463  * Collection of memory_pressure is suppressed unless
2464  * this flag is enabled by writing "1" to the special
2465  * cpuset file 'memory_pressure_enabled' in the root cpuset.
2466  */
2467 
2468 int cpuset_memory_pressure_enabled __read_mostly;
2469 
2470 /**
2471  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2472  *
2473  * Keep a running average of the rate of synchronous (direct)
2474  * page reclaim efforts initiated by tasks in each cpuset.
2475  *
2476  * This represents the rate at which some task in the cpuset
2477  * ran low on memory on all nodes it was allowed to use, and
2478  * had to enter the kernels page reclaim code in an effort to
2479  * create more free memory by tossing clean pages or swapping
2480  * or writing dirty pages.
2481  *
2482  * Display to user space in the per-cpuset read-only file
2483  * "memory_pressure".  Value displayed is an integer
2484  * representing the recent rate of entry into the synchronous
2485  * (direct) page reclaim by any task attached to the cpuset.
2486  **/
2487 
2488 void __cpuset_memory_pressure_bump(void)
2489 {
2490 	task_lock(current);
2491 	fmeter_markevent(&task_cs(current)->fmeter);
2492 	task_unlock(current);
2493 }
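
/*
 * Illustrative sketch (not part of the original file): callers in the
 * direct reclaim path are expected to go through a wrapper that checks
 * cpuset_memory_pressure_enabled first, roughly:
 *
 *	#define cpuset_memory_pressure_bump()				\
 *		do {							\
 *			if (cpuset_memory_pressure_enabled)		\
 *				__cpuset_memory_pressure_bump();	\
 *		} while (0)
 *
 * so the fmeter is only touched when collection has been enabled via
 * the root cpuset's "memory_pressure_enabled" file.
 */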
2494 
2495 #ifdef CONFIG_PROC_PID_CPUSET
2496 /*
2497  * proc_cpuset_show()
2498  *  - Print tasks cpuset path into seq_file.
2499  *  - Used for /proc/<pid>/cpuset.
2500  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2501  *    doesn't really matter if tsk->cpuset changes after we read it,
2502  *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
2503  *    anyway.
2504  */
2505 static int proc_cpuset_show(struct seq_file *m, void *unused_v)
2506 {
2507 	struct pid *pid;
2508 	struct task_struct *tsk;
2509 	char *buf;
2510 	struct cgroup_subsys_state *css;
2511 	int retval;
2512 
2513 	retval = -ENOMEM;
2514 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2515 	if (!buf)
2516 		goto out;
2517 
2518 	retval = -ESRCH;
2519 	pid = m->private;
2520 	tsk = get_pid_task(pid, PIDTYPE_PID);
2521 	if (!tsk)
2522 		goto out_free;
2523 
2524 	retval = -EINVAL;
2525 	cgroup_lock();
2526 	css = task_subsys_state(tsk, cpuset_subsys_id);
2527 	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2528 	if (retval < 0)
2529 		goto out_unlock;
2530 	seq_puts(m, buf);
2531 	seq_putc(m, '\n');
2532 out_unlock:
2533 	cgroup_unlock();
2534 	put_task_struct(tsk);
2535 out_free:
2536 	kfree(buf);
2537 out:
2538 	return retval;
2539 }
2540 
2541 static int cpuset_open(struct inode *inode, struct file *file)
2542 {
2543 	struct pid *pid = PROC_I(inode)->pid;
2544 	return single_open(file, proc_cpuset_show, pid);
2545 }
2546 
2547 const struct file_operations proc_cpuset_operations = {
2548 	.open		= cpuset_open,
2549 	.read		= seq_read,
2550 	.llseek		= seq_lseek,
2551 	.release	= single_release,
2552 };
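
/*
 * Illustrative sketch (not part of the original file): reading the path
 * that proc_cpuset_show() emits, from userspace.  The pid 1234 and the
 * cpuset path in the comment are assumptions for this example only.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static ssize_t example_read_proc_cpuset(char *buf, size_t len)
 *	{
 *		int fd = open("/proc/1234/cpuset", O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = read(fd, buf, len);		// e.g. "/set1\n"
 *		close(fd);
 *		return n;
 *	}
 */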
2553 #endif /* CONFIG_PROC_PID_CPUSET */
2554 
2555 /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
2556 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2557 {
2558 	seq_printf(m, "Cpus_allowed:\t");
2559 	seq_cpumask(m, &task->cpus_allowed);
2560 	seq_printf(m, "\n");
2561 	seq_printf(m, "Cpus_allowed_list:\t");
2562 	seq_cpumask_list(m, &task->cpus_allowed);
2563 	seq_printf(m, "\n");
2564 	seq_printf(m, "Mems_allowed:\t");
2565 	seq_nodemask(m, &task->mems_allowed);
2566 	seq_printf(m, "\n");
2567 	seq_printf(m, "Mems_allowed_list:\t");
2568 	seq_nodemask_list(m, &task->mems_allowed);
2569 	seq_printf(m, "\n");
2570 }
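
/*
 * Illustrative output (not part of the original file): on a hypothetical
 * 4-CPU, single-memory-node machine the lines added to /proc/<pid>/status
 * by cpuset_task_status_allowed() would look roughly like the following;
 * the exact hex field widths depend on the NR_CPUS and MAX_NUMNODES
 * configuration.
 *
 *	Cpus_allowed:	f
 *	Cpus_allowed_list:	0-3
 *	Mems_allowed:	00000000,00000001
 *	Mems_allowed_list:	0
 */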
2571