1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/list.h>
37 #include <linux/mempolicy.h>
38 #include <linux/mm.h>
39 #include <linux/memory.h>
40 #include <linux/export.h>
41 #include <linux/mount.h>
42 #include <linux/fs_context.h>
43 #include <linux/namei.h>
44 #include <linux/pagemap.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
47 #include <linux/sched.h>
48 #include <linux/sched/deadline.h>
49 #include <linux/sched/mm.h>
50 #include <linux/sched/task.h>
51 #include <linux/seq_file.h>
52 #include <linux/security.h>
53 #include <linux/slab.h>
54 #include <linux/spinlock.h>
55 #include <linux/stat.h>
56 #include <linux/string.h>
57 #include <linux/time.h>
58 #include <linux/time64.h>
59 #include <linux/backing-dev.h>
60 #include <linux/sort.h>
61 #include <linux/oom.h>
62 #include <linux/sched/isolation.h>
63 #include <linux/uaccess.h>
64 #include <linux/atomic.h>
65 #include <linux/mutex.h>
66 #include <linux/cgroup.h>
67 #include <linux/wait.h>
68 
69 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
70 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
71 
72 /* See "Frequency meter" comments, below. */
73 
74 struct fmeter {
75     int cnt;         /* unprocessed events count */
76     int val;         /* most recent output value */
77     time64_t time;   /* clock (secs) when val computed */
78     spinlock_t lock; /* guards read or write of above */
79 };
80 
81 struct cpuset {
82     struct cgroup_subsys_state css;
83 
84     unsigned long flags; /* "unsigned long" so bitops work */
85 
86     /*
87      * On default hierarchy:
88      *
89      * The user-configured masks can only be changed by writing to
90      * cpuset.cpus and cpuset.mems, and won't be limited by the
91      * parent masks.
92      *
93      * The effective masks are the real masks that apply to the tasks
94      * in the cpuset. They may be changed if the configured masks are
95      * changed or hotplug happens.
96      *
97      * effective_mask == configured_mask & parent's effective_mask,
98      * and if it ends up empty, it will inherit the parent's mask.
99      *
100      *
101      * On legacy hierarchy:
102      *
103      * The user-configured masks are always the same as the effective masks.
104      */
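    /*
     * Illustrative example (not part of the original source): on the
     * default hierarchy, if the parent's effective_cpus is 0-3 and this
     * cpuset requested cpus 2-5, its effective_cpus becomes 2-3; if the
     * intersection were empty, it would instead inherit the parent's
     * effective_cpus (see compute_effective_cpumask() and
     * update_cpumasks_hier() below).
     */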
105 
106     /* user-configured CPUs and Memory Nodes allowed to tasks */
107     cpumask_var_t cpus_allowed;
108     cpumask_var_t cpus_requested;
109     nodemask_t mems_allowed;
110 
111     /* effective CPUs and Memory Nodes allowed to tasks */
112     cpumask_var_t effective_cpus;
113     nodemask_t effective_mems;
114 
115     /*
116      * CPUs allocated to child sub-partitions (default hierarchy only)
117      * - CPUs granted by the parent = effective_cpus U subparts_cpus
118      * - effective_cpus and subparts_cpus are mutually exclusive.
119      *
120      * effective_cpus contains only onlined CPUs, but subparts_cpus
121      * may have offlined ones.
122      */
123     cpumask_var_t subparts_cpus;
124 
125     /*
126      * These are the old Memory Nodes that tasks took on.
127      *
128      * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
129      * - A new cpuset's old_mems_allowed is initialized when some
130      *   task is moved into it.
131      * - old_mems_allowed is used in cpuset_migrate_mm() when we change
132      *   cpuset.mems_allowed and have tasks' nodemask updated, and
133      *   then old_mems_allowed is updated to mems_allowed.
134      */
135     nodemask_t old_mems_allowed;
136 
137     struct fmeter fmeter; /* memory_pressure filter */
138 
139     /*
140      * Tasks are being attached to this cpuset.  Used to prevent
141      * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
142      */
143     int attach_in_progress;
144 
145     /* partition number for rebuild_sched_domains() */
146     int pn;
147 
148     /* for custom sched domain */
149     int relax_domain_level;
150 
151     /* number of CPUs in subparts_cpus */
152     int nr_subparts_cpus;
153 
154     /* partition root state */
155     int partition_root_state;
156 
157     /*
158      * Default hierarchy only:
159      * use_parent_ecpus - set if using parent's effective_cpus
160      * child_ecpus_count - # of children with use_parent_ecpus set
161      */
162     int use_parent_ecpus;
163     int child_ecpus_count;
164 };
165 
166 /*
167  * Partition root states:
168  *
169  *   0 - not a partition root
170  *
171  *   1 - partition root
172  *
173  *  -1 - invalid partition root
174  *       None of the cpus in cpus_allowed can be put into the parent's
175  *       subparts_cpus. In this case, the cpuset is not a real partition
176  *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
177  *       and the cpuset can be restored back to a partition root if the
178  *       parent cpuset can give more CPUs back to this child cpuset.
179  */
180 #define PRS_DISABLED 0
181 #define PRS_ENABLED 1
182 #define PRS_ERROR (-1)
183 
184 /*
185  * Temporary cpumasks for working with partitions that are passed among
186  * functions to avoid memory allocation in inner functions.
187  */
188 struct tmpmasks {
189     cpumask_var_t addmask, delmask; /* For partition root */
190     cpumask_var_t new_cpus;         /* For update_cpumasks_hier() */
191 };
192 
193 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
194 {
195     return css ? container_of(css, struct cpuset, css) : NULL;
196 }
197 
198 /* Retrieve the cpuset for a task */
199 static inline struct cpuset *task_cs(struct task_struct *task)
200 {
201     return css_cs(task_css(task, cpuset_cgrp_id));
202 }
203 
204 static inline struct cpuset *parent_cs(struct cpuset *cs)
205 {
206     return css_cs(cs->css.parent);
207 }
208 
209 /* bits in struct cpuset flags field */
210 typedef enum {
211     CS_ONLINE,
212     CS_CPU_EXCLUSIVE,
213     CS_MEM_EXCLUSIVE,
214     CS_MEM_HARDWALL,
215     CS_MEMORY_MIGRATE,
216     CS_SCHED_LOAD_BALANCE,
217     CS_SPREAD_PAGE,
218     CS_SPREAD_SLAB,
219 } cpuset_flagbits_t;
220 
221 /* convenient tests for these bits */
222 static inline bool is_cpuset_online(struct cpuset *cs)
223 {
224     return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
225 }
226 
227 static inline int is_cpu_exclusive(const struct cpuset *cs)
228 {
229     return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
230 }
231 
232 static inline int is_mem_exclusive(const struct cpuset *cs)
233 {
234     return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
235 }
236 
237 static inline int is_mem_hardwall(const struct cpuset *cs)
238 {
239     return test_bit(CS_MEM_HARDWALL, &cs->flags);
240 }
241 
242 static inline int is_sched_load_balance(const struct cpuset *cs)
243 {
244     return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
245 }
246 
247 static inline int is_memory_migrate(const struct cpuset *cs)
248 {
249     return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
250 }
251 
252 static inline int is_spread_page(const struct cpuset *cs)
253 {
254     return test_bit(CS_SPREAD_PAGE, &cs->flags);
255 }
256 
257 static inline int is_spread_slab(const struct cpuset *cs)
258 {
259     return test_bit(CS_SPREAD_SLAB, &cs->flags);
260 }
261 
262 static inline int is_partition_root(const struct cpuset *cs)
263 {
264     return cs->partition_root_state > 0;
265 }
266 
267 static struct cpuset top_cpuset = {
268     .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
269     .partition_root_state = PRS_ENABLED,
270 };
271 
272 /**
273  * cpuset_for_each_child - traverse online children of a cpuset
274  * @child_cs: loop cursor pointing to the current child
275  * @pos_css: used for iteration
276  * @parent_cs: target cpuset to walk children of
277  *
278  * Walk @child_cs through the online children of @parent_cs.  Must be used
279  * with RCU read locked.
280  */
281 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)                                                            \
282     css_for_each_child((pos_css), &(parent_cs)->css) if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
283 
284 /**
285  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
286  * @des_cs: loop cursor pointing to the current descendant
287  * @pos_css: used for iteration
288  * @root_cs: target cpuset to walk descendants of
289  *
290  * Walk @des_cs through the online descendants of @root_cs.  Must be used
291  * with RCU read locked.  The caller may modify @pos_css by calling
292  * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
293  * iteration and the first node to be visited.
294  */
295 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)                                                       \
296     css_for_each_descendant_pre((pos_css), &(root_cs)->css) if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
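
/*
 * Usage sketch (illustrative only; it mirrors update_domain_attr_tree() and
 * rebuild_root_domains() below, and should_skip() is a hypothetical helper):
 *
 *    rcu_read_lock();
 *    cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 *        if (should_skip(cp)) {
 *            pos_css = css_rightmost_descendant(pos_css);
 *            continue;
 *        }
 *        ...
 *    }
 *    rcu_read_unlock();
 */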
297 
298 /*
299  * There are two global locks guarding cpuset structures - cpuset_mutex and
300  * callback_lock. We also require taking task_lock() when dereferencing a
301  * task's cpuset pointer. See "The task_lock() exception", at the end of this
302  * comment.
303  *
304  * A task must hold both locks to modify cpusets.  If a task holds
305  * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
306  * is the only task able to also acquire callback_lock and be able to
307  * modify cpusets.  It can perform various checks on the cpuset structure
308  * first, knowing nothing will change.  It can also allocate memory while
309  * just holding cpuset_mutex.  While it is performing these checks, various
310  * callback routines can briefly acquire callback_lock to query cpusets.
311  * Once it is ready to make the changes, it takes callback_lock, blocking
312  * everyone else.
313  *
314  * Calls to the kernel memory allocator can not be made while holding
315  * callback_lock, as that would risk double tripping on callback_lock
316  * from one of the callbacks into the cpuset code from within
317  * __alloc_pages().
318  *
319  * If a task is only holding callback_lock, then it has read-only
320  * access to cpusets.
321  *
322  * Now, the task_struct fields mems_allowed and mempolicy may be changed
323  * by another task, so we use alloc_lock in the task_struct to protect
324  * them.
325  *
326  * The cpuset_common_file_read() handlers only hold callback_lock across
327  * small pieces of code, such as when reading out possibly multi-word
328  * cpumasks and nodemasks.
329  *
330  * Accessing a task's cpuset should be done in accordance with the
331  * guidelines for accessing subsystem state in kernel/cgroup.c
332  */
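
/*
 * Writer-side sketch (illustrative, not from the original source): the update
 * paths below, e.g. update_parent_subparts_cpumask() and
 * update_cpumasks_hier(), follow this lock ordering:
 *
 *    mutex_lock(&cpuset_mutex);
 *    ... validate the change, allocate memory ...
 *    spin_lock_irq(&callback_lock);
 *    ... publish the new cpumasks / nodemasks ...
 *    spin_unlock_irq(&callback_lock);
 *    mutex_unlock(&cpuset_mutex);
 */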
333 
334 static DEFINE_MUTEX(cpuset_mutex);
335 
336 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
337 
338 void cpuset_read_lock(void)
339 {
340     percpu_down_read(&cpuset_rwsem);
341 }
342 
343 void cpuset_read_unlock(void)
344 {
345     percpu_up_read(&cpuset_rwsem);
346 }
347 
348 static DEFINE_SPINLOCK(callback_lock);
349 
350 static struct workqueue_struct *cpuset_migrate_mm_wq;
351 
352 /*
353  * CPU / memory hotplug is handled asynchronously
354  * for hotplug, and synchronously for resume_cpus.
355  */
static void cpuset_hotplug_workfn(struct work_struct *work);
356 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
357 
358 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
359 
360 /*
361  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
362  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
363  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
364  * With v2 behavior, "cpus" and "mems" are always what the users have
365  * requested and won't be changed by hotplug events. Only the effective
366  * cpus or mems will be affected.
367  */
368 static inline bool is_in_v2_mode(void)
369 {
370     return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
371 }
372 
373 /*
374  * Return in pmask the portion of a task's cpuset's cpus_allowed that
375  * are online and are capable of running the task.  If none are found,
376  * walk up the cpuset hierarchy until we find one that does have some
377  * appropriate cpus.
378  *
379  * One way or another, we guarantee to return some non-empty subset
380  * of cpu_active_mask.
381  *
382  * Call with callback_lock or cpuset_mutex held.
383  */
384 static void guarantee_online_cpus(struct task_struct *tsk, struct cpumask *pmask)
385 {
386     const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
387     struct cpuset *cs;
388 
389     if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask))) {
390         cpumask_copy(pmask, cpu_active_mask);
391     }
392 
393     rcu_read_lock();
394     cs = task_cs(tsk);
395 
396     while (!cpumask_intersects(cs->effective_cpus, pmask)) {
397         cs = parent_cs(cs);
398         if (unlikely(!cs)) {
399             /*
400              * The top cpuset doesn't have any online cpu as a
401              * consequence of a race between cpuset_hotplug_work
402              * and cpu hotplug notifier.  But we know the top
403              * cpuset's effective_cpus is on its way to be
404              * identical to cpu_online_mask.
405              */
406             goto out_unlock;
407         }
408     }
409     cpumask_and(pmask, pmask, cs->effective_cpus);
410 
411 out_unlock:
412     rcu_read_unlock();
413 }
414 
415 /*
416  * Return in *pmask the portion of a cpuset's mems_allowed that
417  * are online, with memory.  If none are online with memory, walk
418  * up the cpuset hierarchy until we find one that does have some
419  * online mems.  The top cpuset always has some mems online.
420  *
421  * One way or another, we guarantee to return some non-empty subset
422  * of node_states[N_MEMORY].
423  *
424  * Call with callback_lock or cpuset_mutex held.
425  */
426 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
427 {
428     while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) {
429         cs = parent_cs(cs);
430     }
431     nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
432 }
433 
434 /*
435  * update task's spread flag if cpuset's page/slab spread flag is set
436  *
437  * Call with callback_lock or cpuset_mutex held.
438  */
439 static void cpuset_update_task_spread_flag(struct cpuset *cs, struct task_struct *tsk)
440 {
441     if (is_spread_page(cs)) {
442         task_set_spread_page(tsk);
443     } else {
444         task_clear_spread_page(tsk);
445     }
446 
447     if (is_spread_slab(cs)) {
448         task_set_spread_slab(tsk);
449     } else {
450         task_clear_spread_slab(tsk);
451     }
452 }
453 
454 /*
455  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
456  *
457  * One cpuset is a subset of another if all its allowed CPUs and
458  * Memory Nodes are a subset of the other, and its exclusive flags
459  * are only set if the other's are set.  Call holding cpuset_mutex.
460  */
461 
462 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
463 {
464     return cpumask_subset(p->cpus_requested, q->cpus_requested) && nodes_subset(p->mems_allowed, q->mems_allowed) &&
465            is_cpu_exclusive(p) <= is_cpu_exclusive(q) && is_mem_exclusive(p) <= is_mem_exclusive(q);
466 }
467 
468 /**
469  * alloc_cpumasks - allocate four cpumasks for a cpuset (or three for a tmpmasks)
470  * @cs:  the cpuset that has cpumasks to be allocated.
471  * @tmp: the tmpmasks structure pointer
472  * Return: 0 if successful, -ENOMEM otherwise.
473  *
474  * Only one of the two input arguments should be non-NULL.
475  */
476 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
477 {
478     cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
479 
480     if (cs) {
481         pmask1 = &cs->cpus_allowed;
482         pmask2 = &cs->effective_cpus;
483         pmask3 = &cs->subparts_cpus;
484         pmask4 = &cs->cpus_requested;
485     } else {
486         pmask1 = &tmp->new_cpus;
487         pmask2 = &tmp->addmask;
488         pmask3 = &tmp->delmask;
489     }
490 
491     if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) {
492         return -ENOMEM;
493     }
494 
495     if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) {
496         goto free_one;
497     }
498 
499     if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) {
500         goto free_two;
501     }
502 
503     if (cs && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) {
504         goto free_three;
505     }
506 
511     return 0;
512 
513 free_three:
514     free_cpumask_var(*pmask3);
515 free_two:
516     free_cpumask_var(*pmask2);
517 free_one:
518     free_cpumask_var(*pmask1);
519     return -ENOMEM;
520 }
521 
522 /**
523  * free_cpumasks - free cpumasks in a cpuset and/or a tmpmasks structure
524  * @cs:  the cpuset that has cpumasks to be freed.
525  * @tmp: the tmpmasks structure pointer
526  */
527 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
528 {
529     if (cs) {
530         free_cpumask_var(cs->cpus_allowed);
531         free_cpumask_var(cs->cpus_requested);
532         free_cpumask_var(cs->effective_cpus);
533         free_cpumask_var(cs->subparts_cpus);
534     }
535     if (tmp) {
536         free_cpumask_var(tmp->new_cpus);
537         free_cpumask_var(tmp->addmask);
538         free_cpumask_var(tmp->delmask);
539     }
540 }
541 
542 /**
543  * alloc_trial_cpuset - allocate a trial cpuset
544  * @cs: the cpuset that the trial cpuset duplicates
545  */
546 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
547 {
548     struct cpuset *trial;
549 
550     trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
551     if (!trial) {
552         return NULL;
553     }
554 
555     if (alloc_cpumasks(trial, NULL)) {
556         kfree(trial);
557         return NULL;
558     }
559 
560     cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
561     cpumask_copy(trial->cpus_requested, cs->cpus_requested);
562     cpumask_copy(trial->effective_cpus, cs->effective_cpus);
563     return trial;
564 }
565 
566 /**
567  * free_cpuset - free the cpuset
568  * @cs: the cpuset to be freed
569  */
570 static inline void free_cpuset(struct cpuset *cs)
571 {
572     free_cpumasks(cs, NULL);
573     kfree(cs);
574 }
575 
576 /*
577  * validate_change() - Used to validate that any proposed cpuset change
578  *               follows the structural rules for cpusets.
579  *
580  * If we replaced the flag and mask values of the current cpuset
581  * (cur) with those values in the trial cpuset (trial), would
582  * our various subset and exclusive rules still be valid?  Presumes
583  * cpuset_mutex held.
584  *
585  * 'cur' is the address of an actual, in-use cpuset.  Operations
586  * such as list traversal that depend on the actual address of the
587  * cpuset in the list must use cur below, not trial.
588  *
589  * 'trial' is the address of bulk structure copy of cur, with
590  * perhaps one or more of the fields cpus_allowed, mems_allowed,
591  * or flags changed to new, trial values.
592  *
593  * Return 0 if valid, -errno if not.
594  */
595 
596 static int validate_change(struct cpuset *cur, struct cpuset *trial)
597 {
598     struct cgroup_subsys_state *css;
599     struct cpuset *c, *par;
600     int ret;
601 
602     rcu_read_lock();
603 
604     /* Each of our child cpusets must be a subset of us */
605     ret = -EBUSY;
606     cpuset_for_each_child(c, css, cur) if (!is_cpuset_subset(c, trial)) goto out;
607 
608     /* Remaining checks don't apply to root cpuset */
609     ret = 0;
610     if (cur == &top_cpuset) {
611         goto out;
612     }
613 
614     par = parent_cs(cur);
615 
616     /* On legacy hierarchy, we must be a subset of our parent cpuset. */
617     ret = -EACCES;
618     if (!is_in_v2_mode() && !is_cpuset_subset(trial, par)) {
619         goto out;
620     }
621 
622     /*
623      * If either I or some sibling (!= me) is exclusive, we can't
624      * overlap
625      */
626     ret = -EINVAL;
627     cpuset_for_each_child(c, css, par) {
628         if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c != cur &&
629             cpumask_intersects(trial->cpus_requested, c->cpus_requested)) {
630             goto out;
631         }
632         if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && c != cur &&
633             nodes_intersects(trial->mems_allowed, c->mems_allowed)) {
634             goto out;
635         }
636     }
637 
638     /*
639      * Cpusets with tasks - existing or newly being attached - can't
640      * be changed to have empty cpus_allowed or mems_allowed.
641      */
642     ret = -ENOSPC;
643     if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
644         if (!cpumask_empty(cur->cpus_allowed) && cpumask_empty(trial->cpus_allowed)) {
645             goto out;
646         }
647         if (!nodes_empty(cur->mems_allowed) && nodes_empty(trial->mems_allowed)) {
648             goto out;
649         }
650     }
651 
652     /*
653      * We can't shrink if we won't have enough room for SCHED_DEADLINE
654      * tasks.
655      */
656     ret = -EBUSY;
657     if (is_cpu_exclusive(cur) && !cpuset_cpumask_can_shrink(cur->cpus_allowed, trial->cpus_allowed)) {
658         goto out;
659     }
660 
661     ret = 0;
662 out:
663     rcu_read_unlock();
664     return ret;
665 }
666 
667 #ifdef CONFIG_SMP
668 /*
669  * Helper routine for generate_sched_domains().
670  * Do cpusets a, b have overlapping effective cpus_allowed masks?
671  */
672 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
673 {
674     return cpumask_intersects(a->effective_cpus, b->effective_cpus);
675 }
676 
677 static void update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
678 {
679     if (dattr->relax_domain_level < c->relax_domain_level) {
680         dattr->relax_domain_level = c->relax_domain_level;
681     }
682     return;
683 }
684 
685 static void update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *root_cs)
686 {
687     struct cpuset *cp;
688     struct cgroup_subsys_state *pos_css;
689 
690     rcu_read_lock();
691     cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
692         /* skip the whole subtree if @cp doesn't have any CPU */
693         if (cpumask_empty(cp->cpus_allowed)) {
694             pos_css = css_rightmost_descendant(pos_css);
695             continue;
696         }
697 
698         if (is_sched_load_balance(cp)) {
699             update_domain_attr(dattr, cp);
700         }
701     }
702     rcu_read_unlock();
703 }
704 
705 /* Must be called with cpuset_mutex held.  */
706 static inline int nr_cpusets(void)
707 {
708     /* jump label reference count + the top-level cpuset */
709     return static_key_count(&cpusets_enabled_key.key) + 1;
710 }
711 
712 /*
713  * generate_sched_domains()
714  *
715  * This function builds a partial partition of the system's CPUs.
716  * A 'partial partition' is a set of non-overlapping subsets whose
717  * union is a subset of that set.
718  * The output of this function needs to be passed to kernel/sched/core.c
719  * partition_sched_domains() routine, which will rebuild the scheduler's
720  * load balancing domains (sched domains) as specified by that partial
721  * partition.
722  *
723  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
724  * for a background explanation of this.
725  *
726  * Does not return errors, on the theory that the callers of this
727  * routine would rather not worry about failures to rebuild sched
728  * domains when operating in the severe memory shortage situations
729  * that could cause allocation failures below.
730  *
731  * Must be called with cpuset_mutex held.
732  *
733  * The three key local variables below are:
734  *    cp - cpuset pointer, used (together with pos_css) to perform a
735  *       top-down scan of all cpusets. For our purposes, rebuilding
736  *       the scheduler's sched domains, we can ignore !is_sched_load_
737  *       balance cpusets.
738  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
739  *       that need to be load balanced, for convenient iterative
740  *       access by the subsequent code that finds the best partition,
741  *       i.e. the set of domains (subsets) of CPUs such that the
742  *       cpus_allowed of every cpuset marked is_sched_load_balance
743  *       is a subset of one of these domains, while there are as
744  *       many such domains as possible, each as small as possible.
745  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
746  *       the kernel/sched/core.c routine partition_sched_domains() in a
747  *       convenient format, that can be easily compared to the prior
748  *       value to determine what partition elements (sched domains)
749  *       were changed (added or removed.)
750  *
751  * Finding the best partition (set of domains):
752  *    The triple nested loops below over i, j, k scan over the
753  *    load balanced cpusets (using the array of cpuset pointers in
754  *    csa[]) looking for pairs of cpusets that have overlapping
755  *    cpus_allowed, but which don't have the same 'pn' partition
756  *    number and gives them in the same partition number.  It keeps
757  *    looping on the 'restart' label until it can no longer find
758  *    any such pairs.
759  *
760  *    The union of the cpus_allowed masks from the set of
761  *    all cpusets having the same 'pn' value then form the one
762  *    element of the partition (one sched domain) to be passed to
763  *    partition_sched_domains().
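 *
 *    Worked example (illustrative, not from the original comment): three
 *    load-balanced cpusets with effective cpus {0-1}, {1-2} and {4-5}.
 *    The first two overlap, so they end up with the same 'pn' and form
 *    one sched domain covering cpus 0-2; the third keeps its own 'pn'
 *    and forms a second domain covering cpus 4-5, so ndoms == 2.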
764  */
765 static int generate_sched_domains(cpumask_var_t **domains, struct sched_domain_attr **attributes)
766 {
767     struct cpuset *cp;               /* top-down scan of cpusets */
768     struct cpuset **csa;             /* array of all cpuset ptrs */
769     int csn;                         /* how many cpuset ptrs in csa so far */
770     int i, j, k;                     /* indices for partition finding loops */
771     cpumask_var_t *doms;             /* resulting partition; i.e. sched domains */
772     struct sched_domain_attr *dattr; /* attributes for custom domains */
773     int ndoms = 0;                   /* number of sched domains in result */
774     int nslot;                       /* next empty doms[] struct cpumask slot */
775     struct cgroup_subsys_state *pos_css;
776     bool root_load_balance = is_sched_load_balance(&top_cpuset);
777 
778     doms = NULL;
779     dattr = NULL;
780     csa = NULL;
781 
782     /* Special case for the 99% of systems with one, full, sched domain */
783     if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
784         ndoms = 1;
785         doms = alloc_sched_domains(ndoms);
786         if (!doms) {
787             goto done;
788         }
789 
790         dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
791         if (dattr) {
792             *dattr = SD_ATTR_INIT;
793             update_domain_attr_tree(dattr, &top_cpuset);
794         }
795         cpumask_and(doms[0], top_cpuset.effective_cpus, housekeeping_cpumask(HK_FLAG_DOMAIN));
796 
797         goto done;
798     }
799 
800     csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
801     if (!csa) {
802         goto done;
803     }
804     csn = 0;
805 
806     rcu_read_lock();
807     if (root_load_balance) {
808         csa[csn++] = &top_cpuset;
809     }
810     cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
811         if (cp == &top_cpuset) {
812             continue;
813         }
814         /*
815          * Continue traversing beyond @cp iff @cp has some CPUs and
816          * isn't load balancing.  The former is obvious.  The
817          * latter: All child cpusets contain a subset of the
818          * parent's cpus, so just skip them, and then we call
819          * update_domain_attr_tree() to calc relax_domain_level of
820          * the corresponding sched domain.
821          *
822          * If root is load-balancing, we can skip @cp if it
823          * is a subset of the root's effective_cpus.
824          */
825         if (!cpumask_empty(cp->cpus_allowed) &&
826             !(is_sched_load_balance(cp) &&
827               cpumask_intersects(cp->cpus_allowed, housekeeping_cpumask(HK_FLAG_DOMAIN)))) {
828             continue;
829         }
830 
831         if (root_load_balance && cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) {
832             continue;
833         }
834 
835         if (is_sched_load_balance(cp) && !cpumask_empty(cp->effective_cpus)) {
836             csa[csn++] = cp;
837         }
838 
839         /* skip @cp's subtree if not a partition root */
840         if (!is_partition_root(cp)) {
841             pos_css = css_rightmost_descendant(pos_css);
842         }
843     }
844     rcu_read_unlock();
845 
846     for (i = 0; i < csn; i++) {
847         csa[i]->pn = i;
848     }
849     ndoms = csn;
850 
851 restart:
852     /* Find the best partition (set of sched domains) */
853     for (i = 0; i < csn; i++) {
854         struct cpuset *a = csa[i];
855         int apn = a->pn;
856 
857         for (j = 0; j < csn; j++) {
858             struct cpuset *b = csa[j];
859             int bpn = b->pn;
860 
861             if (apn != bpn && cpusets_overlap(a, b)) {
862                 for (k = 0; k < csn; k++) {
863                     struct cpuset *c = csa[k];
864 
865                     if (c->pn == bpn) {
866                         c->pn = apn;
867                     }
868                 }
869                 ndoms--; /* one less element */
870                 goto restart;
871             }
872         }
873     }
874 
875     /*
876      * Now we know how many domains to create.
877      * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
878      */
879     doms = alloc_sched_domains(ndoms);
880     if (!doms) {
881         goto done;
882     }
883 
884     /*
885      * The rest of the code, including the scheduler, can deal with
886      * dattr==NULL case. No need to abort if alloc fails.
887      */
888     dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), GFP_KERNEL);
889 
890     for (nslot = 0, i = 0; i < csn; i++) {
891         struct cpuset *a = csa[i];
892         struct cpumask *dp;
893         int apn = a->pn;
894 
895         if (apn < 0) {
896             /* Skip completed partitions */
897             continue;
898         }
899 
900         dp = doms[nslot];
901 
902         if (nslot == ndoms) {
903             static int warnings = 10;
904             if (warnings) {
905                 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", nslot, ndoms, csn,
906                         i, apn);
907                 warnings--;
908             }
909             continue;
910         }
911 
912         cpumask_clear(dp);
913         if (dattr) {
914             *(dattr + nslot) = SD_ATTR_INIT;
915         }
916         for (j = i; j < csn; j++) {
917             struct cpuset *b = csa[j];
918 
919             if (apn == b->pn) {
920                 cpumask_or(dp, dp, b->effective_cpus);
921                 cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
922                 if (dattr) {
923                     update_domain_attr_tree(dattr + nslot, b);
924                 }
925 
926                 /* Done with this partition */
927                 b->pn = -1;
928             }
929         }
930         nslot++;
931     }
932     BUG_ON(nslot != ndoms);
933 
934 done:
935     kfree(csa);
936 
937     /*
938      * Fallback to the default domain if kmalloc() failed.
939      * See comments in partition_sched_domains().
940      */
941     if (doms == NULL) {
942         ndoms = 1;
943     }
944 
945     *domains = doms;
946     *attributes = dattr;
947     return ndoms;
948 }
949 
950 static void update_tasks_root_domain(struct cpuset *cs)
951 {
952     struct css_task_iter it;
953     struct task_struct *task;
954 
955     css_task_iter_start(&cs->css, 0, &it);
956 
957     while ((task = css_task_iter_next(&it))) {
958         dl_add_task_root_domain(task);
959     }
960 
961     css_task_iter_end(&it);
962 }
963 
964 static void rebuild_root_domains(void)
965 {
966     struct cpuset *cs = NULL;
967     struct cgroup_subsys_state *pos_css;
968 
969     lockdep_assert_held(&cpuset_mutex);
970     lockdep_assert_cpus_held();
971     lockdep_assert_held(&sched_domains_mutex);
972 
973     rcu_read_lock();
974 
975     /*
976      * Clear default root domain DL accounting, it will be computed again
977      * if a task belongs to it.
978      */
979     dl_clear_root_domain(&def_root_domain);
980 
981     cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
982         if (cpumask_empty(cs->effective_cpus)) {
983             pos_css = css_rightmost_descendant(pos_css);
984             continue;
985         }
986 
987         css_get(&cs->css);
988 
989         rcu_read_unlock();
990 
991         update_tasks_root_domain(cs);
992 
993         rcu_read_lock();
994         css_put(&cs->css);
995     }
996     rcu_read_unlock();
997 }
998 
999 static void partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1000                                                 struct sched_domain_attr *dattr_new)
1001 {
1002     mutex_lock(&sched_domains_mutex);
1003     partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1004     rebuild_root_domains();
1005     mutex_unlock(&sched_domains_mutex);
1006 }
1007 
1008 /*
1009  * Rebuild scheduler domains.
1010  *
1011  * If the flag 'sched_load_balance' of any cpuset with non-empty
1012  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1013  * which has that flag enabled, or if any cpuset with a non-empty
1014  * 'cpus' is removed, then call this routine to rebuild the
1015  * scheduler's dynamic sched domains.
1016  *
1017  * Call with cpuset_mutex held.  Takes get_online_cpus().
1018  */
1019 static void rebuild_sched_domains_locked(void)
1020 {
1021     struct cgroup_subsys_state *pos_css;
1022     struct sched_domain_attr *attr;
1023     cpumask_var_t *doms;
1024     struct cpuset *cs;
1025     int ndoms;
1026 
1027     lockdep_assert_held(&cpuset_mutex);
1028 
1029     /*
1030      * If we have raced with CPU hotplug, return early to avoid
1031      * passing doms with offlined cpu to partition_sched_domains().
1032      * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
1033      *
1034      * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1035      * should be the same as the active CPUs, so checking only top_cpuset
1036      * is enough to detect racing CPU offlines.
1037      */
1038     if (!top_cpuset.nr_subparts_cpus && !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) {
1039         return;
1040     }
1041 
1042     /*
1043      * With subpartition CPUs, however, the effective CPUs of a partition
1044      * root should be only a subset of the active CPUs.  Since a CPU in any
1045      * partition root could be offlined, all must be checked.
1046      */
1047     if (top_cpuset.nr_subparts_cpus) {
1048         rcu_read_lock();
1049         cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1050             if (!is_partition_root(cs)) {
1051                 pos_css = css_rightmost_descendant(pos_css);
1052                 continue;
1053             }
1054             if (!cpumask_subset(cs->effective_cpus, cpu_active_mask)) {
1055                 rcu_read_unlock();
1056                 return;
1057             }
1058         }
1059         rcu_read_unlock();
1060     }
1061 
1062     /* Generate domain masks and attrs */
1063     ndoms = generate_sched_domains(&doms, &attr);
1064 
1065     /* Have scheduler rebuild the domains */
1066     partition_and_rebuild_sched_domains(ndoms, doms, attr);
1067 }
1068 #else  /* !CONFIG_SMP */
1069 static void rebuild_sched_domains_locked(void)
1070 {
1071 }
1072 #endif /* CONFIG_SMP */
1073 
1074 void rebuild_sched_domains(void)
1075 {
1076     get_online_cpus();
1077     mutex_lock(&cpuset_mutex);
1078     rebuild_sched_domains_locked();
1079     mutex_unlock(&cpuset_mutex);
1080     put_online_cpus();
1081 }
1082 
1083 static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p, const struct cpumask *new_mask)
1084 {
1085     return set_cpus_allowed_ptr(p, new_mask);
1086 }
1087 
1088 /**
1089  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1090  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1091  *
1092  * Iterate through each task of @cs updating its cpus_allowed to the
1093  * effective cpuset's.  As this function is called with cpuset_mutex held,
1094  * cpuset membership stays stable.
1095  */
1096 static void update_tasks_cpumask(struct cpuset *cs)
1097 {
1098     struct css_task_iter it;
1099     struct task_struct *task;
1100 
1101     css_task_iter_start(&cs->css, 0, &it);
1102     while ((task = css_task_iter_next(&it))) {
1103         update_cpus_allowed(cs, task, cs->effective_cpus);
1104     }
1105     css_task_iter_end(&it);
1106 }
1107 
1108 /**
1109  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1110  * @new_cpus: the temp variable for the new effective_cpus mask
1111  * @cs: the cpuset that needs to recompute the new effective_cpus mask
1112  * @parent: the parent cpuset
1113  *
1114  * If the parent has subpartition CPUs, include them in the list of
1115  * allowable CPUs in computing the new effective_cpus mask. Since offlined
1116  * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1117  * to mask those out.
1118  */
1119 static void compute_effective_cpumask(struct cpumask *new_cpus, struct cpuset *cs, struct cpuset *parent)
1120 {
1121     if (parent->nr_subparts_cpus) {
1122         cpumask_or(new_cpus, parent->effective_cpus, parent->subparts_cpus);
1123         cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
1124         cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1125     } else {
1126         cpumask_and(new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
1127     }
1128 }
1129 
1130 /*
1131  * Commands for update_parent_subparts_cpumask
1132  */
1133 enum subparts_cmd {
1134     partcmd_enable,  /* Enable partition root     */
1135     partcmd_disable, /* Disable partition root     */
1136     partcmd_update,  /* Update parent's subparts_cpus */
1137 };
1138 
1139 /**
1140  * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1141  * @cpuset:  The cpuset that requests change in partition root state
1142  * @cmd:     Partition root state change command
1143  * @newmask: Optional new cpumask for partcmd_update
1144  * @tmp:     Temporary addmask and delmask
1145  * Return:   0, 1 or an error code
1146  *
1147  * For partcmd_enable, the cpuset is being transformed from a non-partition
1148  * root to a partition root. The cpus_allowed mask of the given cpuset will
1149  * be put into parent's subparts_cpus and taken away from parent's
1150  * effective_cpus. The function will return 0 if all the CPUs listed in
1151  * cpus_allowed can be granted or an error code will be returned.
1152  *
1153  * For partcmd_disable, the cpuset is being transformed from a partition
1154  * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1155  * parent's subparts_cpus will be taken away from that cpumask and put back
1156  * into parent's effective_cpus. 0 should always be returned.
1157  *
1158  * For partcmd_update, if the optional newmask is specified, the cpu
1159  * list is to be changed from cpus_allowed to newmask. Otherwise,
1160  * cpus_allowed is assumed to remain the same. The cpuset should either
1161  * be a partition root or an invalid partition root. The partition root
1162  * state may change if newmask is NULL and none of the requested CPUs can
1163  * be granted by the parent. The function will return 1 if changes to
1164  * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
1165  * Error code should only be returned when newmask is non-NULL.
1166  *
1167  * The partcmd_enable and partcmd_disable commands are used by
1168  * update_prstate(). The partcmd_update command is used by
1169  * update_cpumasks_hier() with newmask NULL and update_cpumask() with
1170  * newmask set.
1171  *
1172  * The checking is more strict when enabling partition root than the
1173  * other two commands.
1174  *
1175  * Because of the implicit cpu exclusive nature of a partition root,
1176  * cpumask changes that violate the cpu exclusivity rule will not be
1177  * permitted when checked by validate_change(). The validate_change()
1178  * function will also prevent any changes to the cpu list if it is not
1179  * a superset of children's cpu lists.
1180  */
 */
1181 static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, struct cpumask *newmask, struct tmpmasks *tmp)
1182 {
1183     struct cpuset *parent = parent_cs(cpuset);
1184     int adding;   /* Moving cpus from effective_cpus to subparts_cpus */
1185     int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
1186     int new_prs;
1187     bool part_error = false; /* Partition error? */
1188 
1189     lockdep_assert_held(&cpuset_mutex);
1190 
1191     /*
1192      * The parent must be a partition root.
1193      * The new cpumask, if present, or the current cpus_allowed must
1194      * not be empty.
1195      */
1196     if (!is_partition_root(parent) || (newmask && cpumask_empty(newmask)) ||
1197         (!newmask && cpumask_empty(cpuset->cpus_allowed))) {
1198         return -EINVAL;
1199     }
1200 
1201     /*
1202      * Enabling/disabling partition root is not allowed if there are
1203      * online children.
1204      */
1205     if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) {
1206         return -EBUSY;
1207     }
1208 
1209     /*
1210      * Enabling partition root is not allowed if not all the CPUs
1211      * can be granted from parent's effective_cpus or at least one
1212      * CPU will be left after that.
1213      */
1214     if ((cmd == partcmd_enable) && (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
1215                                     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) {
1216         return -EINVAL;
1217     }
1218 
1219     /*
1220      * A cpumask update cannot make parent's effective_cpus become empty.
1221      */
1222     adding = deleting = false;
1223     new_prs = cpuset->partition_root_state;
1224     if (cmd == partcmd_enable) {
1225         cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
1226         adding = true;
1227     } else if (cmd == partcmd_disable) {
1228         deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1229     } else if (newmask) {
1230         /*
1231          * partcmd_update with newmask:
1232          *
1233          * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1234          * addmask = newmask & parent->effective_cpus
1235          *             & ~parent->subparts_cpus
1236          */
1237         cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
1238         deleting = cpumask_and(tmp->delmask, tmp->delmask, parent->subparts_cpus);
1239 
1240         cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
1241         adding = cpumask_andnot(tmp->addmask, tmp->addmask, parent->subparts_cpus);
1242         /*
1243          * Return error if the new effective_cpus could become empty.
1244          */
1245         if (adding && cpumask_equal(parent->effective_cpus, tmp->addmask)) {
1246             if (!deleting) {
1247                 return -EINVAL;
1248             }
1249             /*
1250              * As some of the CPUs in subparts_cpus might have
1251              * been offlined, we need to compute the real delmask
1252              * to confirm that.
1253              */
1254             if (!cpumask_and(tmp->addmask, tmp->delmask, cpu_active_mask)) {
1255                 return -EINVAL;
1256             }
1257             cpumask_copy(tmp->addmask, parent->effective_cpus);
1258         }
1259     } else {
1260         /*
1261          * partcmd_update w/o newmask:
1262          *
1263          * addmask = cpus_allowed & parent->effective_cpus
1264          *
1265          * Note that parent's subparts_cpus may have been
1266          * pre-shrunk in case there is a change in the cpu list.
1267          * So no deletion is needed.
1268          */
1269         adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, parent->effective_cpus);
1270         part_error = cpumask_equal(tmp->addmask, parent->effective_cpus);
1271     }
1272 
1273     if (cmd == partcmd_update) {
1274         int prev_prs = cpuset->partition_root_state;
1275 
1276         /*
1277          * Check for possible transition between PRS_ENABLED
1278          * and PRS_ERROR.
1279          */
1280         switch (cpuset->partition_root_state) {
1281             case PRS_ENABLED:
1282                 if (part_error) {
1283                     new_prs = PRS_ERROR;
1284                 }
1285                 break;
1286             case PRS_ERROR:
1287                 if (!part_error) {
1288                     new_prs = PRS_ENABLED;
1289                 }
1290                 break;
1291             default:
1292                 break;
1293         }
1294         /*
1295          * Set part_error if previously in invalid state.
1296          */
1297         part_error = (prev_prs == PRS_ERROR);
1298     }
1299 
1300     if (!part_error && (new_prs == PRS_ERROR)) {
1301         return 0; /* Nothing need to be done */
1302     }
1303 
1304     if (new_prs == PRS_ERROR) {
1305         /*
1306          * Remove all its cpus from parent's subparts_cpus.
1307          */
1308         adding = false;
1309         deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, parent->subparts_cpus);
1310     }
1311 
1312     if (!adding && !deleting && (new_prs == cpuset->partition_root_state)) {
1313         return 0;
1314     }
1315 
1316     /*
1317      * Change the parent's subparts_cpus.
1318      * Newly added CPUs will be removed from effective_cpus and
1319      * newly deleted ones will be added back to effective_cpus.
1320      */
1321     spin_lock_irq(&callback_lock);
1322     if (adding) {
1323         cpumask_or(parent->subparts_cpus, parent->subparts_cpus, tmp->addmask);
1324         cpumask_andnot(parent->effective_cpus, parent->effective_cpus, tmp->addmask);
1325     }
1326     if (deleting) {
1327         cpumask_andnot(parent->subparts_cpus, parent->subparts_cpus, tmp->delmask);
1328         /*
1329          * Some of the CPUs in subparts_cpus might have been offlined.
1330          */
1331         cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1332         cpumask_or(parent->effective_cpus, parent->effective_cpus, tmp->delmask);
1333     }
1334 
1335     parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1336 
1337     if (cpuset->partition_root_state != new_prs) {
1338         cpuset->partition_root_state = new_prs;
1339     }
1340     spin_unlock_irq(&callback_lock);
1341 
1342     return cmd == partcmd_update;
1343 }
1344 
1345 /*
1346  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1347  * @cs:  the cpuset to consider
1348  * @tmp: temp variables for calculating effective_cpus & partition setup
1349  *
1350  * When the configured cpumask is changed, the effective cpumasks of this cpuset
1351  * and all its descendants need to be updated.
1352  *
1353  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1354  *
1355  * Called with cpuset_mutex held
1356  */
1357 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
1358 {
1359     struct cpuset *cp;
1360     struct cgroup_subsys_state *pos_css;
1361     bool need_rebuild_sched_domains = false;
1362     int new_prs;
1363 
1364     rcu_read_lock();
1365     cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1366         struct cpuset *parent = parent_cs(cp);
1367 
1368         compute_effective_cpumask(tmp->new_cpus, cp, parent);
1369 
1370         /*
1371          * If it becomes empty, inherit the effective mask of the
1372          * parent, which is guaranteed to have some CPUs.
1373          */
1374         if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1375             cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1376             if (!cp->use_parent_ecpus) {
1377                 cp->use_parent_ecpus = true;
1378                 parent->child_ecpus_count++;
1379             }
1380         } else if (cp->use_parent_ecpus) {
1381             cp->use_parent_ecpus = false;
1382             WARN_ON_ONCE(!parent->child_ecpus_count);
1383             parent->child_ecpus_count--;
1384         }
1385 
1386         /*
1387          * Skip the whole subtree if the cpumask remains the same
1388          * and has no partition root state.
1389          */
1390         if (!cp->partition_root_state && cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
1391             pos_css = css_rightmost_descendant(pos_css);
1392             continue;
1393         }
1394 
1395         /*
1396          * update_parent_subparts_cpumask() should have been called
1397          * for cs already in update_cpumask(). We should also call
1398          * update_tasks_cpumask() again for tasks in the parent
1399          * cpuset if the parent's subparts_cpus changes.
1400          */
1401         new_prs = cp->partition_root_state;
1402         if ((cp != cs) && new_prs) {
1403             switch (parent->partition_root_state) {
1404                 case PRS_DISABLED:
1405                     /*
1406                      * If parent is not a partition root or an
1407                      * invalid partition root, clear its state
1408                      * and its CS_CPU_EXCLUSIVE flag.
1409                      */
1410                     WARN_ON_ONCE(cp->partition_root_state != PRS_ERROR);
1411                     new_prs = PRS_DISABLED;
1412 
1413                     /*
1414                      * clear_bit() is an atomic operation and
1415                      * readers aren't interested in the state
1416                      * of CS_CPU_EXCLUSIVE anyway. So we can
1417                      * just update the flag without holding
1418                      * the callback_lock.
1419                      */
1420                     clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
1421                     break;
1422 
1423                 case PRS_ENABLED:
1424                     if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) {
1425                         update_tasks_cpumask(parent);
1426                     }
1427                     break;
1428 
1429                 case PRS_ERROR:
1430                     /*
1431                      * When the parent is invalid, the child has to be invalid too.
1432                      */
1433                     new_prs = PRS_ERROR;
1434                     break;
1435             }
1436         }
1437 
1438         if (!css_tryget_online(&cp->css)) {
1439             continue;
1440         }
1441         rcu_read_unlock();
1442 
1443         spin_lock_irq(&callback_lock);
1444 
1445         cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1446         if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
1447             cp->nr_subparts_cpus = 0;
1448             cpumask_clear(cp->subparts_cpus);
1449         } else if (cp->nr_subparts_cpus) {
1450             /*
1451              * Make sure that effective_cpus & subparts_cpus
1452              * are mutually exclusive.
1453              *
1454              * In the unlikely event that effective_cpus
1455              * becomes empty, we clear cp->nr_subparts_cpus and
1456              * let its child partition roots compete for
1457              * CPUs again.
1458              */
1459             cpumask_andnot(cp->effective_cpus, cp->effective_cpus, cp->subparts_cpus);
1460             if (cpumask_empty(cp->effective_cpus)) {
1461                 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1462                 cpumask_clear(cp->subparts_cpus);
1463                 cp->nr_subparts_cpus = 0;
1464             } else if (!cpumask_subset(cp->subparts_cpus, tmp->new_cpus)) {
1465                 cpumask_andnot(cp->subparts_cpus, cp->subparts_cpus, tmp->new_cpus);
1466                 cp->nr_subparts_cpus = cpumask_weight(cp->subparts_cpus);
1467             }
1468         }
1469 
1470         if (new_prs != cp->partition_root_state) {
1471             cp->partition_root_state = new_prs;
1472         }
1473 
1474         spin_unlock_irq(&callback_lock);
1475 
1476         WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1477 
1478         update_tasks_cpumask(cp);
1479 
1480         /*
1481          * On legacy hierarchy, if the effective cpumask of any non-
1482          * empty cpuset is changed, we need to rebuild sched domains.
1483          * On default hierarchy, the cpuset needs to be a partition
1484          * root as well.
1485          */
1486         if (!cpumask_empty(cp->cpus_allowed) && is_sched_load_balance(cp) &&
1487             (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || is_partition_root(cp))) {
1488             need_rebuild_sched_domains = true;
1489         }
1490 
1491         rcu_read_lock();
1492         css_put(&cp->css);
1493     }
1494     rcu_read_unlock();
1495 
1496     if (need_rebuild_sched_domains) {
1497         rebuild_sched_domains_locked();
1498     }
1499 }
1500 
1501 /**
1502  * update_sibling_cpumasks - Update siblings cpumasks
1503  * @parent:  Parent cpuset
1504  * @cs:      Current cpuset
1505  * @tmp:     Temp variables
1506  */
1507 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct tmpmasks *tmp)
1508 {
1509     struct cpuset *sibling;
1510     struct cgroup_subsys_state *pos_css;
1511 
1512     percpu_rwsem_assert_held(&cpuset_rwsem);
1513     /*
1514      * Check all its siblings and call update_cpumasks_hier()
1515      * if their use_parent_ecpus flag is set in order for them
1516      * to use the right effective_cpus value.
1517      */
1518     rcu_read_lock();
1519     cpuset_for_each_child(sibling, pos_css, parent) {
1520         if (sibling == cs) {
1521             continue;
1522         }
1523         if (!sibling->use_parent_ecpus) {
1524             continue;
1525         }
1526         if (!css_tryget_online(&sibling->css)) {
1527             continue;
1528         }
1529         rcu_read_unlock();
1530         update_cpumasks_hier(sibling, tmp);
1531         rcu_read_lock();
1532         css_put(&sibling->css);
1533     }
1534     rcu_read_unlock();
1535 }
1536 
1537 /**
1538  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1539  * @cs: the cpuset to consider
1540  * @trialcs: trial cpuset
1541  * @buf: buffer of cpu numbers written to this cpuset
1542  */
1543 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf)
1544 {
1545     int retval;
1546     struct tmpmasks tmp;
1547 
1548     /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1549     if (cs == &top_cpuset) {
1550         return -EACCES;
1551     }
1552 
1553     /*
1554      * An empty cpus_requested is ok only if the cpuset has no tasks.
1555      * Since cpulist_parse() fails on an empty mask, we special case
1556      * that parsing.  The validate_change() call ensures that cpusets
1557      * with tasks have cpus.
1558      */
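    /* A cpulist uses the usual list format, e.g. "0-3,8" selects CPUs 0-3 and 8. */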
1559     if (!*buf) {
1560         cpumask_clear(trialcs->cpus_requested);
1561     } else {
1562         retval = cpulist_parse(buf, trialcs->cpus_requested);
1563         if (retval < 0) {
1564             return retval;
1565         }
1566     }
1567 
1568     if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) {
1569         return -EINVAL;
1570     }
1571 
1572     cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
1573 
1574     /* Nothing to do if the cpus didn't change */
1575     if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) {
1576         return 0;
1577     }
1578 
1579     retval = validate_change(cs, trialcs);
1580     if (retval < 0) {
1581         return retval;
1582     }
1583 
1584 #ifdef CONFIG_CPUMASK_OFFSTACK
1585     /*
1586      * Use the cpumasks in trialcs for tmpmasks when they are pointers
1587      * to allocated cpumasks.
1588      */
1589     tmp.addmask = trialcs->subparts_cpus;
1590     tmp.delmask = trialcs->effective_cpus;
1591     tmp.new_cpus = trialcs->cpus_allowed;
1592 #endif
1593 
1594     if (cs->partition_root_state) {
1595         /* Cpumask of a partition root cannot be empty */
1596         if (cpumask_empty(trialcs->cpus_allowed)) {
1597             return -EINVAL;
1598         }
1599         if (update_parent_subparts_cpumask(cs, partcmd_update, trialcs->cpus_allowed, &tmp) < 0) {
1600             return -EINVAL;
1601         }
1602     }
1603 
1604     spin_lock_irq(&callback_lock);
1605     cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1606     cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
1607 
1608     /*
1609      * Make sure that subparts_cpus is a subset of cpus_allowed.
1610      */
1611     if (cs->nr_subparts_cpus) {
1612         cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
1613         cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1614     }
1615     spin_unlock_irq(&callback_lock);
1616 
1617     update_cpumasks_hier(cs, &tmp);
1618 
1619     if (cs->partition_root_state) {
1620         struct cpuset *parent = parent_cs(cs);
1621 
1622         /*
1623          * For partition root, update the cpumasks of sibling
1624          * cpusets if they use parent's effective_cpus.
1625          */
1626         if (parent->child_ecpus_count) {
1627             update_sibling_cpumasks(parent, cs, &tmp);
1628         }
1629     }
1630     return 0;
1631 }
1632 
1633 /*
1634  * Migrate memory region from one set of nodes to another.  This is
1635  * performed asynchronously as it can be called from process migration path
1636  * holding locks involved in process management.  All mm migrations are
1637  * performed in the queued order and can be waited for by flushing
1638  * cpuset_migrate_mm_wq.
1639  */
1640 
1641 struct cpuset_migrate_mm_work {
1642     struct work_struct work;
1643     struct mm_struct *mm;
1644     nodemask_t from;
1645     nodemask_t to;
1646 };
1647 
1648 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1649 {
1650     struct cpuset_migrate_mm_work *mwork = container_of(work, struct cpuset_migrate_mm_work, work);
1651 
1652     /* on a wq worker, no need to worry about %current's mems_allowed */
1653     do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1654     mmput(mwork->mm);
1655     kfree(mwork);
1656 }
1657 
1658 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to)
1659 {
1660     struct cpuset_migrate_mm_work *mwork;
1661 
1662     mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1663     if (mwork) {
1664         mwork->mm = mm;
1665         mwork->from = *from;
1666         mwork->to = *to;
1667         INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1668         queue_work(cpuset_migrate_mm_wq, &mwork->work);
1669     } else {
1670         mmput(mm);
1671     }
1672 }
1673 
1674 static void cpuset_post_attach(void)
1675 {
1676     flush_workqueue(cpuset_migrate_mm_wq);
1677 }
1678 
1679 /*
1680  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1681  * @tsk: the task to change
1682  * @newmems: new nodes that the task will be set
1683  *
1684  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1685  * and rebind the task's mempolicy, if any. If the task is allocating in
1686  * parallel, it might temporarily see an empty intersection, which results in
1687  * a seqlock check and retry before OOM or allocation failure.
1688  */
1689 static void cpuset_change_task_nodemask(struct task_struct *tsk, nodemask_t *newmems)
1690 {
1691     task_lock(tsk);
1692 
1693     local_irq_disable();
1694     write_seqcount_begin(&tsk->mems_allowed_seq);
1695 
1696     nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1697     mpol_rebind_task(tsk, newmems);
1698     tsk->mems_allowed = *newmems;
1699 
1700     write_seqcount_end(&tsk->mems_allowed_seq);
1701     local_irq_enable();
1702 
1703     task_unlock(tsk);
1704 }
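
/*
 * Reader side, for reference (a sketch, not part of the original source):
 * allocation paths pair with the seqcount written above by using
 * read_mems_allowed_begin()/read_mems_allowed_retry() from <linux/cpuset.h>:
 *
 *	unsigned int seq;
 *	nodemask_t nmask;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		nmask = current->mems_allowed;
 *		... attempt the allocation against nmask ...
 *	} while (read_mems_allowed_retry(seq));
 */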
1705 
1706 static void *cpuset_being_rebound;
1707 
1708 /**
1709  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1710  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1711  *
1712  * Iterate through each task of @cs, updating its mems_allowed to the
1713  * cpuset's effective_mems.  As this function is called with cpuset_mutex held,
1714  * cpuset membership stays stable.
1715  */
1716 static void update_tasks_nodemask(struct cpuset *cs)
1717 {
1718     static nodemask_t newmems; /* protected by cpuset_mutex */
1719     struct css_task_iter it;
1720     struct task_struct *task;
1721 
1722     cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1723 
1724     guarantee_online_mems(cs, &newmems);
1725 
1726     /*
1727      * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
1728      * take while holding tasklist_lock.  Forks can happen - the
1729      * mpol_dup() cpuset_being_rebound check will catch such forks,
1730      * and rebind their vma mempolicies too.  Because we still hold
1731      * the global cpuset_mutex, we know that no other rebind effort
1732      * will be contending for the global variable cpuset_being_rebound.
1733      * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1734      * is idempotent.  Also migrate pages in each mm to new nodes.
1735      */
1736     css_task_iter_start(&cs->css, 0, &it);
1737     while ((task = css_task_iter_next(&it))) {
1738         struct mm_struct *mm;
1739         bool migrate;
1740 
1741         cpuset_change_task_nodemask(task, &newmems);
1742 
1743         mm = get_task_mm(task);
1744         if (!mm) {
1745             continue;
1746         }
1747 
1748         migrate = is_memory_migrate(cs);
1749 
1750         mpol_rebind_mm(mm, &cs->mems_allowed);
1751         if (migrate) {
1752             cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1753         } else {
1754             mmput(mm);
1755         }
1756     }
1757     css_task_iter_end(&it);
1758 
1759     /*
1760      * All the tasks' nodemasks have been updated, update
1761      * cs->old_mems_allowed.
1762      */
1763     cs->old_mems_allowed = newmems;
1764 
1765     /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1766     cpuset_being_rebound = NULL;
1767 }
1768 
1769 /*
1770  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1771  * @cs: the cpuset to consider
1772  * @new_mems: a temp variable for calculating new effective_mems
1773  *
1774  * When configured nodemask is changed, the effective nodemasks of this cpuset
1775  * and all its descendants need to be updated.
1776  *
1777  * On legacy hierarchy, effective_mems will be the same as mems_allowed.
1778  *
1779  * Called with cpuset_mutex held
1780  */
1781 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1782 {
1783     struct cpuset *cp;
1784     struct cgroup_subsys_state *pos_css;
1785 
1786     rcu_read_lock();
1787     cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1788         struct cpuset *parent = parent_cs(cp);
1789 
1790         nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1791 
1792         /*
1793          * If it becomes empty, inherit the effective mask of the
1794          * parent, which is guaranteed to have some MEMs.
1795          */
1796         if (is_in_v2_mode() && nodes_empty(*new_mems)) {
1797             *new_mems = parent->effective_mems;
1798         }
1799 
1800         /* Skip the whole subtree if the nodemask remains the same. */
1801         if (nodes_equal(*new_mems, cp->effective_mems)) {
1802             pos_css = css_rightmost_descendant(pos_css);
1803             continue;
1804         }
1805 
1806         if (!css_tryget_online(&cp->css)) {
1807             continue;
1808         }
1809         rcu_read_unlock();
1810 
1811         spin_lock_irq(&callback_lock);
1812         cp->effective_mems = *new_mems;
1813         spin_unlock_irq(&callback_lock);
1814 
1815         WARN_ON(!is_in_v2_mode() && !nodes_equal(cp->mems_allowed, cp->effective_mems));
1816 
1817         update_tasks_nodemask(cp);
1818 
1819         rcu_read_lock();
1820         css_put(&cp->css);
1821     }
1822     rcu_read_unlock();
1823 }
1824 
1825 /*
1826  * Handle user request to change the 'mems' memory placement
1827  * of a cpuset.  Needs to validate the request, update the
1828  * cpusets mems_allowed, and for each task in the cpuset,
1829  * update mems_allowed and rebind task's mempolicy and any vma
1830  * mempolicies and if the cpuset is marked 'memory_migrate',
1831  * migrate the tasks pages to the new memory.
1832  *
1833  * Call with cpuset_mutex held. May take callback_lock during call.
1834  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1835  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
1836  * their mempolicies to the cpusets new mems_allowed.
1837  */
1838 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, const char *buf)
1839 {
1840     int retval;
1841 
1842     /*
1843      * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1844      * it's read-only
1845      */
1846     if (cs == &top_cpuset) {
1847         retval = -EACCES;
1848         goto done;
1849     }
1850 
1851     /*
1852      * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1853      * Since nodelist_parse() fails on an empty mask, we special case
1854      * that parsing.  The validate_change() call ensures that cpusets
1855      * with tasks have memory.
1856      */
1857     if (!*buf) {
1858         nodes_clear(trialcs->mems_allowed);
1859     } else {
1860         retval = nodelist_parse(buf, trialcs->mems_allowed);
1861         if (retval < 0) {
1862             goto done;
1863         }
1864 
1865         if (!nodes_subset(trialcs->mems_allowed, top_cpuset.mems_allowed)) {
1866             retval = -EINVAL;
1867             goto done;
1868         }
1869     }
1870 
1871     if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1872         retval = 0; /* Too easy - nothing to do */
1873         goto done;
1874     }
1875     retval = validate_change(cs, trialcs);
1876     if (retval < 0) {
1877         goto done;
1878     }
1879 
1880     spin_lock_irq(&callback_lock);
1881     cs->mems_allowed = trialcs->mems_allowed;
1882     spin_unlock_irq(&callback_lock);
1883 
1884     /* use trialcs->mems_allowed as a temp variable */
1885     update_nodemasks_hier(cs, &trialcs->mems_allowed);
1886 done:
1887     return retval;
1888 }
1889 
1890 bool current_cpuset_is_being_rebound(void)
1891 {
1892     bool ret;
1893 
1894     rcu_read_lock();
1895     ret = task_cs(current) == cpuset_being_rebound;
1896     rcu_read_unlock();
1897 
1898     return ret;
1899 }
1900 
1901 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1902 {
1903 #ifdef CONFIG_SMP
1904     if (val < -1 || val >= sched_domain_level_max) {
1905         return -EINVAL;
1906     }
1907 #endif
1908 
1909     if (val != cs->relax_domain_level) {
1910         cs->relax_domain_level = val;
1911         if (!cpumask_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) {
1912             rebuild_sched_domains_locked();
1913         }
1914     }
1915 
1916     return 0;
1917 }
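
/*
 * For reference, a summary of what the levels request (paraphrased from
 * Documentation/admin-guide/cgroup-v1/cpusets.rst, not part of the original
 * source) - how widely the scheduler searches for an idle CPU on wakeup/exec
 * balancing:
 *	-1: no request, use the system default
 *	 0: no search
 *	 1: search siblings (hyperthreads in a core)
 *	 2: search cores in a package
 *	 3: search CPUs in a node (system wide on non-NUMA systems)
 *	 4: search nodes in a chunk of nodes (NUMA)
 *	 5: search system wide (NUMA)
 */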
1918 
1919 /**
1920  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1921  * @cs: the cpuset in which each task's spread flags needs to be changed
1922  *
1923  * Iterate through each task of @cs updating its spread flags.  As this
1924  * function is called with cpuset_mutex held, cpuset membership stays
1925  * stable.
1926  */
1927 static void update_tasks_flags(struct cpuset *cs)
1928 {
1929     struct css_task_iter it;
1930     struct task_struct *task;
1931 
1932     css_task_iter_start(&cs->css, 0, &it);
1933     while ((task = css_task_iter_next(&it))) {
1934         cpuset_update_task_spread_flag(cs, task);
1935     }
1936     css_task_iter_end(&it);
1937 }
1938 
1939 /*
1940  * update_flag - read a 0 or a 1 in a file and update associated flag
1941  * bit:        the bit to update (see cpuset_flagbits_t)
1942  * cs:        the cpuset to update
1943  * turning_on:     whether the flag is being set or cleared
1944  *
1945  * Call with cpuset_mutex held.
1946  */
1947 
1948 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on)
1949 {
1950     struct cpuset *trialcs;
1951     int balance_flag_changed;
1952     int spread_flag_changed;
1953     int err;
1954 
1955     trialcs = alloc_trial_cpuset(cs);
1956     if (!trialcs) {
1957         return -ENOMEM;
1958     }
1959 
1960     if (turning_on) {
1961         set_bit(bit, &trialcs->flags);
1962     } else {
1963         clear_bit(bit, &trialcs->flags);
1964     }
1965 
1966     err = validate_change(cs, trialcs);
1967     if (err < 0) {
1968         goto out;
1969     }
1970 
1971     balance_flag_changed = (is_sched_load_balance(cs) != is_sched_load_balance(trialcs));
1972 
1973     spread_flag_changed =
1974         ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs)));
1975 
1976     spin_lock_irq(&callback_lock);
1977     cs->flags = trialcs->flags;
1978     spin_unlock_irq(&callback_lock);
1979 
1980     if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
1981         rebuild_sched_domains_locked();
1982     }
1983 
1984     if (spread_flag_changed) {
1985         update_tasks_flags(cs);
1986     }
1987 out:
1988     free_cpuset(trialcs);
1989     return err;
1990 }
1991 
1992 /*
1993  * update_prstate - update partition_root_state
1994  * cs: the cpuset to update
1995  * new_prs: new partition root state
1996  *
1997  * Call with cpuset_mutex held.
1998  */
1999 static int update_prstate(struct cpuset *cs, int new_prs)
2000 {
2001     int err, old_prs = cs->partition_root_state;
2002     struct cpuset *parent = parent_cs(cs);
2003     struct tmpmasks tmpmask;
2004 
2005     if (old_prs == new_prs) {
2006         return 0;
2007     }
2008 
2009     /*
2010      * Cannot force a partial or invalid partition root to a full
2011      * partition root.
2012      */
2013     if (new_prs && (old_prs == PRS_ERROR)) {
2014         return -EINVAL;
2015     }
2016 
2017     if (alloc_cpumasks(NULL, &tmpmask)) {
2018         return -ENOMEM;
2019     }
2020 
2021     err = -EINVAL;
2022     if (!old_prs) {
2023         /*
2024          * Turning on partition root requires setting the
2025          * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
2026          * cannot be empty.
2027          */
2028         if (cpumask_empty(cs->cpus_allowed)) {
2029             goto out;
2030         }
2031 
2032         err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
2033         if (err) {
2034             goto out;
2035         }
2036 
2037         err = update_parent_subparts_cpumask(cs, partcmd_enable, NULL, &tmpmask);
2038         if (err) {
2039             update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2040             goto out;
2041         }
2042     } else {
2043         /*
2044          * Turning off partition root will clear the
2045          * CS_CPU_EXCLUSIVE bit.
2046          */
2047         if (old_prs == PRS_ERROR) {
2048             update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2049             err = 0;
2050             goto out;
2051         }
2052 
2053         err = update_parent_subparts_cpumask(cs, partcmd_disable, NULL, &tmpmask);
2054         if (err) {
2055             goto out;
2056         }
2057 
2058         /* Turning off CS_CPU_EXCLUSIVE will not return error */
2059         update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2060     }
2061 
2062     /*
2063      * Update cpumask of parent's tasks except when it is the top
2064      * cpuset as some system daemons cannot be mapped to other CPUs.
2065      */
2066     if (parent != &top_cpuset) {
2067         update_tasks_cpumask(parent);
2068     }
2069 
2070     if (parent->child_ecpus_count) {
2071         update_sibling_cpumasks(parent, cs, &tmpmask);
2072     }
2073 
2074     rebuild_sched_domains_locked();
2075 out:
2076     if (!err) {
2077         spin_lock_irq(&callback_lock);
2078         cs->partition_root_state = new_prs;
2079         spin_unlock_irq(&callback_lock);
2080     }
2081 
2082     free_cpumasks(NULL, &tmpmask);
2083     return err;
2084 }
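
/*
 * Summary of the transitions handled above (derived from the code, for
 * reference):
 *	member -> root:  requires a non-empty cpus_allowed, implicitly sets
 *		CS_CPU_EXCLUSIVE and claims the CPUs from the parent.
 *	root -> member:  returns the CPUs to the parent and clears
 *		CS_CPU_EXCLUSIVE.
 *	root invalid -> member:  only clears CS_CPU_EXCLUSIVE; forcing an
 *		invalid root directly back to a full root is rejected (-EINVAL).
 */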
2085 
2086 /*
2087  * Frequency meter - How fast is some event occurring?
2088  *
2089  * These routines manage a digitally filtered, constant time based,
2090  * event frequency meter.  There are four routines:
2091  *   fmeter_init() - initialize a frequency meter.
2092  *   fmeter_markevent() - called each time the event happens.
2093  *   fmeter_getrate() - returns the recent rate of such events.
2094  *   fmeter_update() - internal routine used to update fmeter.
2095  *
2096  * A common data structure is passed to each of these routines,
2097  * which is used to keep track of the state required to manage the
2098  * frequency meter and its digital filter.
2099  *
2100  * The filter works on the number of events marked per unit time.
2101  * The filter is single-pole low-pass recursive (IIR).  The time unit
2102  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2103  * simulate 3 decimal digits of precision (multiplied by 1000).
2104  *
2105  * With an FM_COEF of 933, and a time base of 1 second, the filter
2106  * has a half-life of 10 seconds, meaning that if the events quit
2107  * happening, then the rate returned from the fmeter_getrate()
2108  * will be cut in half each 10 seconds, until it converges to zero.
2109  *
2110  * It is not worth doing a real infinitely recursive filter.  If more
2111  * than FM_MAXTICKS ticks have elapsed since the last filter event,
2112  * just compute FM_MAXTICKS ticks worth, by which point the level
2113  * will be stable.
2114  *
2115  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2116  * arithmetic overflow in the fmeter_update() routine.
2117  *
2118  * Given the simple 32 bit integer arithmetic used, this meter works
2119  * best for reporting rates between one per millisecond (msec) and
2120  * one per 32 (approx) seconds.  At constant rates faster than one
2121  * per msec it maxes out at values just under 1,000,000.  At constant
2122  * rates between one per msec, and one per second it will stabilize
2123  * to a value N*1000, where N is the rate of events per second.
2124  * At constant rates between one per second and one per 32 seconds,
2125  * it will be choppy, moving up on the seconds that have an event,
2126  * and then decaying until the next event.  At rates slower than
2127  * about one in 32 seconds, it decays all the way back to zero between
2128  * each event.
2129  */
2130 
2131 #define FM_COEF 933           /* coefficient for half-life of 10 secs */
2132 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
2133 #define FM_MAXCNT 1000000     /* limit cnt to avoid overflow */
2134 #define FM_SCALE 1000         /* faux fixed point scale */
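
/*
 * A worked example, for illustration (derived from the constants above):
 * each one-second tick multiplies the level by FM_COEF/FM_SCALE = 0.933,
 * so after n idle seconds the level is val * 0.933^n.  Solving
 * 0.933^n = 1/2 gives n = ln(2)/ln(1/0.933) ~= 10, i.e. the 10 second
 * half-life described above.  At a steady rate of N events/sec, each second
 * adds (1 - 0.933) * N * FM_SCALE after the decay step, so the level settles
 * where val = 0.933 * val + 0.067 * N * 1000, i.e. val ~= N * 1000.
 */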
2135 
2136 /* Initialize a frequency meter */
2137 static void fmeter_init(struct fmeter *fmp)
2138 {
2139     fmp->cnt = 0;
2140     fmp->val = 0;
2141     fmp->time = 0;
2142     spin_lock_init(&fmp->lock);
2143 }
2144 
2145 /* Internal meter update - process cnt events and update value */
2146 static void fmeter_update(struct fmeter *fmp)
2147 {
2148     time64_t now;
2149     u32 ticks;
2150 
2151     now = ktime_get_seconds();
2152     ticks = now - fmp->time;
2153 
2154     if (ticks == 0) {
2155         return;
2156     }
2157 
2158     ticks = min(FM_MAXTICKS, ticks);
2159     while (ticks-- > 0) {
2160         fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2161     }
2162     fmp->time = now;
2163 
2164     fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2165     fmp->cnt = 0;
2166 }
2167 
2168 /* Process any previous ticks, then bump cnt by one (times scale). */
2169 static void fmeter_markevent(struct fmeter *fmp)
2170 {
2171     spin_lock(&fmp->lock);
2172     fmeter_update(fmp);
2173     fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2174     spin_unlock(&fmp->lock);
2175 }
2176 
2177 /* Process any previous ticks, then return current value. */
2178 static int fmeter_getrate(struct fmeter *fmp)
2179 {
2180     int val;
2181 
2182     spin_lock(&fmp->lock);
2183     fmeter_update(fmp);
2184     val = fmp->val;
2185     spin_unlock(&fmp->lock);
2186     return val;
2187 }
2188 
2189 static struct cpuset *cpuset_attach_old_cs;
2190 
2191 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2192 static int cpuset_can_attach(struct cgroup_taskset *tset)
2193 {
2194     struct cgroup_subsys_state *css;
2195     struct cpuset *cs;
2196     struct task_struct *task;
2197     int ret;
2198 
2199     /* used later by cpuset_attach() */
2200     cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2201     cs = css_cs(css);
2202 
2203     mutex_lock(&cpuset_mutex);
2204 
2205     /* allow moving tasks into an empty cpuset if on default hierarchy */
2206     ret = -ENOSPC;
2207     if (!is_in_v2_mode() && (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) {
2208         goto out_unlock;
2209     }
2210 
2211     cgroup_taskset_for_each(task, css, tset)
2212     {
2213         ret = task_can_attach(task, cs->effective_cpus);
2214         if (ret) {
2215             goto out_unlock;
2216         }
2217         ret = security_task_setscheduler(task);
2218         if (ret) {
2219             goto out_unlock;
2220         }
2221     }
2222 
2223     /*
2224      * Mark attach is in progress.  This makes validate_change() fail
2225      * changes which zero cpus/mems_allowed.
2226      */
2227     cs->attach_in_progress++;
2228     ret = 0;
2229 out_unlock:
2230     mutex_unlock(&cpuset_mutex);
2231     return ret;
2232 }
2233 
2234 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2235 {
2236     struct cgroup_subsys_state *css;
2237 
2238     cgroup_taskset_first(tset, &css);
2239 
2240     mutex_lock(&cpuset_mutex);
2241     css_cs(css)->attach_in_progress--;
2242     mutex_unlock(&cpuset_mutex);
2243 }
2244 
2245 /*
2246  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
2247  * but we can't allocate it dynamically there.  Define it globally and
2248  * allocate it from cpuset_init().
2249  */
2250 static cpumask_var_t cpus_attach;
2251 
2252 static void cpuset_attach(struct cgroup_taskset *tset)
2253 {
2254     /* static buf protected by cpuset_mutex */
2255     static nodemask_t cpuset_attach_nodemask_to;
2256     struct task_struct *task;
2257     struct task_struct *leader;
2258     struct cgroup_subsys_state *css;
2259     struct cpuset *cs;
2260     struct cpuset *oldcs = cpuset_attach_old_cs;
2261 
2262     cgroup_taskset_first(tset, &css);
2263     cs = css_cs(css);
2264 
2265     mutex_lock(&cpuset_mutex);
2266 
2267     guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2268 
2269     cgroup_taskset_for_each(task, css, tset)
2270     {
2271         if (cs != &top_cpuset) {
2272             guarantee_online_cpus(task, cpus_attach);
2273         } else {
2274             cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
2275         }
2276         /*
2277          * can_attach beforehand should guarantee that this doesn't
2278          * fail.  TODO: have a better way to handle failure here.
2279          */
2280         WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
2281 
2282         cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2283         cpuset_update_task_spread_flag(cs, task);
2284     }
2285 
2286     /*
2287      * Change mm for all threadgroup leaders. This is expensive and may
2288      * sleep and should be moved outside migration path proper.
2289      */
2290     cpuset_attach_nodemask_to = cs->effective_mems;
2291     cgroup_taskset_for_each_leader(leader, css, tset)
2292     {
2293         struct mm_struct *mm = get_task_mm(leader);
2294 
2295         if (mm) {
2296             mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2297 
2298             /*
2299              * old_mems_allowed is the same as mems_allowed
2300              * here, except if this task is being moved
2301              * automatically due to hotplug.  In that case
2302              * @mems_allowed has been updated and is empty, so
2303              * @old_mems_allowed is the right nodemask that we
2304              * migrate the mm from.
2305              */
2306             if (is_memory_migrate(cs)) {
2307                 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, &cpuset_attach_nodemask_to);
2308             } else {
2309                 mmput(mm);
2310             }
2311         }
2312     }
2313 
2314     cs->old_mems_allowed = cpuset_attach_nodemask_to;
2315 
2316     cs->attach_in_progress--;
2317     if (!cs->attach_in_progress) {
2318         wake_up(&cpuset_attach_wq);
2319     }
2320 
2321     mutex_unlock(&cpuset_mutex);
2322 }
2323 
2324 /* The various types of files and directories in a cpuset file system */
2325 
2326 typedef enum {
2327     FILE_MEMORY_MIGRATE,
2328     FILE_CPULIST,
2329     FILE_MEMLIST,
2330     FILE_EFFECTIVE_CPULIST,
2331     FILE_EFFECTIVE_MEMLIST,
2332     FILE_SUBPARTS_CPULIST,
2333     FILE_CPU_EXCLUSIVE,
2334     FILE_MEM_EXCLUSIVE,
2335     FILE_MEM_HARDWALL,
2336     FILE_SCHED_LOAD_BALANCE,
2337     FILE_PARTITION_ROOT,
2338     FILE_SCHED_RELAX_DOMAIN_LEVEL,
2339     FILE_MEMORY_PRESSURE_ENABLED,
2340     FILE_MEMORY_PRESSURE,
2341     FILE_SPREAD_PAGE,
2342     FILE_SPREAD_SLAB,
2343 } cpuset_filetype_t;
2344 
2345 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, u64 val)
2346 {
2347     struct cpuset *cs = css_cs(css);
2348     cpuset_filetype_t type = cft->private;
2349     int retval = 0;
2350 
2351     get_online_cpus();
2352     mutex_lock(&cpuset_mutex);
2353     if (!is_cpuset_online(cs)) {
2354         retval = -ENODEV;
2355         goto out_unlock;
2356     }
2357 
2358     switch (type) {
2359         case FILE_CPU_EXCLUSIVE:
2360             retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2361             break;
2362         case FILE_MEM_EXCLUSIVE:
2363             retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2364             break;
2365         case FILE_MEM_HARDWALL:
2366             retval = update_flag(CS_MEM_HARDWALL, cs, val);
2367             break;
2368         case FILE_SCHED_LOAD_BALANCE:
2369             retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2370             break;
2371         case FILE_MEMORY_MIGRATE:
2372             retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2373             break;
2374         case FILE_MEMORY_PRESSURE_ENABLED:
2375             cpuset_memory_pressure_enabled = !!val;
2376             break;
2377         case FILE_SPREAD_PAGE:
2378             retval = update_flag(CS_SPREAD_PAGE, cs, val);
2379             break;
2380         case FILE_SPREAD_SLAB:
2381             retval = update_flag(CS_SPREAD_SLAB, cs, val);
2382             break;
2383         default:
2384             retval = -EINVAL;
2385             break;
2386     }
2387 out_unlock:
2388     mutex_unlock(&cpuset_mutex);
2389     put_online_cpus();
2390     return retval;
2391 }
2392 
2393 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, s64 val)
2394 {
2395     struct cpuset *cs = css_cs(css);
2396     cpuset_filetype_t type = cft->private;
2397     int retval = -ENODEV;
2398 
2399     get_online_cpus();
2400     mutex_lock(&cpuset_mutex);
2401     if (!is_cpuset_online(cs)) {
2402         goto out_unlock;
2403     }
2404 
2405     switch (type) {
2406         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2407             retval = update_relax_domain_level(cs, val);
2408             break;
2409         default:
2410             retval = -EINVAL;
2411             break;
2412     }
2413 out_unlock:
2414     mutex_unlock(&cpuset_mutex);
2415     put_online_cpus();
2416     return retval;
2417 }
2418 
2419 /*
2420  * Common handling for a write to a "cpus" or "mems" file.
2421  */
2422 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)
2423 {
2424     struct cpuset *cs = css_cs(of_css(of));
2425     struct cpuset *trialcs;
2426     int retval = -ENODEV;
2427 
2428     buf = strstrip(buf);
2429 
2430     /*
2431      * CPU or memory hotunplug may leave @cs w/o any execution
2432      * resources, in which case the hotplug code asynchronously updates
2433      * configuration and transfers all tasks to the nearest ancestor
2434      * which can execute.
2435      *
2436      * As writes to "cpus" or "mems" may restore @cs's execution
2437      * resources, wait for the previously scheduled operations before
2438      * proceeding, so that we don't end up repeatedly removing tasks added
2439      * after execution capability is restored.
2440      *
2441      * cpuset_hotplug_work calls back into cgroup core via
2442      * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2443      * operation like this one can lead to a deadlock through kernfs
2444      * active_ref protection.  Let's break the protection.  Losing the
2445      * protection is okay as we check whether @cs is online after
2446      * grabbing cpuset_mutex anyway.  This only happens on the legacy
2447      * hierarchies.
2448      */
2449     css_get(&cs->css);
2450     kernfs_break_active_protection(of->kn);
2451     flush_work(&cpuset_hotplug_work);
2452 
2453     get_online_cpus();
2454     mutex_lock(&cpuset_mutex);
2455     if (!is_cpuset_online(cs)) {
2456         goto out_unlock;
2457     }
2458 
2459     trialcs = alloc_trial_cpuset(cs);
2460     if (!trialcs) {
2461         retval = -ENOMEM;
2462         goto out_unlock;
2463     }
2464 
2465     switch (of_cft(of)->private) {
2466         case FILE_CPULIST:
2467             retval = update_cpumask(cs, trialcs, buf);
2468             break;
2469         case FILE_MEMLIST:
2470             retval = update_nodemask(cs, trialcs, buf);
2471             break;
2472         default:
2473             retval = -EINVAL;
2474             break;
2475     }
2476 
2477     free_cpuset(trialcs);
2478 out_unlock:
2479     mutex_unlock(&cpuset_mutex);
2480     put_online_cpus();
2481     kernfs_unbreak_active_protection(of->kn);
2482     css_put(&cs->css);
2483     flush_workqueue(cpuset_migrate_mm_wq);
2484     return retval ?: nbytes;
2485 }
2486 
2487 /*
2488  * These ASCII lists should be read in a single call, by using a user
2489  * buffer large enough to hold the entire map.  If read in smaller
2490  * chunks, there is no guarantee of atomicity.  Since the display format
2491  * used, list of ranges of sequential numbers, is variable length,
2492  * and since these maps can change value dynamically, one could read
2493  * gibberish by doing partial reads while a list was changing.
2494  */
2495 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2496 {
2497     struct cpuset *cs = css_cs(seq_css(sf));
2498     cpuset_filetype_t type = seq_cft(sf)->private;
2499     int ret = 0;
2500 
2501     spin_lock_irq(&callback_lock);
2502 
2503     switch (type) {
2504         case FILE_CPULIST:
2505             seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
2506             break;
2507         case FILE_MEMLIST:
2508             seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2509             break;
2510         case FILE_EFFECTIVE_CPULIST:
2511             seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2512             break;
2513         case FILE_EFFECTIVE_MEMLIST:
2514             seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2515             break;
2516         case FILE_SUBPARTS_CPULIST:
2517             seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2518             break;
2519         default:
2520             ret = -EINVAL;
2521     }
2522 
2523     spin_unlock_irq(&callback_lock);
2524     return ret;
2525 }
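
/*
 * Userspace side, for illustration (a sketch; the cgroup mount path and
 * group name are assumptions, not from the original source): read the
 * whole list in a single call with a buffer large enough for the longest
 * range list, so the snapshot stays consistent:
 *
 *	char buf[4096];
 *	int fd = open("/sys/fs/cgroup/cpuset/A/cpuset.cpus", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */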
2526 
2527 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2528 {
2529     struct cpuset *cs = css_cs(css);
2530     cpuset_filetype_t type = cft->private;
2531     switch (type) {
2532         case FILE_CPU_EXCLUSIVE:
2533             return is_cpu_exclusive(cs);
2534         case FILE_MEM_EXCLUSIVE:
2535             return is_mem_exclusive(cs);
2536         case FILE_MEM_HARDWALL:
2537             return is_mem_hardwall(cs);
2538         case FILE_SCHED_LOAD_BALANCE:
2539             return is_sched_load_balance(cs);
2540         case FILE_MEMORY_MIGRATE:
2541             return is_memory_migrate(cs);
2542         case FILE_MEMORY_PRESSURE_ENABLED:
2543             return cpuset_memory_pressure_enabled;
2544         case FILE_MEMORY_PRESSURE:
2545             return fmeter_getrate(&cs->fmeter);
2546         case FILE_SPREAD_PAGE:
2547             return is_spread_page(cs);
2548         case FILE_SPREAD_SLAB:
2549             return is_spread_slab(cs);
2550         default:
2551             BUG();
2552     }
2553 
2554     /* Unreachable but makes gcc happy */
2555     return 0;
2556 }
2557 
2558 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2559 {
2560     struct cpuset *cs = css_cs(css);
2561     cpuset_filetype_t type = cft->private;
2562     switch (type) {
2563         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2564             return cs->relax_domain_level;
2565         default:
2566             BUG();
2567     }
2568 
2569     /* Unreachable but makes gcc happy */
2570     return 0;
2571 }
2572 
2573 static int sched_partition_show(struct seq_file *seq, void *v)
2574 {
2575     struct cpuset *cs = css_cs(seq_css(seq));
2576 
2577     switch (cs->partition_root_state) {
2578         case PRS_ENABLED:
2579             seq_puts(seq, "root\n");
2580             break;
2581         case PRS_DISABLED:
2582             seq_puts(seq, "member\n");
2583             break;
2584         case PRS_ERROR:
2585             seq_puts(seq, "root invalid\n");
2586             break;
2587         default:
2588             break;
2589     }
2590     return 0;
2591 }
2592 
2593 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)
2594 {
2595     struct cpuset *cs = css_cs(of_css(of));
2596     int val;
2597     int retval = -ENODEV;
2598 
2599     buf = strstrip(buf);
2600     /*
2601      * Convert "root" to ENABLED, and convert "member" to DISABLED.
2602      */
2603     if (!strcmp(buf, "root")) {
2604         val = PRS_ENABLED;
2605     } else if (!strcmp(buf, "member")) {
2606         val = PRS_DISABLED;
2607     } else {
2608         return -EINVAL;
2609     }
2610 
2611     css_get(&cs->css);
2612     get_online_cpus();
2613     mutex_lock(&cpuset_mutex);
2614     if (!is_cpuset_online(cs)) {
2615         goto out_unlock;
2616     }
2617 
2618     retval = update_prstate(cs, val);
2619 out_unlock:
2620     mutex_unlock(&cpuset_mutex);
2621     put_online_cpus();
2622     css_put(&cs->css);
2623     return retval ?: nbytes;
2624 }
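
/*
 * Example usage from a cgroup v2 mount (illustrative; the mount point and
 * cgroup name are assumptions, not from the original source):
 *
 *	# echo "0-3" > /sys/fs/cgroup/A/cpuset.cpus
 *	# echo root > /sys/fs/cgroup/A/cpuset.cpus.partition
 *	# cat /sys/fs/cgroup/A/cpuset.cpus.partition
 *	root
 *
 * Reading back "root invalid" means the partition request could not be
 * honored (see sched_partition_show() above).
 */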
2625 
2626 /*
2627  * for the common functions, 'private' gives the type of file
2628  */
2629 
2630 static struct cftype legacy_files[] = {
2631     {
2632         .name = "cpus",
2633         .seq_show = cpuset_common_seq_show,
2634         .write = cpuset_write_resmask,
2635         .max_write_len = (100U + 6 * NR_CPUS),
2636         .private = FILE_CPULIST,
2637     },
2638 
2639     {
2640         .name = "mems",
2641         .seq_show = cpuset_common_seq_show,
2642         .write = cpuset_write_resmask,
2643         .max_write_len = (100U + 6 * MAX_NUMNODES),
2644         .private = FILE_MEMLIST,
2645     },
2646 
2647     {
2648         .name = "effective_cpus",
2649         .seq_show = cpuset_common_seq_show,
2650         .private = FILE_EFFECTIVE_CPULIST,
2651     },
2652 
2653     {
2654         .name = "effective_mems",
2655         .seq_show = cpuset_common_seq_show,
2656         .private = FILE_EFFECTIVE_MEMLIST,
2657     },
2658 
2659     {
2660         .name = "cpu_exclusive",
2661         .read_u64 = cpuset_read_u64,
2662         .write_u64 = cpuset_write_u64,
2663         .private = FILE_CPU_EXCLUSIVE,
2664     },
2665 
2666     {
2667         .name = "mem_exclusive",
2668         .read_u64 = cpuset_read_u64,
2669         .write_u64 = cpuset_write_u64,
2670         .private = FILE_MEM_EXCLUSIVE,
2671     },
2672 
2673     {
2674         .name = "mem_hardwall",
2675         .read_u64 = cpuset_read_u64,
2676         .write_u64 = cpuset_write_u64,
2677         .private = FILE_MEM_HARDWALL,
2678     },
2679 
2680     {
2681         .name = "sched_load_balance",
2682         .read_u64 = cpuset_read_u64,
2683         .write_u64 = cpuset_write_u64,
2684         .private = FILE_SCHED_LOAD_BALANCE,
2685     },
2686 
2687     {
2688         .name = "sched_relax_domain_level",
2689         .read_s64 = cpuset_read_s64,
2690         .write_s64 = cpuset_write_s64,
2691         .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2692     },
2693 
2694     {
2695         .name = "memory_migrate",
2696         .read_u64 = cpuset_read_u64,
2697         .write_u64 = cpuset_write_u64,
2698         .private = FILE_MEMORY_MIGRATE,
2699     },
2700 
2701     {
2702         .name = "memory_pressure",
2703         .read_u64 = cpuset_read_u64,
2704         .private = FILE_MEMORY_PRESSURE,
2705     },
2706 
2707     {
2708         .name = "memory_spread_page",
2709         .read_u64 = cpuset_read_u64,
2710         .write_u64 = cpuset_write_u64,
2711         .private = FILE_SPREAD_PAGE,
2712     },
2713 
2714     {
2715         .name = "memory_spread_slab",
2716         .read_u64 = cpuset_read_u64,
2717         .write_u64 = cpuset_write_u64,
2718         .private = FILE_SPREAD_SLAB,
2719     },
2720 
2721     {
2722         .name = "memory_pressure_enabled",
2723         .flags = CFTYPE_ONLY_ON_ROOT,
2724         .read_u64 = cpuset_read_u64,
2725         .write_u64 = cpuset_write_u64,
2726         .private = FILE_MEMORY_PRESSURE_ENABLED,
2727     },
2728 
2729     {} /* terminate */
2730 };
2731 
2732 /*
2733  * This is currently a minimal set for the default hierarchy. It can be
2734  * expanded later on by migrating more features and control files from v1.
2735  */
2736 static struct cftype dfl_files[] = {
2737     {
2738         .name = "cpus",
2739         .seq_show = cpuset_common_seq_show,
2740         .write = cpuset_write_resmask,
2741         .max_write_len = (100U + 6 * NR_CPUS),
2742         .private = FILE_CPULIST,
2743         .flags = CFTYPE_NOT_ON_ROOT,
2744     },
2745 
2746     {
2747         .name = "mems",
2748         .seq_show = cpuset_common_seq_show,
2749         .write = cpuset_write_resmask,
2750         .max_write_len = (100U + 6 * MAX_NUMNODES),
2751         .private = FILE_MEMLIST,
2752         .flags = CFTYPE_NOT_ON_ROOT,
2753     },
2754 
2755     {
2756         .name = "cpus.effective",
2757         .seq_show = cpuset_common_seq_show,
2758         .private = FILE_EFFECTIVE_CPULIST,
2759     },
2760 
2761     {
2762         .name = "mems.effective",
2763         .seq_show = cpuset_common_seq_show,
2764         .private = FILE_EFFECTIVE_MEMLIST,
2765     },
2766 
2767     {
2768         .name = "cpus.partition",
2769         .seq_show = sched_partition_show,
2770         .write = sched_partition_write,
2771         .private = FILE_PARTITION_ROOT,
2772         .flags = CFTYPE_NOT_ON_ROOT,
2773     },
2774 
2775     {
2776         .name = "cpus.subpartitions",
2777         .seq_show = cpuset_common_seq_show,
2778         .private = FILE_SUBPARTS_CPULIST,
2779         .flags = CFTYPE_DEBUG,
2780     },
2781 
2782     {} /* terminate */
2783 };
2784 
2785 /*
2786  *    cpuset_css_alloc - allocate a cpuset css
2787  *    parent_css:    parent css of the control group that the new cpuset will be part of
2788  */
2789 
2790 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
2791 {
2792     struct cpuset *cs;
2793 
2794     if (!parent_css) {
2795         return &top_cpuset.css;
2796     }
2797 
2798     cs = kzalloc(sizeof(*cs), GFP_KERNEL);
2799     if (!cs) {
2800         return ERR_PTR(-ENOMEM);
2801     }
2802 
2803     if (alloc_cpumasks(cs, NULL)) {
2804         kfree(cs);
2805         return ERR_PTR(-ENOMEM);
2806     }
2807 
2808     set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
2809     nodes_clear(cs->mems_allowed);
2810     nodes_clear(cs->effective_mems);
2811     fmeter_init(&cs->fmeter);
2812     cs->relax_domain_level = -1;
2813 
2814     return &cs->css;
2815 }
2816 
2817 static int cpuset_css_online(struct cgroup_subsys_state *css)
2818 {
2819     struct cpuset *cs = css_cs(css);
2820     struct cpuset *parent = parent_cs(cs);
2821     struct cpuset *tmp_cs;
2822     struct cgroup_subsys_state *pos_css;
2823 
2824     if (!parent) {
2825         return 0;
2826     }
2827 
2828     get_online_cpus();
2829     mutex_lock(&cpuset_mutex);
2830 
2831     set_bit(CS_ONLINE, &cs->flags);
2832     if (is_spread_page(parent)) {
2833         set_bit(CS_SPREAD_PAGE, &cs->flags);
2834     }
2835     if (is_spread_slab(parent)) {
2836         set_bit(CS_SPREAD_SLAB, &cs->flags);
2837     }
2838 
2839     cpuset_inc();
2840 
2841     spin_lock_irq(&callback_lock);
2842     if (is_in_v2_mode()) {
2843         cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2844         cs->effective_mems = parent->effective_mems;
2845         cs->use_parent_ecpus = true;
2846         parent->child_ecpus_count++;
2847     }
2848     spin_unlock_irq(&callback_lock);
2849 
2850     if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) {
2851         goto out_unlock;
2852     }
2853 
2854     /*
2855      * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2856      * set.  This flag handling is implemented in cgroup core for
2857      * historical reasons - the flag may be specified during mount.
2858      *
2859      * Currently, if any sibling cpusets have exclusive cpus or mem, we
2860      * refuse to clone the configuration - thereby refusing the task to
2861      * be entered, and as a result refusing the sys_unshare() or
2862      * clone() which initiated it.  If this becomes a problem for some
2863      * users who wish to allow that scenario, then this could be
2864      * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2865      * (and likewise for mems) to the new cgroup.
2866      */
2867     rcu_read_lock();
2868     cpuset_for_each_child(tmp_cs, pos_css, parent) {
2869         if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2870             rcu_read_unlock();
2871             goto out_unlock;
2872         }
2873     }
2874     rcu_read_unlock();
2875 
2876     spin_lock_irq(&callback_lock);
2877     cs->mems_allowed = parent->mems_allowed;
2878     cs->effective_mems = parent->mems_allowed;
2879     cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2880     cpumask_copy(cs->cpus_requested, parent->cpus_requested);
2881     cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2882     spin_unlock_irq(&callback_lock);
2883 out_unlock:
2884     mutex_unlock(&cpuset_mutex);
2885     put_online_cpus();
2886     return 0;
2887 }
2888 
2889 /*
2890  * If the cpuset being removed has its flag 'sched_load_balance'
2891  * enabled, then simulate turning sched_load_balance off, which
2892  * will call rebuild_sched_domains_locked(). That is not needed
2893  * in the default hierarchy where only changes in partition
2894  * will cause repartitioning.
2895  *
2896  * If the cpuset has the 'sched.partition' flag enabled, simulate
2897  * turning 'sched.partition' off.
2898  */
2899 
2900 static void cpuset_css_offline(struct cgroup_subsys_state *css)
2901 {
2902     struct cpuset *cs = css_cs(css);
2903 
2904     get_online_cpus();
2905     mutex_lock(&cpuset_mutex);
2906 
2907     if (is_partition_root(cs)) {
2908         update_prstate(cs, 0);
2909     }
2910 
2911     if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && is_sched_load_balance(cs)) {
2912         update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2913     }
2914 
2915     if (cs->use_parent_ecpus) {
2916         struct cpuset *parent = parent_cs(cs);
2917 
2918         cs->use_parent_ecpus = false;
2919         parent->child_ecpus_count--;
2920     }
2921 
2922     cpuset_dec();
2923     clear_bit(CS_ONLINE, &cs->flags);
2924 
2925     mutex_unlock(&cpuset_mutex);
2926     put_online_cpus();
2927 }
2928 
2929 static void cpuset_css_free(struct cgroup_subsys_state *css)
2930 {
2931     struct cpuset *cs = css_cs(css);
2932 
2933     free_cpuset(cs);
2934 }
2935 
2936 static void cpuset_bind(struct cgroup_subsys_state *root_css)
2937 {
2938     mutex_lock(&cpuset_mutex);
2939     spin_lock_irq(&callback_lock);
2940 
2941     if (is_in_v2_mode()) {
2942         cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2943         top_cpuset.mems_allowed = node_possible_map;
2944     } else {
2945         cpumask_copy(top_cpuset.cpus_allowed, top_cpuset.effective_cpus);
2946         top_cpuset.mems_allowed = top_cpuset.effective_mems;
2947     }
2948 
2949     spin_unlock_irq(&callback_lock);
2950     mutex_unlock(&cpuset_mutex);
2951 }
2952 
2953 /*
2954  * Make sure the new task conforms to the current state of its parent,
2955  * which could have been changed by cpuset just after it inherits the
2956  * state from the parent and before it sits on the cgroup's task list.
2957  */
2958 static void cpuset_fork(struct task_struct *task)
2959 {
2960     int inherit_cpus = 0;
2961     if (task_css_is_root(task, cpuset_cgrp_id)) {
2962         return;
2963     }
2964 
2965     task->mems_allowed = current->mems_allowed;
2966 }
2967 
2968 struct cgroup_subsys cpuset_cgrp_subsys = {
2969     .css_alloc = cpuset_css_alloc,
2970     .css_online = cpuset_css_online,
2971     .css_offline = cpuset_css_offline,
2972     .css_free = cpuset_css_free,
2973     .can_attach = cpuset_can_attach,
2974     .cancel_attach = cpuset_cancel_attach,
2975     .attach = cpuset_attach,
2976     .post_attach = cpuset_post_attach,
2977     .bind = cpuset_bind,
2978     .fork = cpuset_fork,
2979     .legacy_cftypes = legacy_files,
2980     .dfl_cftypes = dfl_files,
2981     .early_init = true,
2982     .threaded = true,
2983 };
2984 
2985 /**
2986  * cpuset_init - initialize cpusets at system boot
2987  *
2988  * Description: Initialize top_cpuset
2989  **/
2990 
2991 int __init cpuset_init(void)
2992 {
2993     BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2994     BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));
2995     BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
2996     BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
2997 
2998     cpumask_setall(top_cpuset.cpus_allowed);
2999     cpumask_setall(top_cpuset.cpus_requested);
3000     nodes_setall(top_cpuset.mems_allowed);
3001     cpumask_setall(top_cpuset.effective_cpus);
3002     nodes_setall(top_cpuset.effective_mems);
3003 
3004     fmeter_init(&top_cpuset.fmeter);
3005     set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3006     top_cpuset.relax_domain_level = -1;
3007 
3008     BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3009 
3010     return 0;
3011 }
3012 
3013 /*
3014  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3015  * or memory nodes, we need to walk over the cpuset hierarchy,
3016  * removing that CPU or node from all cpusets.  If this removes the
3017  * last CPU or node from a cpuset, then move the tasks in the empty
3018  * cpuset to its next-highest non-empty parent.
3019  */
3020 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3021 {
3022     struct cpuset *parent;
3023 
3024     /*
3025      * Find its next-highest non-empty parent (the top cpuset
3026      * has online cpus, so it can't be empty).
3027      */
3028     parent = parent_cs(cs);
3029     while (cpumask_empty(parent->cpus_allowed) || nodes_empty(parent->mems_allowed)) {
3030         parent = parent_cs(parent);
3031     }
3032 
3033     if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3034         pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3035         pr_cont_cgroup_name(cs->css.cgroup);
3036         pr_cont("\n");
3037     }
3038 }
3039 
3040 static void hotplug_update_tasks_legacy(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems,
3041                                         bool cpus_updated, bool mems_updated)
3042 {
3043     bool is_empty;
3044 
3045     spin_lock_irq(&callback_lock);
3046     cpumask_copy(cs->cpus_allowed, new_cpus);
3047     cpumask_copy(cs->effective_cpus, new_cpus);
3048     cs->mems_allowed = *new_mems;
3049     cs->effective_mems = *new_mems;
3050     spin_unlock_irq(&callback_lock);
3051 
3052     /*
3053      * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3054      * as the tasks will be migrated to an ancestor.
3055      */
3056     if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) {
3057         update_tasks_cpumask(cs);
3058     }
3059     if (mems_updated && !nodes_empty(cs->mems_allowed)) {
3060         update_tasks_nodemask(cs);
3061     }
3062 
3063     is_empty = cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed);
3064 
3065     mutex_unlock(&cpuset_mutex);
3066 
3067     /*
3068      * Move tasks to the nearest ancestor with execution resources.
3069      * This is a full cgroup operation which will also call back into
3070      * cpuset and should be done outside any lock.
3071      */
3072     if (is_empty) {
3073         remove_tasks_in_empty_cpuset(cs);
3074     }
3075 
3076     mutex_lock(&cpuset_mutex);
3077 }
3078 
3079 static void hotplug_update_tasks(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems, bool cpus_updated,
3080                                  bool mems_updated)
3081 {
3082     if (cpumask_empty(new_cpus)) {
3083         cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3084     }
3085     if (nodes_empty(*new_mems)) {
3086         *new_mems = parent_cs(cs)->effective_mems;
3087     }
3088 
3089     spin_lock_irq(&callback_lock);
3090     cpumask_copy(cs->effective_cpus, new_cpus);
3091     cs->effective_mems = *new_mems;
3092     spin_unlock_irq(&callback_lock);
3093 
3094     if (cpus_updated) {
3095         update_tasks_cpumask(cs);
3096     }
3097     if (mems_updated) {
3098         update_tasks_nodemask(cs);
3099     }
3100 }
3101 
3102 static bool force_rebuild;
3103 
3104 void cpuset_force_rebuild(void)
3105 {
3106     force_rebuild = true;
3107 }
3108 
3109 /**
3110  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3111  * @cs: cpuset in interest
3112  * @tmp: the tmpmasks structure pointer
3113  *
3114  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3115  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3116  * all its tasks are moved to the nearest ancestor with both resources.
3117  */
3118 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3119 {
3120     static cpumask_t new_cpus;
3121     static nodemask_t new_mems;
3122     bool cpus_updated;
3123     bool mems_updated;
3124     struct cpuset *parent;
3125 
3126     while (1) {
3127         wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3128 
3129         mutex_lock(&cpuset_mutex);
3130 
3131         /*
3132          * We have raced with task attaching. We wait until attaching
3133          * is finished, so we won't attach a task to an empty cpuset.
3134          */
3135         if (cs->attach_in_progress) {
3136             mutex_unlock(&cpuset_mutex);
3137             continue;
3138         }
3139         break;
3140     }
3141 
3142     parent = parent_cs(cs);
3143     compute_effective_cpumask(&new_cpus, cs, parent);
3144     nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3145 
3146     if (cs->nr_subparts_cpus) {
3147         /*
3148          * Make sure that CPUs allocated to child partitions
3149          * do not show up in effective_cpus.
3150          */
3151         cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3152     }
3153 
3154     if (!tmp || !cs->partition_root_state) {
3155         goto update_tasks;
3156     }
3157 
3158     /*
3159      * In the unlikely event that a partition root has empty
3160      * effective_cpus or its parent becomes erroneous, we have to
3161      * transition it to the erroneous state.
3162      */
3163     if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || (parent->partition_root_state == PRS_ERROR))) {
3164         if (cs->nr_subparts_cpus) {
3165             spin_lock_irq(&callback_lock);
3166             cs->nr_subparts_cpus = 0;
3167             cpumask_clear(cs->subparts_cpus);
3168             spin_unlock_irq(&callback_lock);
3169             compute_effective_cpumask(&new_cpus, cs, parent);
3170         }
3171 
3172         /*
3173          * If the effective_cpus is empty because the child
3174          * partitions take away all the CPUs, we can keep
3175          * the current partition and let the child partitions
3176          * fight for available CPUs.
3177          */
3178         if ((parent->partition_root_state == PRS_ERROR) || cpumask_empty(&new_cpus)) {
3179             update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3180             spin_lock_irq(&callback_lock);
3181             cs->partition_root_state = PRS_ERROR;
3182             spin_unlock_irq(&callback_lock);
3183         }
3184         cpuset_force_rebuild();
3185     }
3186 
3187     /*
3188      * On the other hand, an erroneous partition root may be transitioned
3189      * back to a regular one, or a partition root with no CPU allocated
3190      * from the parent may change to erroneous.
3191      */
3192     if (is_partition_root(parent) &&
3193         ((cs->partition_root_state == PRS_ERROR) || !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3194         update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) {
3195         cpuset_force_rebuild();
3196     }
3197 
3198 update_tasks:
3199     cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3200     mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3201 
3202     if (is_in_v2_mode()) {
3203         hotplug_update_tasks(cs, &new_cpus, &new_mems, cpus_updated, mems_updated);
3204     } else {
3205         hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, cpus_updated, mems_updated);
3206     }
3207 
3208     mutex_unlock(&cpuset_mutex);
3209 }
3210 
3211 /**
3212  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3213  *
3214  * This function is called after either CPU or memory configuration has
3215  * changed and updates cpuset accordingly.  The top_cpuset is always
3216  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3217  * order to make cpusets transparent (of no effect) on systems that are
3218  * actively using CPU hotplug but making no active use of cpusets.
3219  *
3220  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3221  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3222  * all descendants.
3223  *
3224  * Note that CPU offlining during suspend is ignored.  We don't modify
3225  * cpusets across suspend/resume cycles at all.
3226  */
3227 void cpuset_hotplug_workfn(struct work_struct *work)
3228 {
3229     static cpumask_t new_cpus;
3230     static nodemask_t new_mems;
3231     bool cpus_updated, mems_updated;
3232     bool on_dfl = is_in_v2_mode();
3233     struct tmpmasks tmp, *ptmp = NULL;
3234 
3235     if (on_dfl && !alloc_cpumasks(NULL, &tmp)) {
3236         ptmp = &tmp;
3237     }
3238 
3239     mutex_lock(&cpuset_mutex);
3240 
3241     /* fetch the available cpus/mems and find out which changed how */
3242     cpumask_copy(&new_cpus, cpu_active_mask);
3243     new_mems = node_states[N_MEMORY];
3244 
3245     /*
3246      * If subparts_cpus is populated, it is likely that the check below
3247      * will produce a false positive on cpus_updated when the cpu list
3248      * isn't changed. It is extra work, but it is better to be safe.
3249      */
3250     cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3251     mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3252 
3253     /*
3254      * In the rare case that hotplug removes all the cpus in subparts_cpus,
3255      * we assume that cpus are updated.
3256      */
3257     if (!cpus_updated && top_cpuset.nr_subparts_cpus) {
3258         cpus_updated = true;
3259     }
3260 
3261     /* synchronize cpus_allowed to cpu_active_mask */
3262     if (cpus_updated) {
3263         spin_lock_irq(&callback_lock);
3264         if (!on_dfl) {
3265             cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3266         }
3267         /*
3268          * Make sure that CPUs allocated to child partitions
3269          * do not show up in effective_cpus. If no CPU is left,
3270          * we clear the subparts_cpus & let the child partitions
3271          * fight for the CPUs again.
3272          */
3273         if (top_cpuset.nr_subparts_cpus) {
3274             if (cpumask_subset(&new_cpus, top_cpuset.subparts_cpus)) {
3275                 top_cpuset.nr_subparts_cpus = 0;
3276                 cpumask_clear(top_cpuset.subparts_cpus);
3277             } else {
3278                 cpumask_andnot(&new_cpus, &new_cpus, top_cpuset.subparts_cpus);
3279             }
3280         }
3281         cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3282         spin_unlock_irq(&callback_lock);
3283         /* we don't mess with cpumasks of tasks in top_cpuset */
3284     }
3285 
3286     /* synchronize mems_allowed to N_MEMORY */
3287     if (mems_updated) {
3288         spin_lock_irq(&callback_lock);
3289         if (!on_dfl) {
3290             top_cpuset.mems_allowed = new_mems;
3291         }
3292         top_cpuset.effective_mems = new_mems;
3293         spin_unlock_irq(&callback_lock);
3294         update_tasks_nodemask(&top_cpuset);
3295     }
3296 
3297     mutex_unlock(&cpuset_mutex);
3298 
3299     /* if cpus or mems changed, we need to propagate to descendants */
3300     if (cpus_updated || mems_updated) {
3301         struct cpuset *cs;
3302         struct cgroup_subsys_state *pos_css;
3303 
3304         rcu_read_lock();
3305         cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3306             if (cs == &top_cpuset || !css_tryget_online(&cs->css)) {
3307                 continue;
3308             }
3309             rcu_read_unlock();
3310 
3311             cpuset_hotplug_update_tasks(cs, ptmp);
3312 
3313             rcu_read_lock();
3314             css_put(&cs->css);
3315         }
3316         rcu_read_unlock();
3317     }
3318 
3319     /* rebuild sched domains if cpus_allowed has changed */
3320     if (cpus_updated || force_rebuild) {
3321         force_rebuild = false;
3322         rebuild_sched_domains();
3323     }
3324 
3325     free_cpumasks(NULL, ptmp);
3326 }
3327 
3328 void cpuset_update_active_cpus(void)
3329 {
3330     /*
3331      * We're inside cpu hotplug critical region which usually nests
3332      * inside cgroup synchronization.  Bounce actual hotplug processing
3333      * to a work item to avoid reverse locking order.
3334      */
3335     schedule_work(&cpuset_hotplug_work);
3336 }
3337 
3338 void cpuset_wait_for_hotplug(void)
3339 {
3340     flush_work(&cpuset_hotplug_work);
3341 }
3342 EXPORT_SYMBOL_GPL(cpuset_wait_for_hotplug);
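
/*
 * Example (illustrative sketch, not part of this file): the hotplug side
 * only queues the deferred update via cpuset_update_active_cpus(); a caller
 * that must observe the propagated masks, and that sits outside the CPU
 * hotplug critical section (e.g. a resume-side path), flushes the pending
 * work.  Both example_* helpers below are hypothetical.
 */
static void example_after_cpu_offline(void)
{
    cpuset_update_active_cpus();    /* only schedules cpuset_hotplug_work */
}

static void example_resume_tail(void)
{
    cpuset_wait_for_hotplug();      /* wait for the pending workfn to finish */
}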
3343 
3344 /*
3345  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3346  * Call this routine anytime after node_states[N_MEMORY] changes.
3347  * See cpuset_update_active_cpus() for CPU hotplug handling.
3348  */
3349 static int cpuset_track_online_nodes(struct notifier_block *self, unsigned long action, void *arg)
3350 {
3351     schedule_work(&cpuset_hotplug_work);
3352     return NOTIFY_OK;
3353 }
3354 
3355 static struct notifier_block cpuset_track_online_nodes_nb = {
3356     .notifier_call = cpuset_track_online_nodes, .priority = 10, /* ??! */
3357 };
3358 
3359 /**
3360  * cpuset_init_smp - initialize cpus_allowed
3361  *
3362  * Description: Finish top cpuset after cpu, node maps are initialized
3363  */
3364 void __init cpuset_init_smp(void)
3365 {
3366     /*
3367      * cpus_allowed/mems_allowed set to v2 values in the initial
3368      * cpuset_bind() call will be reset to v1 values in another
3369      * cpuset_bind() call when v1 cpuset is mounted.
3370      */
3371     top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3372 
3373     cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3374     top_cpuset.effective_mems = node_states[N_MEMORY];
3375 
3376     register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
3377 
3378     cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3379     BUG_ON(!cpuset_migrate_mm_wq);
3380 }
3381 
3382 /**
3383  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3384  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3385  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3386  *
3387  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3388  * attached to the specified @tsk.  Guaranteed to return some non-empty
3389  * subset of cpu_online_mask, even if this means going outside the
3390  * task's cpuset.
3391  **/
3392 
3393 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3394 {
3395     unsigned long flags;
3396 
3397     spin_lock_irqsave(&callback_lock, flags);
3398     rcu_read_lock();
3399     guarantee_online_cpus(tsk, pmask);
3400     rcu_read_unlock();
3401     spin_unlock_irqrestore(&callback_lock, flags);
3402 }
3403 EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
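
/*
 * Example (illustrative sketch): a caller that needs a usable affinity mask
 * for a task can rely on the non-empty guarantee documented above.
 * example_query_affinity() is hypothetical.
 */
static void example_query_affinity(struct task_struct *tsk)
{
    cpumask_var_t mask;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
        return;
    }
    cpuset_cpus_allowed(tsk, mask);
    /* mask is now a non-empty subset of cpu_online_mask */
    pr_info("cpuset affinity: %*pbl\n", cpumask_pr_args(mask));
    free_cpumask_var(mask);
}
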
3404 /**
3405  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3406  * @tsk: pointer to task_struct with which the scheduler is struggling
3407  *
3408  * Description: In the case that the scheduler cannot find an allowed cpu in
3409  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3410  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3411  * which will not contain a sane cpumask during cases such as cpu hotplugging.
3412  * This is the absolute last resort for the scheduler and it is only used if
3413  * _every_ other avenue has been traveled.
3414  **/
3415 
3416 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3417 {
3418     const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3419     const struct cpumask *cs_mask;
3420 
3421     rcu_read_lock();
3422     cs_mask = task_cs(tsk)->cpus_allowed;
3423     if (!is_in_v2_mode() || !cpumask_subset(cs_mask, possible_mask)) {
3424         goto unlock; /* select_fallback_rq will try harder */
3425     }
3426 
3427     do_set_cpus_allowed(tsk, cs_mask);
3428 unlock:
3429     rcu_read_unlock();
3430 
3431     /*
3432      * We own tsk->cpus_allowed, nobody can change it under us.
3433      *
3434      * But we used cs && cs->cpus_allowed lockless and thus can
3435      * race with cgroup_attach_task() or update_cpumask() and get
3436      * the wrong tsk->cpus_allowed. However, both cases imply the
3437      * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3438      * which takes task_rq_lock().
3439      *
3440      * If we are called after it dropped the lock we must see all
3441      * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
3442      * set any mask even if it is not right from task_cs() pov,
3443      * the pending set_cpus_allowed_ptr() will fix things.
3444      *
3445      * select_fallback_rq() will fix things up and set cpu_possible_mask
3446      * if required.
3447      */
3448 }
3449 
3450 void __init cpuset_init_current_mems_allowed(void)
3451 {
3452     nodes_setall(current->mems_allowed);
3453 }
3454 
3455 /**
3456  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
3457  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3458  *
3459  * Description: Returns the nodemask_t mems_allowed of the cpuset
3460  * attached to the specified @tsk.  Guaranteed to return some non-empty
3461  * subset of node_states[N_MEMORY], even if this means going outside the
3462  * task's cpuset.
3463  **/
3464 
3465 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3466 {
3467     nodemask_t mask;
3468     unsigned long flags;
3469 
3470     spin_lock_irqsave(&callback_lock, flags);
3471     rcu_read_lock();
3472     guarantee_online_mems(task_cs(tsk), &mask);
3473     rcu_read_unlock();
3474     spin_unlock_irqrestore(&callback_lock, flags);
3475 
3476     return mask;
3477 }
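
/*
 * Example (illustrative sketch): comparing another task's allowed nodes with
 * the current task's, in the spirit of the OOM-killer checks further below.
 * example_shares_memory() is hypothetical.
 */
static bool example_shares_memory(struct task_struct *tsk)
{
    nodemask_t mask = cpuset_mems_allowed(tsk);

    return nodes_intersects(mask, current->mems_allowed);
}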
3478 
3479 /**
3480  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3481  * @nodemask: the nodemask to be checked
3482  *
3483  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3484  */
3485 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3486 {
3487     return nodes_intersects(*nodemask, current->mems_allowed);
3488 }
3489 
3490 /*
3491  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3492  * mem_hardwall ancestor to the specified cpuset.  Call holding
3493  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
3494  * (an unusual configuration), then returns the root cpuset.
3495  */
3496 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3497 {
3498     while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) {
3499         cs = parent_cs(cs);
3500     }
3501     return cs;
3502 }
3503 
3504 /**
3505  * cpuset_node_allowed - Can we allocate on a memory node?
3506  * @node: is this an allowed node?
3507  * @gfp_mask: memory allocation flags
3508  *
3509  * If we're in interrupt, yes, we can always allocate.  If @node is set in
3510  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
3511  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3512  * yes.  If current has access to memory reserves as an oom victim, yes.
3513  * Otherwise, no.
3514  *
3515  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3516  * and do not allow allocations outside the current task's cpuset
3517  * unless the task has been OOM killed.
3518  * GFP_KERNEL allocations are not so marked, so can escape to the
3519  * nearest enclosing hardwalled ancestor cpuset.
3520  *
3521  * Scanning up parent cpusets requires callback_lock.  The
3522  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3523  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3524  * current task's mems_allowed came up empty on the first pass over
3525  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
3526  * cpuset are short of memory, might require taking the callback_lock.
3527  *
3528  * The first call here from mm/page_alloc:get_page_from_freelist()
3529  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3530  * so no allocation on a node outside the cpuset is allowed (unless
3531  * in interrupt, of course).
3532  *
3533  * The second pass through get_page_from_freelist() doesn't even call
3534  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
3535  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3536  * in alloc_flags.  That logic and the checks below have the combined
3537  * effect that:
3538  *    in_interrupt - any node ok (current task context irrelevant)
3539  *    GFP_ATOMIC   - any node ok
3540  *    tsk_is_oom_victim   - any node ok
3541  *    GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
3542  *    GFP_USER     - only nodes in current task's mems_allowed ok.
3543  */
3544 bool _cpuset_node_allowed(int node, gfp_t gfp_mask)
3545 {
3546     struct cpuset *cs; /* current cpuset ancestors */
3547     int allowed;       /* is allocation in zone z allowed? */
3548     unsigned long flags;
3549 
3550     if (in_interrupt()) {
3551         return true;
3552     }
3553     if (node_isset(node, current->mems_allowed)) {
3554         return true;
3555     }
3556     /*
3557      * Allow tasks that have access to memory reserves because they have
3558      * been OOM killed to get memory anywhere.
3559      */
3560     if (unlikely(tsk_is_oom_victim(current))) {
3561         return true;
3562     }
3563     if (gfp_mask & __GFP_HARDWALL) { /* If hardwall request, stop here */
3564         return false;
3565     }
3566 
3567     if (current->flags & PF_EXITING) { /* Let dying task have memory */
3568         return true;
3569     }
3570 
3571     /* Not hardwall and node outside mems_allowed: scan up cpusets */
3572     spin_lock_irqsave(&callback_lock, flags);
3573 
3574     rcu_read_lock();
3575     cs = nearest_hardwall_ancestor(task_cs(current));
3576     allowed = node_isset(node, cs->mems_allowed);
3577     rcu_read_unlock();
3578 
3579     spin_unlock_irqrestore(&callback_lock, flags);
3580     return allowed;
3581 }
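
/*
 * Example (illustrative sketch): the effect of __GFP_HARDWALL on the checks
 * above.  GFP_USER carries __GFP_HARDWALL, so the walk up to the nearest
 * hardwalled ancestor is skipped; GFP_KERNEL may escape to that ancestor.
 * example_node_checks() is hypothetical.
 */
static void example_node_checks(int node)
{
    bool user_ok = _cpuset_node_allowed(node, GFP_USER);     /* hardwall only */
    bool kernel_ok = _cpuset_node_allowed(node, GFP_KERNEL); /* may use ancestor */

    pr_debug("node %d: GFP_USER=%d GFP_KERNEL=%d\n", node, user_ok, kernel_ok);
}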
3582 
3583 /**
3584  * cpuset_mem_spread_node() - On which node to begin search for a file page
3585  * cpuset_slab_spread_node() - On which node to begin search for a slab page
3586  *
3587  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3588  * tasks in a cpuset with is_spread_page or is_spread_slab set),
3589  * and if the memory allocation used cpuset_mem_spread_node()
3590  * to determine on which node to start looking, as it will for
3591  * certain page cache or slab cache pages such as used for file
3592  * system buffers and inode caches, then instead of starting on the
3593  * local node to look for a free page, rather spread the starting
3594  * node around the task's mems_allowed nodes.
3595  *
3596  * We don't have to worry about the returned node being offline
3597  * because "it can't happen", and even if it did, it would be ok.
3598  *
3599  * The routines calling guarantee_online_mems() are careful to
3600  * only set nodes in task->mems_allowed that are online.  So it
3601  * should not be possible for the following code to return an
3602  * offline node.  But if it did, that would be ok, as this routine
3603  * is not returning the node where the allocation must be, only
3604  * the node where the search should start.  The zonelist passed to
3605  * __alloc_pages() will include all nodes.  If the slab allocator
3606  * is passed an offline node, it will fall back to the local node.
3607  * See kmem_cache_alloc_node().
3608  */
3609 
3610 static int cpuset_spread_node(int *rotor)
3611 {
3612     return *rotor = next_node_in(*rotor, current->mems_allowed);
3613 }
3614 
3615 int cpuset_mem_spread_node(void)
3616 {
3617     if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) {
3618         current->cpuset_mem_spread_rotor = node_random(&current->mems_allowed);
3619     }
3620 
3621     return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3622 }
3623 
3624 int cpuset_slab_spread_node(void)
3625 {
3626     if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) {
3627         current->cpuset_slab_spread_rotor = node_random(&current->mems_allowed);
3628     }
3629 
3630     return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3631 }
3632 
3633 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
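
/*
 * Example (illustrative sketch): a spread-aware allocation picks its starting
 * node from the rotor instead of the local node.  The caller is assumed to
 * have already checked the task's spread flag; the page may still come from
 * another node via the zonelist fallback.  example_spread_alloc() is
 * hypothetical.
 */
static struct page *example_spread_alloc(gfp_t gfp)
{
    int nid = cpuset_mem_spread_node();

    return alloc_pages_node(nid, gfp, 0);
}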
3634 
3635 /**
3636  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3637  * @tsk1: pointer to task_struct of some task.
3638  * @tsk2: pointer to task_struct of some other task.
3639  *
3640  * Description: Return true if @tsk1's mems_allowed intersects the
3641  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
3642  * one of the task's memory usage might impact the memory available
3643  * to the other.
3644  **/
3645 
3646 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2)
3647 {
3648     return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
3649 }
3650 
3651 /**
3652  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3653  *
3654  * Description: Prints current's name, cpuset name, and cached copy of its
3655  * mems_allowed to the kernel log.
3656  */
3657 void cpuset_print_current_mems_allowed(void)
3658 {
3659     struct cgroup *cgrp;
3660 
3661     rcu_read_lock();
3662 
3663     cgrp = task_cs(current)->css.cgroup;
3664     pr_cont(",cpuset=");
3665     pr_cont_cgroup_name(cgrp);
3666     pr_cont(",mems_allowed=%*pbl", nodemask_pr_args(&current->mems_allowed));
3667 
3668     rcu_read_unlock();
3669 }
3670 
3671 /*
3672  * Collection of memory_pressure is suppressed unless
3673  * this flag is enabled by writing "1" to the special
3674  * cpuset file 'memory_pressure_enabled' in the root cpuset.
3675  */
3676 
3677 int cpuset_memory_pressure_enabled __read_mostly;
3678 
3679 /**
3680  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3681  *
3682  * Keep a running average of the rate of synchronous (direct)
3683  * page reclaim efforts initiated by tasks in each cpuset.
3684  *
3685  * This represents the rate at which some task in the cpuset
3686  * ran low on memory on all nodes it was allowed to use, and
3687  * had to enter the kernels page reclaim code in an effort to
3688  * create more free memory by tossing clean pages or swapping
3689  * or writing dirty pages.
3690  *
3691  * Display to user space in the per-cpuset read-only file
3692  * "memory_pressure".  Value displayed is an integer
3693  * representing the recent rate of entry into the synchronous
3694  * (direct) page reclaim by any task attached to the cpuset.
3695  **/
3696 
3697 void _cpuset_memory_pressure_bump(void)
3698 {
3699     rcu_read_lock();
3700     fmeter_markevent(&task_cs(current)->fmeter);
3701     rcu_read_unlock();
3702 }
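
/*
 * Example (illustrative sketch): a direct-reclaim path would bump the meter
 * only when collection has been enabled from the root cpuset, mirroring how
 * cpuset_memory_pressure_enabled above is meant to gate the call.
 * example_note_direct_reclaim() is hypothetical.
 */
static void example_note_direct_reclaim(void)
{
    if (cpuset_memory_pressure_enabled) {
        _cpuset_memory_pressure_bump();
    }
}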
3703 
3704 #ifdef CONFIG_PROC_PID_CPUSET
3705 /*
3706  * proc_cpuset_show()
3707  *  - Print task's cpuset path into seq_file.
3708  *  - Used for /proc/<pid>/cpuset.
3709  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3710  *    doesn't really matter if tsk->cpuset changes after we read it,
3711  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
3712  *    anyway.
3713  */
3714 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk)
3715 {
3716     char *buf;
3717     struct cgroup_subsys_state *css;
3718     int retval;
3719 
3720     retval = -ENOMEM;
3721     buf = kmalloc(PATH_MAX, GFP_KERNEL);
3722     if (!buf) {
3723         goto out;
3724     }
3725 
3726     css = task_get_css(tsk, cpuset_cgrp_id);
3727     retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, current->nsproxy->cgroup_ns);
3728     css_put(css);
3729     if (retval >= PATH_MAX) {
3730         retval = -ENAMETOOLONG;
3731     }
3732     if (retval < 0) {
3733         goto out_free;
3734     }
3735     seq_puts(m, buf);
3736     seq_putc(m, '\n');
3737     retval = 0;
3738 out_free:
3739     kfree(buf);
3740 out:
3741     return retval;
3742 }
3743 #endif /* CONFIG_PROC_PID_CPUSET */
3744 
3745 /* Display task mems_allowed in /proc/<pid>/status file. */
3746 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
3747 {
3748     seq_printf(m, "Mems_allowed:\t%*pb\n", nodemask_pr_args(&task->mems_allowed));
3749     seq_printf(m, "Mems_allowed_list:\t%*pbl\n", nodemask_pr_args(&task->mems_allowed));
3750 }
3751