// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY HZ

#define CGROUP_ARRAY_INDEX_ZERO 0
#define CGROUP_ARRAY_INDEX_ONE 1
#define CGROUP_ARRAY_INDEX_TWO 2

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
    return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
    struct cgroup_root *root;
    int retval = 0;

    mutex_lock(&cgroup_mutex);
    percpu_down_write(&cgroup_threadgroup_rwsem);
    for_each_root(root) {
        struct cgroup *from_cgrp;

        if (root == &cgrp_dfl_root) {
            continue;
        }

        spin_lock_irq(&css_set_lock);
        from_cgrp = task_cgroup_from_root(from, root);
        spin_unlock_irq(&css_set_lock);

        retval = cgroup_attach_task(from_cgrp, tsk, false);
        if (retval) {
            break;
        }
    }
    percpu_up_write(&cgroup_threadgroup_rwsem);
    mutex_unlock(&cgroup_mutex);

    return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
    DEFINE_CGROUP_MGCTX(mgctx);
    struct cgrp_cset_link *link;
    struct css_task_iter it;
    struct task_struct *task;
    int ret;

    if (cgroup_on_dfl(to)) {
        return -EINVAL;
    }

    ret = cgroup_migrate_vet_dst(to);
    if (ret) {
        return ret;
    }

    mutex_lock(&cgroup_mutex);

    percpu_down_write(&cgroup_threadgroup_rwsem);

    /* all tasks in @from are being moved, all csets are source */
    spin_lock_irq(&css_set_lock);
    list_for_each_entry(link, &from->cset_links, cset_link)
        cgroup_migrate_add_src(link->cset, to, &mgctx);
    spin_unlock_irq(&css_set_lock);

    ret = cgroup_migrate_prepare_dst(&mgctx);
    if (ret) {
        goto out_err;
    }

    /*
     * Migrate tasks one-by-one until @from is empty.  This fails iff
     * ->can_attach() fails.
     */
    do {
        css_task_iter_start(&from->self, 0, &it);

        do {
            task = css_task_iter_next(&it);
        } while (task && (task->flags & PF_EXITING));

        if (task) {
            get_task_struct(task);
        }
        css_task_iter_end(&it);

        if (task) {
            ret = cgroup_migrate(task, false, &mgctx);
            if (!ret) {
                TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
            }
            put_task_struct(task);
        }
    } while (task && !ret);
out_err:
    cgroup_migrate_finish(&mgctx);
    percpu_up_write(&cgroup_threadgroup_rwsem);
    mutex_unlock(&cgroup_mutex);
    return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
    CGROUP_FILE_PROCS,
    CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
    /*
     * used to find which pidlist is wanted. doesn't change as long as
     * this particular list stays in the list.
     */
    struct {
        enum cgroup_filetype type;
        struct pid_namespace *ns;
    } key;
    /* array of xids */
    pid_t *list;
    /* how many elements the above list has */
    int length;
    /* each of these stored in a list by its cgroup */
    struct list_head links;
    /* pointer to the cgroup we belong to, for list removal purposes */
    struct cgroup *owner;
    /* for delayed destruction */
    struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
    struct cgroup_pidlist *l, *tmp_l;

    mutex_lock(&cgrp->pidlist_mutex);
    list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
        mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
    mutex_unlock(&cgrp->pidlist_mutex);

    flush_workqueue(cgroup_pidlist_destroy_wq);
    BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist, destroy_dwork);
    struct cgroup_pidlist *tofree = NULL;

    mutex_lock(&l->owner->pidlist_mutex);

    /*
     * Destroy iff we didn't get queued again.  The state won't change
     * as destroy_dwork can only be queued while locked.
     */
    if (!delayed_work_pending(dwork)) {
        list_del(&l->links);
        kvfree(l->list);
        put_pid_ns(l->key.ns);
        tofree = l;
    }

    mutex_unlock(&l->owner->pidlist_mutex);
    kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries.
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
    int src, dest = 1;

    /*
     * we presume the 0th element is unique, so src starts at 1. trivial
     * edge cases first; no work needs to be done for either
     */
    if (length == 0 || length == 1) {
        return length;
    }
    /* src and dest walk down the list; dest counts unique elements */
    for (src = 1; src < length; src++) {
        /* find next unique element */
        while (list[src] == list[src - 1]) {
            src++;
            if (src == length) {
                goto after;
            }
        }
        /* dest always points to where the next unique element goes */
        list[dest] = list[src];
        dest++;
    }
after:
    return dest;
}

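/*
 * For illustration (not part of the upstream source): given a sorted
 * input, pidlist_uniq() compacts duplicates in place and returns the
 * new length; only the first 'dest' entries remain meaningful.
 *
 *     pid_t pids[] = { 3, 3, 5, 7, 7, 7 };
 *     int n = pidlist_uniq(pids, 6);   // pids begins {3, 5, 7, ...}, n == 3
 */
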
/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
    return *(pid_t *)a - *(pid_t *)b;
}

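/*
 * Note that the subtraction in cmppid() cannot overflow: both values
 * are vnr pids, i.e. non-negative and bounded by PID_MAX_LIMIT, so the
 * difference always fits in an int.
 */
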
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, enum cgroup_filetype type)
{
    struct cgroup_pidlist *l;
    /* don't need task_nsproxy() if we're looking at ourself */
    struct pid_namespace *ns = task_active_pid_ns(current);

    lockdep_assert_held(&cgrp->pidlist_mutex);

    list_for_each_entry(l, &cgrp->pidlists, links)
        if (l->key.type == type && l->key.ns == ns)
            return l;
    return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, enum cgroup_filetype type)
{
    struct cgroup_pidlist *l;

    lockdep_assert_held(&cgrp->pidlist_mutex);

    l = cgroup_pidlist_find(cgrp, type);
    if (l) {
        return l;
    }

    /* entry not found; create a new one */
    l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
    if (!l) {
        return l;
    }

    INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
    l->key.type = type;
    /* don't need task_nsproxy() if we're looking at ourself */
    l->key.ns = get_pid_ns(task_active_pid_ns(current));
    l->owner = cgrp;
    list_add(&l->links, &cgrp->pidlists);
    return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, struct cgroup_pidlist **lp)
{
    pid_t *array;
    int length;
    int pid, n = 0; /* used for populating the array */
    struct css_task_iter it;
    struct task_struct *tsk;
    struct cgroup_pidlist *l;

    lockdep_assert_held(&cgrp->pidlist_mutex);

    /*
     * If cgroup gets more users after we read count, we won't have
     * enough space - tough.  This race is indistinguishable to the
     * caller from the case that the additional cgroup users didn't
     * show up until sometime later on.
     */
    length = cgroup_task_count(cgrp);
    array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
    if (!array) {
        return -ENOMEM;
    }
    /* now, populate the array */
    css_task_iter_start(&cgrp->self, 0, &it);
    while ((tsk = css_task_iter_next(&it))) {
        if (unlikely(n == length)) {
            break;
        }
        /* get tgid or pid for procs or tasks file respectively */
        if (type == CGROUP_FILE_PROCS) {
            pid = task_tgid_vnr(tsk);
        } else {
            pid = task_pid_vnr(tsk);
        }
        if (pid > 0) { /* make sure to only use valid results */
            array[n++] = pid;
        }
    }
    css_task_iter_end(&it);
    length = n;
    /* now sort & (if procs) strip out duplicates */
    sort(array, length, sizeof(pid_t), cmppid, NULL);
    if (type == CGROUP_FILE_PROCS) {
        length = pidlist_uniq(array, length);
    }

    l = cgroup_pidlist_find_create(cgrp, type);
    if (!l) {
        kvfree(array);
        return -ENOMEM;
    }

    /* store array, freeing old if necessary */
    kvfree(l->list);
    l->list = array;
    l->length = length;
    *lp = l;
    return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
    /*
     * Initially we receive a position value that corresponds to
     * one more than the last pid shown (or 0 on the first call or
     * after a seek to the start). Use a binary-search to find the
     * next pid to display, if any
     */
    struct kernfs_open_file *of = s->private;
    struct cgroup_file_ctx *ctx = of->priv;
    struct cgroup *cgrp = seq_css(s)->cgroup;
    struct cgroup_pidlist *l;
    enum cgroup_filetype type = seq_cft(s)->private;
    int index = 0, pid = *pos;
    int *iter, ret;

    mutex_lock(&cgrp->pidlist_mutex);

    /*
     * !NULL @ctx->procs1.pidlist indicates that this isn't the first
     * start() after open. If the matching pidlist is around, we can use
     * that. Look for it. Note that @ctx->procs1.pidlist can't be used
     * directly. It could already have been destroyed.
     */
    if (ctx->procs1.pidlist) {
        ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
    }

    /*
     * Either this is the first start() after open or the matching
     * pidlist has been destroyed in between.  Create a new one.
     */
    if (!ctx->procs1.pidlist) {
        ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
        if (ret) {
            return ERR_PTR(ret);
        }
    }
    l = ctx->procs1.pidlist;

    if (pid) {
        int end = l->length;

        while (index < end) {
            int mid = (index + end) / 2;
            if (l->list[mid] == pid) {
                index = mid;
                break;
            } else if (l->list[mid] <= pid) {
                index = mid + 1;
            } else {
                end = mid;
            }
        }
    }
    /* If we're off the end of the array, we're done */
    if (index >= l->length) {
        return NULL;
    }
    /* Update the abstract position to be the actual pid that we found */
    iter = l->list + index;
    *pos = *iter;
    return iter;
}

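/*
 * For illustration: with l->list = {3, 5, 9}, a read resumed at
 * *pos == 6 makes the binary search above land on index 2, so the next
 * pid shown is 9 and *pos is rewritten to 9.  A *pos past the last pid
 * falls off the end of the array and terminates the read.
 */
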
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
    struct kernfs_open_file *of = s->private;
    struct cgroup_file_ctx *ctx = of->priv;
    struct cgroup_pidlist *l = ctx->procs1.pidlist;

    if (l) {
        mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, CGROUP_PIDLIST_DESTROY_DELAY);
    }
    mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
    struct kernfs_open_file *of = s->private;
    struct cgroup_file_ctx *ctx = of->priv;
    struct cgroup_pidlist *l = ctx->procs1.pidlist;
    pid_t *p = v;
    pid_t *end = l->list + l->length;
    /*
     * Advance to the next pid in the array. If this goes off the
     * end, we're done
     */
    p++;
    if (p >= end) {
        (*pos)++;
        return NULL;
    } else {
        *pos = *p;
        return p;
    }
}

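/*
 * Note that when next() above runs off the end it still bumps *pos, so
 * the following start() call searches for a pid past the final entry,
 * finds nothing and returns NULL, which ends the sequence cleanly.
 */
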
static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
    seq_printf(s, "%d\n", *(int *)v);

    return 0;
}

static ssize_t cgroup1_procs_write_func(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off,
                                        bool threadgroup)
{
    struct cgroup *cgrp;
    struct task_struct *task;
    const struct cred *cred, *tcred;
    ssize_t ret;
    bool locked;

    cgrp = cgroup_kn_lock_live(of->kn, false);
    if (!cgrp) {
        return -ENODEV;
    }

    task = cgroup_procs_write_start(buf, threadgroup, &locked);
    ret = PTR_ERR_OR_ZERO(task);
    if (ret) {
        goto out_unlock;
    }

    /*
     * Even if we're attaching all tasks in the thread group, we only need
     * to check permissions on one of them. Check permissions using the
     * credentials from file open to protect against inherited fd attacks.
     */
    cred = of->file->f_cred;
    tcred = get_task_cred(task);
#ifdef CONFIG_HYPERHOLD
    if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) && !uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#else
    if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#endif
        !uid_eq(cred->euid, tcred->uid) && !uid_eq(cred->euid, tcred->suid) &&
        !ns_capable(tcred->user_ns, CAP_SYS_NICE))
        ret = -EACCES;
    put_cred(tcred);
    if (ret) {
        goto out_finish;
    }

    ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
    cgroup_procs_write_finish(task, locked);
out_unlock:
    cgroup_kn_unlock(of->kn);

    return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)
{
    return cgroup1_procs_write_func(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)
{
    return cgroup1_procs_write_func(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)
{
    struct cgroup *cgrp;
    struct cgroup_file_ctx *ctx;

    BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

    /*
     * Release agent gets called with all capabilities,
     * require capabilities to set release agent.
     */
    ctx = of->priv;
    if ((ctx->ns->user_ns != &init_user_ns) ||
        !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
        return -EPERM;

    cgrp = cgroup_kn_lock_live(of->kn, false);
    if (!cgrp) {
        return -ENODEV;
    }
    spin_lock(&release_agent_path_lock);
    strlcpy(cgrp->root->release_agent_path, strstrip(buf), sizeof(cgrp->root->release_agent_path));
    spin_unlock(&release_agent_path_lock);
    cgroup_kn_unlock(of->kn);
    return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
    struct cgroup *cgrp = seq_css(seq)->cgroup;

    spin_lock(&release_agent_path_lock);
    seq_puts(seq, cgrp->root->release_agent_path);
    spin_unlock(&release_agent_path_lock);
    seq_putc(seq, '\n');
    return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
    seq_puts(seq, "0\n");
    return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css, struct cftype *cft)
{
    return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, struct cftype *cft, u64 val)
{
    if (val) {
        set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
    } else {
        clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
    }
    return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
    return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css, struct cftype *cft, u64 val)
{
    if (val) {
        set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
    } else {
        clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
    }
    return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
    {
        .name = "cgroup.procs",
        .seq_start = cgroup_pidlist_start,
        .seq_next = cgroup_pidlist_next,
        .seq_stop = cgroup_pidlist_stop,
        .seq_show = cgroup_pidlist_show,
        .private = CGROUP_FILE_PROCS,
        .write = cgroup1_procs_write,
    },
    {
        .name = "cgroup.clone_children",
        .read_u64 = cgroup_clone_children_read,
        .write_u64 = cgroup_clone_children_write,
    },
    {
        .name = "cgroup.sane_behavior",
        .flags = CFTYPE_ONLY_ON_ROOT,
        .seq_show = cgroup_sane_behavior_show,
    },
    {
        .name = "tasks",
        .seq_start = cgroup_pidlist_start,
        .seq_next = cgroup_pidlist_next,
        .seq_stop = cgroup_pidlist_stop,
        .seq_show = cgroup_pidlist_show,
        .private = CGROUP_FILE_TASKS,
        .write = cgroup1_tasks_write,
    },
    {
        .name = "notify_on_release",
        .read_u64 = cgroup_read_notify_on_release,
        .write_u64 = cgroup_write_notify_on_release,
    },
    {
        .name = "release_agent",
        .flags = CFTYPE_ONLY_ON_ROOT,
        .seq_show = cgroup_release_agent_show,
        .write = cgroup_release_agent_write,
        .max_write_len = PATH_MAX - 1,
    },
    {} /* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
    struct cgroup_subsys *ss;
    int i;
    bool dead;

    seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
    /*
     * ideally we don't want subsystems moving around while we do this.
     * cgroup_mutex is also necessary to guarantee an atomic snapshot of
     * subsys/hierarchy state.
     */
    mutex_lock(&cgroup_mutex);

    for_each_subsys(ss, i) {
        dead = percpu_ref_is_dying(&ss->root->cgrp.self.refcnt);
        seq_printf(m, "%s\t%d\t%d\t%d\n", ss->legacy_name, dead ? 0 : ss->root->hierarchy_id,
                   dead ? 0 : atomic_read(&ss->root->nr_cgrps), cgroup_ssid_enabled(i));
    }

    mutex_unlock(&cgroup_mutex);
    return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
    struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
    struct cgroup *cgrp;
    struct css_task_iter it;
    struct task_struct *tsk;

    /* the kernfs_node should belong to cgroupfs and be a directory */
    if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || kernfs_type(kn) != KERNFS_DIR) {
        return -EINVAL;
    }

    mutex_lock(&cgroup_mutex);

    /*
     * We aren't being called from kernfs and there's no guarantee on
     * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
     * @kn->priv is RCU safe.  Let's do the RCU dancing.
     */
    rcu_read_lock();
    cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
    if (!cgrp || cgroup_is_dead(cgrp)) {
        rcu_read_unlock();
        mutex_unlock(&cgroup_mutex);
        return -ENOENT;
    }
    rcu_read_unlock();

    css_task_iter_start(&cgrp->self, 0, &it);
    while ((tsk = css_task_iter_next(&it))) {
        switch (tsk->state) {
            case TASK_RUNNING:
                stats->nr_running++;
                break;
            case TASK_INTERRUPTIBLE:
                stats->nr_sleeping++;
                break;
            case TASK_UNINTERRUPTIBLE:
                stats->nr_uninterruptible++;
                break;
            case TASK_STOPPED:
                stats->nr_stopped++;
                break;
            default:
                if (delayacct_is_task_waiting_on_io(tsk)) {
                    stats->nr_io_wait++;
                }
                break;
        }
    }
    css_task_iter_end(&it);

    mutex_unlock(&cgroup_mutex);
    return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
    if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) && !css_has_online_children(&cgrp->self) &&
        !cgroup_is_dead(cgrp)) {
        schedule_work(&cgrp->release_agent_work);
    }
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
    struct cgroup *cgrp = container_of(work, struct cgroup, release_agent_work);
    char *pathbuf, *agentbuf;
    char *argv[3], *envp[3];
    int ret;

    /* snoop agent path and exit early if empty */
    if (!cgrp->root->release_agent_path[0]) {
        return;
    }

    /* prepare argument buffers */
    pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
    agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
    if (!pathbuf || !agentbuf) {
        goto out_free;
    }

    spin_lock(&release_agent_path_lock);
    strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
    spin_unlock(&release_agent_path_lock);
    if (!agentbuf[0]) {
        goto out_free;
    }

    ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
    if (ret < 0 || ret >= PATH_MAX) {
        goto out_free;
    }

    argv[CGROUP_ARRAY_INDEX_ZERO] = agentbuf;
    argv[CGROUP_ARRAY_INDEX_ONE] = pathbuf;
    argv[CGROUP_ARRAY_INDEX_TWO] = NULL;

    /* minimal command environment */
    envp[CGROUP_ARRAY_INDEX_ZERO] = "HOME=/";
    envp[CGROUP_ARRAY_INDEX_ONE] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
    envp[CGROUP_ARRAY_INDEX_TWO] = NULL;

    call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
    kfree(agentbuf);
    kfree(pathbuf);
}

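/*
 * For illustration: with release_agent_path set to "/sbin/cgroup_release"
 * (a made-up name) and a cgroup "foo" at the root of its hierarchy being
 * released, the helper is spawned roughly as
 *
 *     HOME=/ PATH=/sbin:/bin:/usr/sbin:/usr/bin /sbin/cgroup_release /foo
 *
 * i.e. argv[1] is the cgroup path relative to the hierarchy root, not an
 * absolute filesystem path.
 */
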
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name_str)
{
    struct cgroup *cgrp = kn->priv;
    int ret;

    /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
    if (strchr(new_name_str, '\n')) {
        return -EINVAL;
    }

    if (kernfs_type(kn) != KERNFS_DIR) {
        return -ENOTDIR;
    }
    if (kn->parent != new_parent) {
        return -EIO;
    }

    /*
     * We're gonna grab cgroup_mutex which nests outside kernfs
     * active_ref.  kernfs_rename() doesn't require active_ref
     * protection.  Break them before grabbing cgroup_mutex.
     */
    kernfs_break_active_protection(new_parent);
    kernfs_break_active_protection(kn);

    mutex_lock(&cgroup_mutex);

    ret = kernfs_rename(kn, new_parent, new_name_str);
    if (!ret) {
        TRACE_CGROUP_PATH(rename, cgrp);
    }

    mutex_unlock(&cgroup_mutex);

    kernfs_unbreak_active_protection(kn);
    kernfs_unbreak_active_protection(new_parent);
    return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
    struct cgroup_root *root = cgroup_root_from_kf(kf_root);
    struct cgroup_subsys *ss;
    int ssid;

    for_each_subsys(ss, ssid)
        if (root->subsys_mask & (1 << ssid))
            seq_show_option(seq, ss->legacy_name, NULL);
    if (root->flags & CGRP_ROOT_NOPREFIX) {
        seq_puts(seq, ",noprefix");
    }
    if (root->flags & CGRP_ROOT_XATTR) {
        seq_puts(seq, ",xattr");
    }
    if (root->flags & CGRP_ROOT_CPUSET_V2_MODE) {
        seq_puts(seq, ",cpuset_v2_mode");
    }

    spin_lock(&release_agent_path_lock);
    if (strlen(root->release_agent_path)) {
        seq_show_option(seq, "release_agent", root->release_agent_path);
    }
    spin_unlock(&release_agent_path_lock);

    if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) {
        seq_puts(seq, ",clone_children");
    }
    if (strlen(root->name)) {
        seq_show_option(seq, "name", root->name);
    }
    return 0;
}

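/*
 * For illustration, the options emitted above surface in /proc/mounts
 * as, e.g. (mount points and option sets are examples only):
 *
 *     cgroup /dev/cpuset cgroup rw,cpuset,noprefix 0 0
 *     none /mycg cgroup rw,name=mycg 0 0
 */
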
enum cgroup1_param {
    Opt_all,
    Opt_clone_children,
    Opt_cpuset_v2_mode,
    Opt_name,
    Opt_none,
    Opt_noprefix,
    Opt_release_agent,
    Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
    fsparam_flag("all", Opt_all),
    fsparam_flag("clone_children", Opt_clone_children),
    fsparam_flag("cpuset_v2_mode", Opt_cpuset_v2_mode),
    fsparam_string("name", Opt_name),
    fsparam_flag("none", Opt_none),
    fsparam_flag("noprefix", Opt_noprefix),
    fsparam_string("release_agent", Opt_release_agent),
    fsparam_flag("xattr", Opt_xattr),
    {}
};

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
    struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
    struct cgroup_subsys *ss;
    struct fs_parse_result result;
    int opt, i;

    opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
    if (opt == -ENOPARAM) {
        int ret;

        ret = vfs_parse_fs_param_source(fc, param);
        if (ret != -ENOPARAM) {
            return ret;
        }
        for_each_subsys(ss, i) {
            if (strcmp(param->key, ss->legacy_name)) {
                continue;
            }
            if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i)) {
                return invalfc(fc, "Disabled controller '%s'", param->key);
            }
            ctx->subsys_mask |= (1 << i);
            return 0;
        }
        return invalfc(fc, "Unknown subsys name '%s'", param->key);
    }
    if (opt < 0) {
        return opt;
    }

    switch (opt) {
        case Opt_none:
            /* Explicitly have no subsystems */
            ctx->none = true;
            break;
        case Opt_all:
            ctx->all_ss = true;
            break;
        case Opt_noprefix:
            ctx->flags |= CGRP_ROOT_NOPREFIX;
            break;
        case Opt_clone_children:
            ctx->cpuset_clone_children = true;
            break;
        case Opt_cpuset_v2_mode:
            ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
            break;
        case Opt_xattr:
            ctx->flags |= CGRP_ROOT_XATTR;
            break;
        case Opt_release_agent:
            /* Specifying two release agents is forbidden */
            if (ctx->release_agent) {
                return invalfc(fc, "release_agent respecified");
            }
            /*
             * Release agent gets called with all capabilities,
             * require capabilities to set release agent.
             */
            if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
                return invalfc(fc, "Setting release_agent not allowed");
            }
            ctx->release_agent = param->string;
            param->string = NULL;
            break;
        case Opt_name:
            /* blocked by boot param? */
            if (cgroup_no_v1_named) {
                return -ENOENT;
            }
            /* Can't specify an empty name */
            if (!param->size) {
                return invalfc(fc, "Empty name");
            }
            if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1) {
                return invalfc(fc, "Name too long");
            }
            /* Must match [\w.-]+ */
            for (i = 0; i < param->size; i++) {
                char c = param->string[i];
                if (isalnum(c)) {
                    continue;
                }
                if ((c == '.') || (c == '-') || (c == '_')) {
                    continue;
                }
                return invalfc(fc, "Invalid name");
            }
            /* Specifying two names is forbidden */
            if (ctx->name) {
                return invalfc(fc, "name respecified");
            }
            ctx->name = param->string;
            param->string = NULL;
            break;
        default:
            break;
    }
    return 0;
}

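/*
 * For illustration, the options parsed above correspond to v1 mounts
 * such as (mount points are examples only):
 *
 *     mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
 *     mount -t cgroup -o none,name=systemd none /sys/fs/cgroup/systemd
 *     mount -t cgroup -o cpuset,noprefix none /dev/cpuset
 */
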
static int check_cgroupfs_options(struct fs_context *fc)
{
    struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
    u16 mask = U16_MAX;
    u16 enabled = 0;
    struct cgroup_subsys *ss;
    int i;

#ifdef CONFIG_CPUSETS
    mask = ~((u16)1 << cpuset_cgrp_id);
#endif
    for_each_subsys(ss, i)
        if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
            enabled |= 1 << i;

    ctx->subsys_mask &= enabled;

    /*
     * In absence of 'none', 'name=' or subsystem name options,
     * let's default to 'all'.
     */
    if (!ctx->subsys_mask && !ctx->none && !ctx->name) {
        ctx->all_ss = true;
    }

    if (ctx->all_ss) {
        /* Mutually exclusive option 'all' + subsystem name */
        if (ctx->subsys_mask) {
            return invalfc(fc, "subsys name conflicts with all");
        }
        /* 'all' => select all the subsystems */
        ctx->subsys_mask = enabled;
    }

    /*
     * We either have to specify by name or by subsystems. (So all
     * empty hierarchies must have a name).
     */
    if (!ctx->subsys_mask && !ctx->name) {
        return invalfc(fc, "Need name or subsystem set");
    }

    /*
     * Option noprefix was introduced just for backward compatibility
     * with the old cpuset, so we allow noprefix only if mounting just
     * the cpuset subsystem.
     */
    if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask)) {
        return invalfc(fc, "noprefix used incorrectly");
    }

    /* Can't specify "none" and some subsystems */
    if (ctx->subsys_mask && ctx->none) {
        return invalfc(fc, "none used incorrectly");
    }

    return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
    struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
    struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
    struct cgroup_root *root = cgroup_root_from_kf(kf_root);
    int ret = 0;
    u16 added_mask, removed_mask;

    cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

    /* See what subsystems are wanted */
    ret = check_cgroupfs_options(fc);
    if (ret) {
        goto out_unlock;
    }

    if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent) {
        pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n", task_tgid_nr(current), current->comm);
    }

    added_mask = ctx->subsys_mask & ~root->subsys_mask;
    removed_mask = root->subsys_mask & ~ctx->subsys_mask;

    /* Don't allow flags or name to change at remount */
    if ((ctx->flags ^ root->flags) || (ctx->name && strcmp(ctx->name, root->name))) {
        errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"", ctx->flags, ctx->name ?: "",
                root->flags, root->name);
        ret = -EINVAL;
        goto out_unlock;
    }

    /* remounting is not allowed for populated hierarchies */
    if (!list_empty(&root->cgrp.self.children)) {
        ret = -EBUSY;
        goto out_unlock;
    }

    ret = rebind_subsystems(root, added_mask);
    if (ret) {
        goto out_unlock;
    }

    WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

    if (ctx->release_agent) {
        spin_lock(&release_agent_path_lock);
        strcpy(root->release_agent_path, ctx->release_agent);
        spin_unlock(&release_agent_path_lock);
    }

    trace_cgroup_remount(root);

out_unlock:
    mutex_unlock(&cgroup_mutex);
    return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
    .rename = cgroup1_rename,
    .show_options = cgroup1_show_options,
    .mkdir = cgroup_mkdir,
    .rmdir = cgroup_rmdir,
    .show_path = cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create the cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, a -E... error
 * code on failure, and a positive value when the candidate root is
 * busy dying.  On success it stashes a reference to the cgroup_root
 * into the given cgroup_fs_context; that reference is *NOT* counted
 * towards the cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
    struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
    struct cgroup_root *root;
    struct cgroup_subsys *ss;
    int i, ret;

    /* First find the desired set of subsystems */
    ret = check_cgroupfs_options(fc);
    if (ret) {
        return ret;
    }

    /*
     * Destruction of cgroup root is asynchronous, so subsystems may
     * still be dying after the previous unmount.  Let's drain the
     * dying subsystems.  We just need to ensure that the ones
     * unmounted previously finish dying and don't care about new ones
     * starting.  Testing ref liveness is good enough.
     */
    for_each_subsys(ss, i) {
        if (!(ctx->subsys_mask & (1 << i)) || ss->root == &cgrp_dfl_root) {
            continue;
        }

        if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
            return 1; /* restart */
        }
        cgroup_put(&ss->root->cgrp);
    }

    for_each_root(root) {
        bool name_match = false;

        if (root == &cgrp_dfl_root) {
            continue;
        }

        /*
         * If we asked for a name then it must match.  Also, if
         * name matches but subsys_mask doesn't, we should fail.
         * Remember whether name matched.
         */
        if (ctx->name) {
            if (strcmp(ctx->name, root->name)) {
                continue;
            }
            name_match = true;
        }

        /*
         * If we asked for subsystems (or explicitly for no
         * subsystems) then they must match.
         */
        if ((ctx->subsys_mask || ctx->none) && (ctx->subsys_mask != root->subsys_mask)) {
            if (!name_match) {
                continue;
            }
            return -EBUSY;
        }

        if (root->flags ^ ctx->flags) {
            pr_warn("new mount options do not match the existing superblock, will be ignored\n");
        }

        ctx->root = root;
        return 0;
    }

    /*
     * No such thing, create a new one.  name= matching without subsys
     * specification is allowed for already existing hierarchies but we
     * can't create new one without subsys specification.
     */
    if (!ctx->subsys_mask && !ctx->none) {
        return invalfc(fc, "No subsys list or none specified");
    }

    /* Hierarchies may only be created in the initial cgroup namespace. */
    if (ctx->ns != &init_cgroup_ns) {
        return -EPERM;
    }

    root = kzalloc(sizeof(*root), GFP_KERNEL);
    if (!root) {
        return -ENOMEM;
    }

    ctx->root = root;
    init_cgroup_root(ctx);

    ret = cgroup_setup_root(root, ctx->subsys_mask);
    if (ret) {
        cgroup_free_root(root);
    }
    return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
    struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
    int ret;

    /* Check if the caller has permission to mount. */
    if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN)) {
        return -EPERM;
    }

    cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

    ret = cgroup1_root_to_use(fc);
    if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt)) {
        ret = 1; /* restart */
    }

    mutex_unlock(&cgroup_mutex);

    if (!ret) {
        ret = cgroup_do_get_tree(fc);
    }

    if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
        fc_drop_locked(fc);
        ret = 1;
    }

    if (unlikely(ret > 0)) {
        msleep(10);
        return restart_syscall();
    }
    return ret;
}

static int __init cgroup1_wq_init(void)
{
    /*
     * Used to destroy pidlists and separate to serve as flush domain.
     * Cap @max_active to 1 too.
     */
    cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy", 0, 1);
    BUG_ON(!cgroup_pidlist_destroy_wq);
    return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
    struct cgroup_subsys *ss;
    char *token;
    int i;

    while ((token = strsep(&str, ",")) != NULL) {
        if (!*token) {
            continue;
        }

        if (!strcmp(token, "all")) {
            cgroup_no_v1_mask = U16_MAX;
            continue;
        }

        if (!strcmp(token, "named")) {
            cgroup_no_v1_named = true;
            continue;
        }

        for_each_subsys(ss, i) {
            if (strcmp(token, ss->name) && strcmp(token, ss->legacy_name)) {
                continue;
            }

            cgroup_no_v1_mask |= 1 << i;
        }
    }
    return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
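
/*
 * For illustration, the parser above accepts boot-command-line settings
 * such as:
 *
 *     cgroup_no_v1=memory,blkio    disable the listed controllers in v1
 *     cgroup_no_v1=all             disable every controller in v1
 *     cgroup_no_v1=named           disable named (name=) v1 hierarchies
 */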