// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	cgroup_lock();
	cgroup_attach_lock(true);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	cgroup_attach_unlock(true);
	cgroup_unlock();

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * will either be visible in the source cgroup after the parent's
 * migration is complete or be put into the target cgroup.  No task
 * can slip out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	cgroup_lock();

	cgroup_attach_lock(true);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	cgroup_attach_unlock(true);
	cgroup_unlock();
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading these files can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering on the destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted kmalloc()ed list, strip out all duplicate
 * entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
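
/*
 * Worked example (illustrative only): for the sorted input
 * { 3, 3, 5, 7, 7, 7 } with length 6, pidlist_uniq() compacts the array
 * in place to { 3, 5, 7, ... } and returns 3; entries past the returned
 * length are stale and must not be read.
 */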

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
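
/*
 * Note: plain subtraction cannot overflow here because pids are
 * non-negative and bounded well below INT_MAX, so the difference always
 * fits in an int.
 */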

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourselves */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Returns NULL if we're out of memory.
 * Must be called with cgrp->pidlist_mutex held.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourselves */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & strip out duplicates (tgids or recycled thread PIDs) */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

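/*
 * Rough call sequence for a single read(2) of one of these files
 * (an illustrative sketch, not a contract):
 *
 *	cgroup_pidlist_start()	- take pidlist_mutex, find or rebuild the
 *				  pidlist, binary-search *pos to the next pid
 *	cgroup_pidlist_show()	- print one pid
 *	cgroup_pidlist_next()	- advance the iterator, set *pos to the pid
 *	...			- show/next repeat until the array ends
 *	cgroup_pidlist_stop()	- re-arm delayed destruction, drop the mutex
 */
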
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open. If the matching pidlist is around, we can use
	 * that. Look for it. Note that @ctx->procs1.pidlist can't be used
	 * directly. It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] < pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
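
/*
 * Example of the resume search above (illustrative): with
 * l->list = { 10, 20, 30 } and a read resumed at *pos == 21, the binary
 * search lands on index 2 and *pos is rewritten to 30, so the seq_file
 * position always names an actual pid from the array.
 */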

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them. Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
#ifdef CONFIG_HYPERHOLD
	if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) &&
	    !uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#else
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#endif
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
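
/*
 * Userspace usage sketch (illustrative; assumes a v1 hierarchy mounted
 * at /mnt):
 *
 *	echo $$   > /mnt/grp/cgroup.procs	# move the whole thread group
 *	echo $TID > /mnt/grp/tasks		# move a single thread
 */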

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * The release agent gets called with all capabilities, so require
	 * capabilities to set the release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strscpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Grab the subsystems state racily. No need to add another avenue
	 * for cgroup_mutex contention.
	 */

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (tsk->in_iowait)
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	cgroup_put(cgrp);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strscpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
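
/*
 * For illustration, a minimal release agent (assuming this hierarchy is
 * mounted at /mnt) might be:
 *
 *	#!/bin/sh
 *	rmdir "/mnt$1"	# $1 is the released cgroup's path from the root
 */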

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	cgroup_lock();

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	cgroup_unlock();

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");
	if (root->flags & CGRP_ROOT_FAVOR_DYNMODS)
		seq_puts(seq, ",favordynmods");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
	Opt_favordynmods,
	Opt_nofavordynmods,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	fsparam_flag  ("favordynmods",	Opt_favordynmods),
	fsparam_flag  ("nofavordynmods", Opt_nofavordynmods),
	{}
};
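
/*
 * Example mounts these options map to (illustrative only):
 *
 *	mount -t cgroup -o cpu,cpuacct cgroup /mnt
 *	mount -t cgroup -o none,name=mygrp cgroup /mnt
 *	mount -t cgroup -o all,release_agent=/sbin/agent cgroup /mnt
 */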

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_favordynmods:
		ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_nofavordynmods:
		ctx->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * The release agent gets called with all capabilities, so
		 * require capabilities to set the release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' and subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
		       ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	cgroup_unlock();
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create the cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error
 * and a positive value when the candidate is busy dying.  On success
 * it stashes a reference to cgroup_root into the given
 * cgroup_fs_context; that reference does *NOT* count towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (!ret)
		cgroup_favor_dynmods(root, ctx->flags & CGRP_ROOT_FAVOR_DYNMODS);
	else
		cgroup_free_root(root);

	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	cgroup_unlock();

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and is kept separate to serve as the
	 * flush domain.  Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

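/*
 * Boot parameter examples (illustrative):
 *
 *	cgroup_no_v1=memory,cpu		- block those controllers on v1
 *	cgroup_no_v1=all		- block every controller on v1
 *	cgroup_no_v1=named		- disable named v1 hierarchies
 */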
static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);