// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cpus_read_lock();
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
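
/*
 * Illustrative use (names are hypothetical, not from this file): a kernel
 * thread doing work on behalf of a user task can mirror that task's
 * cgroup membership across every v1 hierarchy:
 *
 *	int err = cgroup_attach_task_all(owner_task, worker_task);
 *
 * vhost-style worker threads are one exported user of this helper.
 */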

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
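
/*
 * Example caller (elsewhere in the tree): on v1, cpuset empties a cgroup
 * this way when hotplug removes all of its CPUs or memory nodes, moving
 * the orphaned tasks up to a usable ancestor cpuset.
 */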

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering, waiting for the destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries.
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
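
/*
 * Worked example: a sorted input of { 3, 3, 5, 7, 7, 7 } is compacted in
 * place to { 3, 5, 7, ... } and pidlist_uniq() returns 3; only the first
 * three entries are meaningful afterwards.
 */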

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks);
 * must be called with cgrp->pidlist_mutex held.  Returns the existing or
 * newly created pidlist, or NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & strip out duplicates (tgids or recycled thread PIDs) */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open. If the matching pidlist is around, we can use
	 * that. Look for it. Note that @ctx->procs1.pidlist can't be used
	 * directly. It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
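
/*
 * Worked example of the resume logic above: with l->list = { 2, 5, 9 }
 * and a saved *pos of 6 (one past the last pid shown, 5), the binary
 * search settles on index 2, so the next read resumes at pid 9 even if
 * entries were removed and the list was rebuilt in between.
 */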

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them. Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
#ifdef CONFIG_HYPERHOLD
	if (!uid_eq(cred->euid, GLOBAL_MEMMGR_UID) &&
	    !uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#else
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
#endif
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
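
/*
 * Illustrative use from userspace (the mount point is an assumption):
 *
 *	# echo 1234 > /sys/fs/cgroup/cpu/mygrp/cgroup.procs	(whole group)
 *	# echo 1234 > /sys/fs/cgroup/cpu/mygrp/tasks		(single thread)
 */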

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities,
	 * require capabilities to set release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
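
/*
 * E.g. (paths are illustrative): a CAP_SYS_ADMIN task in the init user
 * namespace can set the agent with
 *
 *	# echo /sbin/cgroup_release_agent > /sys/fs/cgroup/cpu/release_agent
 */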

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;
	bool dead;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i) {
		dead = percpu_ref_is_dying(&ss->root->cgrp.self.refcnt);
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, dead ? 0 : ss->root->hierarchy_id,
			   dead ? 0 : atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));
	}

	mutex_unlock(&cgroup_mutex);
	return 0;
}
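
/*
 * Illustrative /proc/cgroups output produced above (values made up):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	4	1
 *	cpu	3	12	1
 */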

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
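
/*
 * Example: with release_agent_path set to "/sbin/agent" and the cgroup
 * "/foo/bar" becoming empty, the code above effectively runs
 * "/sbin/agent /foo/bar" with the minimal environment shown, without
 * waiting for the agent to exit.
 */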

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};
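
/*
 * Illustrative v1 mount using these parameters (paths/names made up):
 *
 *	# mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt/cg
 *
 * Subsystem names ("cpu", "cpuacct") are handled by the -ENOPARAM
 * fallback in cgroup1_parse_param() below rather than by this table.
 */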

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * Release agent gets called with all capabilities,
		 * require capabilities to set release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}
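
/*
 * Worked examples of the checks above: "-o none,name=foo" passes (a
 * named, empty hierarchy); "-o all,cpu" fails with "subsys name conflicts
 * with all"; "-o noprefix,cpu" fails with "noprefix used incorrectly"
 * since noprefix is reserved for cpuset-only mounts.
 */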

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
		       ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and a positive value when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counted towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and kept separate so it can serve as
	 * the flush domain.  Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
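
/*
 * Example: booting with "cgroup_no_v1=memory,named" hides the memory
 * controller from v1 mounts and rejects named (name=) v1 hierarchies,
 * while "cgroup_no_v1=all" blocks every v1 controller.
 */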