// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>
#include <trace/hooks/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cpus_read_lock();
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
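
/*
 * Illustrative usage sketch (not part of the original source): a driver
 * that spawns a helper kthread on behalf of the current task can mirror
 * the spawner's v1 cgroup membership before starting the thread:
 *
 *	struct task_struct *kt = kthread_create(worker_fn, NULL, "helper");
 *
 *	if (!IS_ERR(kt)) {
 *		if (cgroup_attach_task_all(current, kt))
 *			pr_warn("helper left in root cgroups\n");
 *		wake_up_process(kt);
 *	}
 *
 * "worker_fn" is a placeholder threadfn; vhost does essentially this for
 * its worker threads.
 */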

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * ends up either visible in the source cgroup after the parent's
 * migration is complete or put into the target cgroup.  No task can slip
 * out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
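
/*
 * Illustrative note (not part of the original source): the in-tree user
 * is cpuset, which drains a v1 cpuset whose CPUs/memory all went offline
 * by moving its tasks to the nearest viable ancestor, roughly:
 *
 *	cgroup_transfer_tasks(parent_cs(cs)->css.cgroup, cs->css.cgroup);
 */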

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists still lingering on the destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
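
/*
 * Illustrative example (not part of the original source): given a sorted
 * input, pidlist_uniq() compacts duplicates in place and returns the new
 * length; entries beyond it are left unspecified:
 *
 *	pid_t pids[] = { 1, 3, 3, 5, 5, 5, 8 };
 *	int n = pidlist_uniq(pids, ARRAY_SIZE(pids));
 *	// n == 4, pids[] now begins { 1, 3, 5, 8, ... }
 */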

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Called with cgrp->pidlist_mutex held
 * and returns with it still held; returns NULL on allocation failure.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & strip out duplicates (tgids or recycled thread PIDs) */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * A non-NULL @ctx->procs1.pidlist indicates that this isn't the
	 * first start() after open. If the matching pidlist is still
	 * around, we can use it. Look for it. Note that
	 * @ctx->procs1.pidlist can't be used directly - it could already
	 * have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
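
/*
 * Illustrative example (not part of the original source): resuming a
 * partial read.  With l->list == { 3, 5, 9 } and a saved position of 5,
 * the binary search finds the exact match and the read resumes at 5.  If
 * pid 5 has meanwhile exited and the reloaded list is { 3, 9 }, the
 * search lands on the first pid greater than 5, so the read resumes at 9
 * and *pos is rewritten to 9 - entries can be skipped, but never
 * repeated.
 */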

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked, cgrp);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them. Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid) &&
	    !ns_capable(tcred->user_ns, CAP_SYS_NICE))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);
	trace_android_vh_cgroup_set_task(ret, task);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * The release agent gets called with all capabilities, so require
	 * capabilities to set the release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
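
/*
 * Illustrative userspace sketch (not part of the original source): the
 * agent path is configured by writing the root-level release_agent file,
 * assuming the hierarchy is mounted at /sys/fs/cgroup/memory:
 *
 *	int fd = open("/sys/fs/cgroup/memory/release_agent", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "/sbin/my_agent", strlen("/sbin/my_agent"));
 *		close(fd);
 *	}
 */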

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
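
/*
 * Illustrative note (not part of the original source): once a v1
 * hierarchy is mounted, each entry above appears as a file in every
 * cgroup directory (release_agent and cgroup.sane_behavior on the root
 * only), e.g.:
 *
 *	int fd = open("/sys/fs/cgroup/cpu/mygrp/cgroup.procs", O_RDONLY);
 *	// reads yield one PID per line, sorted and de-duplicated by the
 *	// pidlist machinery above
 */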

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
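
/*
 * Illustrative userspace sketch (not part of the original source): the
 * agent binary is invoked as "<agent> <cgroup-path>", where the path is
 * relative to the hierarchy root.  A minimal agent that removes the
 * now-empty group, assuming the hierarchy is mounted at
 * /sys/fs/cgroup/memory, could look like:
 *
 *	int main(int argc, char **argv)
 *	{
 *		char buf[4096];
 *
 *		if (argc < 2)
 *			return 1;
 *		snprintf(buf, sizeof(buf), "/sys/fs/cgroup/memory%s", argv[1]);
 *		return rmdir(buf) ? 1 : 0;
 *	}
 */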

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};
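
/*
 * Illustrative examples (not part of the original source): these options
 * arrive as the data string of mount(2), e.g.
 *
 *	// co-mount two controllers on one hierarchy
 *	mount("cgroup", "/sys/fs/cgroup/cpu,cpuacct", "cgroup", 0,
 *	      "cpu,cpuacct");
 *
 *	// a named hierarchy with no controllers attached
 *	mount("cgroup", "/mnt/foo", "cgroup", 0, "none,name=foo");
 */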

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (param->type != fs_value_is_string)
				return invalf(fc, "Non-string source");
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * The release agent gets called with all capabilities, so
		 * require capabilities to set the release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In the absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
		       ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create the cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to the cgroup_root into the given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists; kept separate so it can serve as the
	 * flush domain.  Cap @max_active at 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
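
/*
 * Illustrative examples (not part of the original source): typical kernel
 * command lines using this parameter:
 *
 *	cgroup_no_v1=memory,blkio	disable v1 for the listed controllers
 *	cgroup_no_v1=all,named		disable all v1 controllers and named
 *					(name=) hierarchies
 */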
1306