/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/capability.h>

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/* Generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) &_x ## _subsys,

static struct cgroup_subsys *subsys[] = {
#include <linux/cgroup_subsys.h>
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy. */
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];
};


/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/* The list of hierarchy roots */

static LIST_HEAD(roots);
static int root_count;

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set.  Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/* hash table for cgroup groups. This improves the performance of
 * finding an existing css_set */
#define CSS_SET_HASH_BITS	7
#define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];

static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	int index;
	unsigned long tmp = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		tmp += (unsigned long)css[i];
	tmp = (tmp >> 16) ^ tmp;

	index = hash_long(tmp, CSS_SET_HASH_BITS);

	return &css_set_table[index];
}
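
/*
 * Illustrative sketch (comment only, not part of the logic above): the
 * hash simply sums the subsystem state pointers and folds the high bits
 * down before handing the result to hash_long().  Assuming two
 * hypothetical pointer values:
 *
 *	css[0] == 0xffff880012345678
 *	css[1] == 0xffff8800deadbeef
 *
 *	tmp = 0xffff880012345678 + 0xffff8800deadbeef;
 *	tmp = (tmp >> 16) ^ tmp;
 *	index = hash_long(tmp, CSS_SET_HASH_BITS);	-> index in [0, 127]
 *
 * Two css_sets pointing at the same set of subsystem states always land
 * in the same bucket, which is what find_existing_css_set() relies on.
 */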

/* We don't maintain the lists running through each css_set to its
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;

/* When we create or destroy a css_set, the operation simply
 * takes/releases a reference count on all the cgroups referenced
 * by subsystems in this css_set. This can end up multiple-counting
 * some cgroups, but that's OK - the ref-count is just a
 * busy/not-busy indicator; ensuring that we only count each cgroup
 * once would require taking a global lock to ensure that no
 * subsystems moved between hierarchies while we were doing so.
 *
 * Possible TODO: decide at boot time based on the number of
 * registered subsystems and the number of CPUs or NUMA nodes whether
 * it's better for performance to ref-count every subsystem, or to
 * take a global lock and only add one ref count to each hierarchy.
 */

/*
 * unlink a css_set from the list and free it
 */
static void unlink_css_set(struct css_set *cg)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	hlist_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

static void __put_css_set(struct css_set *cg, int taskexit)
{
	int i;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}
	unlink_css_set(cg);
	write_unlock(&css_set_lock);

	rcu_read_lock();
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
	}
	rcu_read_unlock();
	kfree(cg);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}
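
/*
 * Minimal usage sketch (illustrative only): every get_css_set() must be
 * balanced by a put_css_set(), and the exit path uses the _taskexit
 * variant so CGRP_RELEASABLE gets set on the cgroups involved:
 *
 *	task_lock(tsk);
 *	cg = tsk->cgroups;
 *	get_css_set(cg);	(pin the set while we work with it)
 *	task_unlock(tsk);
 *	...
 *	put_css_set(cg);	(may free cg and drop cgroup counts)
 *
 * cgroup_attach_task() below follows exactly this pattern.
 */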

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct hlist_head *hhead;
	struct hlist_node *node;
	struct css_set *cg;

	/* Build the set of subsystem state objects that we want to
	 * see in the new css_set */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	hhead = css_set_hash(template);
	hlist_for_each_entry(cg, node, hhead, hlist) {
		if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
			/* All subsystems matched */
			return cg;
		}
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;
	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	list_add(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
	int i;

	struct list_head tmp_cg_links;

	struct hlist_head *hhead;

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup *cgrp = res->subsys[i]->cgroup;
		struct cgroup_subsys *ss = subsys[i];
		atomic_inc(&cgrp->count);
		/*
		 * We want to add a link once per cgroup, so we
		 * only do it for the first subsystem in each
		 * hierarchy
		 */
		if (ss->root->subsys_list.next == &ss->sibling)
			link_css_set(&tmp_cg_links, res, cgrp);
	}
	if (list_empty(&rootnode.subsys_list))
		link_css_set(&tmp_cg_links, res, dummytop);

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	hhead = css_set_hash(res->subsys);
	hlist_add_head(&res->hlist, hhead);

	write_unlock(&css_set_lock);

	return res;
}
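
/*
 * Sketch of a typical caller (simplified; see cgroup_attach_task()
 * below for the real thing): take a reference on the task's current
 * css_set, ask find_css_set() for the equivalent set with one cgroup
 * swapped, then publish the new pointer:
 *
 *	newcg = find_css_set(oldcg, cgrp);	(returns with a reference)
 *	if (!newcg)
 *		return -ENOMEM;
 *	rcu_assign_pointer(tsk->cgroups, newcg);
 *	put_css_set(oldcg);
 */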

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another.  It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex.  Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

/**
 * cgroup_lock - lock out any changes to cgroup structures
 */
void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}
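
/*
 * Illustrative sketch (hypothetical caller, not from this file):
 * subsystems that need to walk cgroup structures from outside this
 * file bracket the access with these exported helpers:
 *
 *	cgroup_lock();
 *	... inspect or modify cgroup topology ...
 *	cgroup_unlock();
 *
 * This is just mutex_lock()/mutex_unlock() on cgroup_mutex, wrapped so
 * the mutex itself can stay static to this file.
 */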

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static void cgroup_call_pre_destroy(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	for_each_subsys(cgrp->root, ss)
		if (ss->pre_destroy)
			ss->pre_destroy(ss, cgrp);
	return;
}

static void free_cgroup_rcu(struct rcu_head *obj)
{
	struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);

	kfree(cgrp);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;
		struct cgroup_subsys *ss;
		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* It's possible for external users to be holding css
		 * reference counts on a cgroup; css_put() needs to
		 * be able to access the cgroup after decrementing
		 * the reference count in order to know if it needs to
		 * queue the cgroup to be handled by the release
		 * agent */
		synchronize_rcu();

		mutex_lock(&cgroup_mutex);
		/*
		 * Release the subsystem state objects.
		 */
		for_each_subsys(cgrp->root, ss)
			ss->destroy(ss, cgrp);

		cgrp->root->number_of_cgroups--;
		mutex_unlock(&cgroup_mutex);

		/*
		 * Drop the active superblock reference that we took when we
		 * created the cgroup
		 */
		deactivate_super(cgrp->root->sb);

		call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
	}
	iput(inode);
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dcache_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	cgroup_clear_directory(dentry);

	spin_lock(&dcache_lock);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static int rebind_subsystems(struct cgroupfs_root *root,
			      unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			mutex_lock(&ss->hierarchy_mutex);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(ss, cgrp);
			mutex_unlock(&ss->hierarchy_mutex);
		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			mutex_lock(&ss->hierarchy_mutex);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			mutex_unlock(&ss->hierarchy_mutex);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(!cgrp->subsys[i]);
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}
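
/*
 * Worked example of the bitmask arithmetic above (values hypothetical):
 * if the hierarchy currently has subsystems 0 and 1 attached and a
 * remount asks for subsystems 1 and 2:
 *
 *	actual_subsys_bits = 0x3		(0b011)
 *	final_bits         = 0x6		(0b110)
 *	removed_bits = 0x3 & ~0x6 = 0x1		(drop subsystem 0)
 *	added_bits   = 0x6 & ~0x3 = 0x4		(bind subsystem 2)
 *
 * Subsystem 1 falls into the "bit & final_bits" branch and is left
 * untouched.
 */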

static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
};

/* Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. */
static int parse_cgroupfs_options(char *data,
				     struct cgroup_sb_opts *opts)
{
	char *token, *o = data ?: "all";

	opts->subsys_bits = 0;
	opts->flags = 0;
	opts->release_agent = NULL;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "all")) {
			/* Add all non-disabled subsystems */
			int i;
			opts->subsys_bits = 0;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				struct cgroup_subsys *ss = subsys[i];
				if (!ss->disabled)
					opts->subsys_bits |= 1ul << i;
			}
		} else if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
		} else if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
			opts->release_agent[PATH_MAX - 1] = 0;
		} else {
			struct cgroup_subsys *ss;
			int i;
			for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
				ss = subsys[i];
				if (!strcmp(token, ss->name)) {
					if (!ss->disabled)
						set_bit(i, &opts->subsys_bits);
					break;
				}
			}
			if (i == CGROUP_SUBSYS_COUNT)
				return -ENOENT;
		}
	}

	/* We can't have an empty hierarchy */
	if (!opts->subsys_bits)
		return -EINVAL;

	return 0;
}
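
/*
 * Example option strings this parser accepts, i.e. what would be passed
 * as -o to mount(8) when mounting a cgroup hierarchy (illustrative;
 * which subsystem names exist depends on the kernel config):
 *
 *	"all"					all enabled subsystems
 *	NULL data				treated as "all"
 *	"cpuset,noprefix"			one subsystem, no name prefix
 *	"cpu,cpuacct,release_agent=/sbin/ra"	two subsystems plus an agent
 */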

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags to change at remount */
	if (opts.flags != root->flags) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);

	/* (re)populate subsystem files */
	if (!ret)
		cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	if (opts.release_agent)
		kfree(opts.release_agent);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	init_rwsem(&cgrp->pids_mutex);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroupfs_root *new = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* First check subsystems */
	if (new->subsys_bits != root->subsys_bits)
		return 0;

	/* Next check flags */
	if (new->flags != root->flags)
		return 0;

	return 1;
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroupfs_root *root = data;

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = root;
	root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	return 0;
}

static int cgroup_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	struct cgroup_sb_opts opts;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *root;
	struct list_head tmp_cg_links;

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret) {
		if (opts.release_agent)
			kfree(opts.release_agent);
		return ret;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		if (opts.release_agent)
			kfree(opts.release_agent);
		return -ENOMEM;
	}

	init_cgroup_root(root);
	root->subsys_bits = opts.subsys_bits;
	root->flags = opts.flags;
	if (opts.release_agent) {
		strcpy(root->release_agent_path, opts.release_agent);
		kfree(opts.release_agent);
	}

	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);

	if (IS_ERR(sb)) {
		kfree(root);
		return PTR_ERR(sb);
	}

	if (sb->s_fs_info != root) {
		/* Reusing an existing superblock */
		BUG_ON(sb->s_root == NULL);
		kfree(root);
		root = NULL;
	} else {
		/* New superblock */
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto free_cg_links;
		}

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&inode->i_mutex);
		mutex_unlock(&cgroup_mutex);
	}

	return simple_set_mnt(mnt, sb);

 free_cg_links:
	free_cg_links(&tmp_cg_links);
 drop_new_super:
	up_write(&sb->s_umount);
	deactivate_super(sb);
	return ret;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	kfree(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.get_sb = cgroup_get_sb,
	.kill_sb = cgroup_kill_sb,
};

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held or else with an RCU-protected cgroup
 * reference.  Writes path of cgroup into buf.  Returns 0 on success,
 * -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;
	struct dentry *dentry = rcu_dereference(cgrp->dentry);

	if (!dentry || cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cgrp->dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;
		dentry = rcu_dereference(cgrp->dentry);
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
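
/*
 * Minimal usage sketch (illustrative): callers typically feed a
 * page-sized buffer and print the result, e.g.
 *
 *	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *
 *	if (buf) {
 *		rcu_read_lock();
 *		if (!cgroup_path(cgrp, buf, PAGE_SIZE))
 *			printk(KERN_DEBUG "cgroup: %s\n", buf);
 *		rcu_read_unlock();
 *		kfree(buf);
 *	}
 *
 * Note the path is built from the end of the buffer backwards and then
 * moved to the front, so the whole buffer must be writable.
 */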

/*
 * Return the first subsystem attached to a cgroup's hierarchy, and
 * its subsystem id.
 */

static void get_first_subsys(const struct cgroup *cgrp,
			struct cgroup_subsys_state **css, int *subsys_id)
{
	const struct cgroupfs_root *root = cgrp->root;
	const struct cgroup_subsys *test_ss;
	BUG_ON(list_empty(&root->subsys_list));
	test_ss = list_entry(root->subsys_list.next,
			     struct cgroup_subsys, sibling);
	if (css) {
		*css = cgrp->subsys[test_ss->subsys_id];
		BUG_ON(!*css);
	}
	if (subsys_id)
		*subsys_id = test_ss->subsys_id;
}

/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval = 0;
	struct cgroup_subsys *ss;
	struct cgroup *oldcgrp;
	struct css_set *cg;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;
	int subsys_id;

	get_first_subsys(cgrp, NULL, &subsys_id);

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup(tsk, subsys_id);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk);
			if (retval)
				return retval;
		} else if (!capable(CAP_SYS_ADMIN)) {
			const struct cred *cred = current_cred(), *tcred;

			/* No can_attach() - check perms generically */
			tcred = __task_cred(tsk);
			if (cred->euid != tcred->uid &&
			    cred->euid != tcred->suid) {
				return -EACCES;
			}
		}
	}

	task_lock(tsk);
	cg = tsk->cgroups;
	get_css_set(cg);
	task_unlock(tsk);
	/*
	 * Locate or allocate a new css_set for this task,
	 * based on its final set of cgroups
	 */
	newcg = find_css_set(cg, cgrp);
	put_css_set(cg);
	if (!newcg)
		return -ENOMEM;

	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		return -ESRCH;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list)) {
		list_del(&tsk->cg_list);
		list_add(&tsk->cg_list, &newcg->tasks);
	}
	write_unlock(&css_set_lock);

	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
	put_css_set(cg);
	return 0;
}
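
/*
 * Illustrative caller sketch: attach the current task to a cgroup while
 * holding cgroup_mutex as the function requires (this is essentially
 * what cgroup_tasks_write() below ends up doing for the "tasks" file):
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	ret = cgroup_attach_task(cgrp, current);
 *	cgroup_unlock();
 */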

/*
 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
 * held. May take task_lock of task
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
{
	struct task_struct *tsk;
	int ret;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			rcu_read_unlock();
			return -ESRCH;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	ret = cgroup_attach_task(cgrp, tsk);
	put_task_struct(tsk);
	return ret;
}

static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	int ret;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	ret = attach_task_by_pid(cgrp, pid);
	cgroup_unlock();
	return ret;
}

/* The various types of files and directories in a cgroup file system */
enum cgroup_filetype {
	FILE_ROOT,
	FILE_DIR,
	FILE_TASKLIST,
	FILE_NOTIFY_ON_RELEASE,
	FILE_RELEASE_AGENT,
};

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the lock should be later released with
 * cgroup_unlock(). On failure returns false with no lock held.
 */
bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}

static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	strcpy(cgrp->root->release_agent_path, buffer);
	cgroup_unlock();
	return 0;
}

static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	cgroup_unlock();
	return 0;
}

/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;     /* nul-terminate */
	strstrip(buffer);
	if (cft->write_u64) {
		u64 val = simple_strtoull(buffer, &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(buffer, &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(cgrp, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}

static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;     /* nul-terminate */
	strstrip(buffer);
	retval = cft->write_string(cgrp, cft, buffer);
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(cgrp, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(cgrp, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				   size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

struct cgroup_seqfile_state {
	struct cftype *cft;
	struct cgroup *cgroup;
};

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;
	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(state->cgroup, cft, &cb);
	}
	return cft->read_seq_string(state->cgroup, cft, m);
}

static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	kfree(seq->private);
	return single_release(inode, file);
}

static struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static struct inode_operations cgroup_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};

static int cgroup_create_file(struct dentry *dentry, int mode,
				struct super_block *sb)
{
	static struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
	};

	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	dentry->d_op = &cgroup_dops;
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cgroup_create_dir - create a directory for an object.
 * @cgrp: the cgroup we create the directory for. It must have a valid
 *        ->parent field. And we are going to fill its ->dentry field.
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
				int mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cgrp->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
	if (!error) {
		dentry->d_fsdata = cgrp;
		inc_nlink(parent->d_inode);
		rcu_assign_pointer(cgrp->dentry, dentry);
		dget(dentry);
	}
	dput(dentry);

	return error;
}

int cgroup_add_file(struct cgroup *cgrp,
		       struct cgroup_subsys *subsys,
		       const struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct dentry *dentry;
	int error;

	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		error = cgroup_create_file(dentry, 0644 | S_IFREG,
						cgrp->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}

int cgroup_add_files(struct cgroup *cgrp,
			struct cgroup_subsys *subsys,
			const struct cftype cft[],
			int count)
{
	int i, err;
	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cgrp, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
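
/*
 * Sketch of how a subsystem uses this (hypothetical subsystem "foo",
 * not part of this file): declare an array of control files and add
 * them from the subsystem's populate callback:
 *
 *	static u64 foo_weight_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return 42;	(hypothetical per-cgroup value)
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "weight",
 *			.read_u64 = foo_weight_read,
 *		},
 *	};
 *
 *	(inside foo_populate(ss, cgrp):)
 *	return cgroup_add_files(cgrp, ss, foo_files, ARRAY_SIZE(foo_files));
 *
 * With default mount options (no "noprefix") the file shows up as
 * "foo.weight" in the cgroup directory.
 */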
1732 
1733 /**
1734  * cgroup_task_count - count the number of tasks in a cgroup.
1735  * @cgrp: the cgroup in question
1736  *
1737  * Return the number of tasks in the cgroup.
1738  */
1739 int cgroup_task_count(const struct cgroup *cgrp)
1740 {
1741 	int count = 0;
1742 	struct cg_cgroup_link *link;
1743 
1744 	read_lock(&css_set_lock);
1745 	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
1746 		count += atomic_read(&link->cg->refcount);
1747 	}
1748 	read_unlock(&css_set_lock);
1749 	return count;
1750 }
1751 
1752 /*
1753  * Advance a list_head iterator.  The iterator should be positioned at
1754  * the start of a css_set
1755  */
1756 static void cgroup_advance_iter(struct cgroup *cgrp,
1757 					  struct cgroup_iter *it)
1758 {
1759 	struct list_head *l = it->cg_link;
1760 	struct cg_cgroup_link *link;
1761 	struct css_set *cg;
1762 
1763 	/* Advance to the next non-empty css_set */
1764 	do {
1765 		l = l->next;
1766 		if (l == &cgrp->css_sets) {
1767 			it->cg_link = NULL;
1768 			return;
1769 		}
1770 		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
1771 		cg = link->cg;
1772 	} while (list_empty(&cg->tasks));
1773 	it->cg_link = l;
1774 	it->task = cg->tasks.next;
1775 }
1776 
1777 /*
1778  * To reduce the fork() overhead for systems that are not actually
1779  * using their cgroups capability, we don't maintain the lists running
1780  * through each css_set to its tasks until we see the list actually
1781  * used - in other words after the first call to cgroup_iter_start().
1782  *
1783  * The tasklist_lock is not held here, as do_each_thread() and
1784  * while_each_thread() are protected by RCU.
1785  */
1786 static void cgroup_enable_task_cg_lists(void)
1787 {
1788 	struct task_struct *p, *g;
1789 	write_lock(&css_set_lock);
1790 	use_task_css_set_links = 1;
1791 	do_each_thread(g, p) {
1792 		task_lock(p);
1793 		/*
1794 		 * We must check whether the process is exiting; otherwise
1795 		 * we race with cgroup_exit(), and the list entry would
1796 		 * never be deleted even though the process has exited.
1797 		 */
1798 		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
1799 			list_add(&p->cg_list, &p->cgroups->tasks);
1800 		task_unlock(p);
1801 	} while_each_thread(g, p);
1802 	write_unlock(&css_set_lock);
1803 }
1804 
1805 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
1806 {
1807 	/*
1808 	 * The first time anyone tries to iterate across a cgroup,
1809 	 * we need to enable the list linking each css_set to its
1810 	 * tasks, and fix up all existing tasks.
1811 	 */
1812 	if (!use_task_css_set_links)
1813 		cgroup_enable_task_cg_lists();
1814 
1815 	read_lock(&css_set_lock);
1816 	it->cg_link = &cgrp->css_sets;
1817 	cgroup_advance_iter(cgrp, it);
1818 }
1819 
1820 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
1821 					struct cgroup_iter *it)
1822 {
1823 	struct task_struct *res;
1824 	struct list_head *l = it->task;
1825 	struct cg_cgroup_link *link;
1826 
1827 	/* If the iterator's cg_link is NULL, we have no tasks */
1828 	if (!it->cg_link)
1829 		return NULL;
1830 	res = list_entry(l, struct task_struct, cg_list);
1831 	/* Advance iterator to find next entry */
1832 	l = l->next;
1833 	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
1834 	if (l == &link->cg->tasks) {
1835 		/* We reached the end of this task list - move on to
1836 		 * the next cg_cgroup_link */
1837 		cgroup_advance_iter(cgrp, it);
1838 	} else {
1839 		it->task = l;
1840 	}
1841 	return res;
1842 }
1843 
1844 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
1845 {
1846 	read_unlock(&css_set_lock);
1847 }
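/*
 * Illustrative usage of the iterator API above (a sketch, not code
 * from this file). css_set_lock is read-held between
 * cgroup_iter_start() and cgroup_iter_end(), so the loop body must
 * not sleep. count_running() is a hypothetical helper.
 *
 *	static int count_running(struct cgroup *cgrp)
 *	{
 *		struct cgroup_iter it;
 *		struct task_struct *tsk;
 *		int n = 0;
 *
 *		cgroup_iter_start(cgrp, &it);
 *		while ((tsk = cgroup_iter_next(cgrp, &it))) {
 *			if (tsk->state == TASK_RUNNING)
 *				n++;
 *		}
 *		cgroup_iter_end(cgrp, &it);
 *		return n;
 *	}
 */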
1848 
1849 static inline int started_after_time(struct task_struct *t1,
1850 				     struct timespec *time,
1851 				     struct task_struct *t2)
1852 {
1853 	int start_diff = timespec_compare(&t1->start_time, time);
1854 	if (start_diff > 0) {
1855 		return 1;
1856 	} else if (start_diff < 0) {
1857 		return 0;
1858 	} else {
1859 		/*
1860 		 * Arbitrarily, if two processes started at the same
1861 		 * time, we'll say that the lower pointer value
1862 		 * started first. Note that t2 may have exited by now
1863 		 * so this may not be a valid pointer any longer, but
1864 		 * that's fine - it still serves to distinguish
1865 		 * between two tasks started (effectively) simultaneously.
1866 		 */
1867 		return t1 > t2;
1868 	}
1869 }
1870 
1871 /*
1872  * This function is a callback from heap_insert() and is used to order
1873  * the heap.
1874  * In this case we order the heap in descending task start time.
1875  */
1876 static inline int started_after(void *p1, void *p2)
1877 {
1878 	struct task_struct *t1 = p1;
1879 	struct task_struct *t2 = p2;
1880 	return started_after_time(t1, &t2->start_time, t2);
1881 }
1882 
1883 /**
1884  * cgroup_scan_tasks - iterate through all the tasks in a cgroup
1885  * @scan: struct cgroup_scanner containing arguments for the scan
1886  *
1887  * Arguments include pointers to callback functions test_task() and
1888  * process_task().
1889  * Iterate through all the tasks in a cgroup, calling test_task() for each,
1890  * and if it returns true, call process_task() for it also.
1891  * The test_task pointer may be NULL, meaning always true (select all tasks).
1892  * Effectively duplicates cgroup_iter_{start,next,end}()
1893  * but does not lock css_set_lock for the call to process_task().
1894  * The struct cgroup_scanner may be embedded in any structure of the caller's
1895  * creation.
1896  * It is guaranteed that process_task() will act on every task that
1897  * is a member of the cgroup for the duration of this call. This
1898  * function may or may not call process_task() for tasks that exit
1899  * or move to a different cgroup during the call, or are forked or
1900  * move into the cgroup during the call.
1901  *
1902  * Note that test_task() may be called with locks held, and may in some
1903  * situations be called multiple times for the same task, so it should
1904  * be cheap.
1905  * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
1906  * pre-allocated and will be used for heap operations (and its "gt" member will
1907  * be overwritten), else a temporary heap will be used (allocation of which
1908  * may cause this function to fail).
1909  */
1910 int cgroup_scan_tasks(struct cgroup_scanner *scan)
1911 {
1912 	int retval, i;
1913 	struct cgroup_iter it;
1914 	struct task_struct *p, *dropped;
1915 	/* Never dereference latest_task, since it's not refcounted */
1916 	struct task_struct *latest_task = NULL;
1917 	struct ptr_heap tmp_heap;
1918 	struct ptr_heap *heap;
1919 	struct timespec latest_time = { 0, 0 };
1920 
1921 	if (scan->heap) {
1922 		/* The caller supplied our heap and pre-allocated its memory */
1923 		heap = scan->heap;
1924 		heap->gt = &started_after;
1925 	} else {
1926 		/* We need to allocate our own heap memory */
1927 		heap = &tmp_heap;
1928 		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
1929 		if (retval)
1930 			/* cannot allocate the heap */
1931 			return retval;
1932 	}
1933 
1934  again:
1935 	/*
1936 	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
1937 	 * to determine which are of interest, and using the scanner's
1938 	 * "process_task" callback to process any of them that need an update.
1939 	 * Since we don't want to hold any locks during the task updates,
1940 	 * gather tasks to be processed in a heap structure.
1941 	 * The heap is sorted by descending task start time.
1942 	 * If the statically-sized heap fills up, we overflow tasks that
1943 	 * started later, and in future iterations only consider tasks that
1944 	 * started after the latest task in the previous pass. This
1945 	 * guarantees forward progress and that we don't miss any tasks.
1946 	 */
1947 	heap->size = 0;
1948 	cgroup_iter_start(scan->cg, &it);
1949 	while ((p = cgroup_iter_next(scan->cg, &it))) {
1950 		/*
1951 		 * Only affect tasks that qualify per the caller's callback,
1952 		 * if one was provided
1953 		 */
1954 		if (scan->test_task && !scan->test_task(p, scan))
1955 			continue;
1956 		/*
1957 		 * Only process tasks that started after the last task
1958 		 * we processed
1959 		 */
1960 		if (!started_after_time(p, &latest_time, latest_task))
1961 			continue;
1962 		dropped = heap_insert(heap, p);
1963 		if (dropped == NULL) {
1964 			/*
1965 			 * The new task was inserted; the heap wasn't
1966 			 * previously full
1967 			 */
1968 			get_task_struct(p);
1969 		} else if (dropped != p) {
1970 			/*
1971 			 * The new task was inserted, and pushed out a
1972 			 * different task
1973 			 */
1974 			get_task_struct(p);
1975 			put_task_struct(dropped);
1976 		}
1977 		/*
1978 		 * Else the new task was newer than anything already in
1979 		 * the heap and wasn't inserted
1980 		 */
1981 	}
1982 	cgroup_iter_end(scan->cg, &it);
1983 
1984 	if (heap->size) {
1985 		for (i = 0; i < heap->size; i++) {
1986 			struct task_struct *q = heap->ptrs[i];
1987 			if (i == 0) {
1988 				latest_time = q->start_time;
1989 				latest_task = q;
1990 			}
1991 			/* Process the task per the caller's callback */
1992 			scan->process_task(q, scan);
1993 			put_task_struct(q);
1994 		}
1995 		/*
1996 		 * If we had to process any tasks at all, scan again
1997 		 * in case some of them were in the middle of forking
1998 		 * children that didn't get processed.
1999 		 * Not the most efficient way to do it, but it avoids
2000 		 * having to take callback_mutex in the fork path
2001 		 */
2002 		goto again;
2003 	}
2004 	if (heap == &tmp_heap)
2005 		heap_free(&tmp_heap);
2006 	return 0;
2007 }
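/*
 * Illustrative caller of cgroup_scan_tasks() (a sketch, not code from
 * this file). The is_interesting() and update_task() callbacks and
 * scan_my_cgroup() itself are hypothetical. With .heap left NULL, a
 * temporary heap is allocated for the duration of the call.
 *
 *	static int is_interesting(struct task_struct *p,
 *				  struct cgroup_scanner *scan)
 *	{
 *		return !(p->flags & PF_EXITING);
 *	}
 *
 *	static void update_task(struct task_struct *p,
 *				struct cgroup_scanner *scan)
 *	{
 *		... act on p; css_set_lock is not held here ...
 *	}
 *
 *	static int scan_my_cgroup(struct cgroup *cgrp)
 *	{
 *		struct cgroup_scanner scan = {
 *			.cg = cgrp,
 *			.test_task = is_interesting,
 *			.process_task = update_task,
 *			.heap = NULL,
 *		};
 *
 *		return cgroup_scan_tasks(&scan);
 *	}
 */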
2008 
2009 /*
2010  * Stuff for reading the 'tasks' file.
2011  *
2012  * Reading this file can return large amounts of data if a cgroup has
2013  * *lots* of attached tasks. So it may need several calls to read(),
2014  * but we cannot guarantee that the information we produce is correct
2015  * unless we produce it entirely atomically.
2016  *
2017  */
2018 
2019 /*
2020  * Load into 'pidarray' up to 'npids' of the tasks using cgroup
2021  * 'cgrp'.  Return actual number of pids loaded.  No need to
2022  * task_lock(p) when reading out p->cgroup, since we're in an RCU
2023  * read section, so the css_set can't go away, and is
2024  * immutable after creation.
2025  */
2026 static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
2027 {
2028 	int n = 0, pid;
2029 	struct cgroup_iter it;
2030 	struct task_struct *tsk;
2031 	cgroup_iter_start(cgrp, &it);
2032 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
2033 		if (unlikely(n == npids))
2034 			break;
2035 		pid = task_pid_vnr(tsk);
2036 		if (pid > 0)
2037 			pidarray[n++] = pid;
2038 	}
2039 	cgroup_iter_end(cgrp, &it);
2040 	return n;
2041 }
2042 
2043 /**
2044  * cgroupstats_build - build and fill cgroupstats
2045  * @stats: cgroupstats to fill information into
2046  * @dentry: A dentry entry belonging to the cgroup for which stats have
2047  * been requested.
2048  *
2049  * Build and fill cgroupstats so that taskstats can export it to user
2050  * space.
2051  */
2052 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
2053 {
2054 	int ret = -EINVAL;
2055 	struct cgroup *cgrp;
2056 	struct cgroup_iter it;
2057 	struct task_struct *tsk;
2058 
2059 	/*
2060 	 * Validate dentry by checking the superblock operations,
2061 	 * and make sure it's a directory.
2062 	 */
2063 	if (dentry->d_sb->s_op != &cgroup_ops ||
2064 	    !S_ISDIR(dentry->d_inode->i_mode))
2065 		 goto err;
2066 
2067 	ret = 0;
2068 	cgrp = dentry->d_fsdata;
2069 
2070 	cgroup_iter_start(cgrp, &it);
2071 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
2072 		switch (tsk->state) {
2073 		case TASK_RUNNING:
2074 			stats->nr_running++;
2075 			break;
2076 		case TASK_INTERRUPTIBLE:
2077 			stats->nr_sleeping++;
2078 			break;
2079 		case TASK_UNINTERRUPTIBLE:
2080 			stats->nr_uninterruptible++;
2081 			break;
2082 		case TASK_STOPPED:
2083 			stats->nr_stopped++;
2084 			break;
2085 		default:
2086 			if (delayacct_is_task_waiting_on_io(tsk))
2087 				stats->nr_io_wait++;
2088 			break;
2089 		}
2090 	}
2091 	cgroup_iter_end(cgrp, &it);
2092 
2093 err:
2094 	return ret;
2095 }
2096 
2097 static int cmppid(const void *a, const void *b)
2098 {
2099 	return *(pid_t *)a - *(pid_t *)b;
2100 }
2101 
2102 
2103 /*
2104  * seq_file methods for the "tasks" file. The seq_file position is the
2105  * next pid to display; the seq_file iterator is a pointer to the pid
2106  * in the cgroup->tasks_pids array.
2107  */
2108 
2109 static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
2110 {
2111 	/*
2112 	 * Initially we receive a position value that corresponds to
2113 	 * one more than the last pid shown (or 0 on the first call or
2114 	 * after a seek to the start). Use a binary search to find the
2115 	 * next pid to display, if any
2116 	 */
2117 	struct cgroup *cgrp = s->private;
2118 	int index = 0, pid = *pos;
2119 	int *iter;
2120 
2121 	down_read(&cgrp->pids_mutex);
2122 	if (pid) {
2123 		int end = cgrp->pids_length;
2124 
2125 		while (index < end) {
2126 			int mid = (index + end) / 2;
2127 			if (cgrp->tasks_pids[mid] == pid) {
2128 				index = mid;
2129 				break;
2130 			} else if (cgrp->tasks_pids[mid] <= pid)
2131 				index = mid + 1;
2132 			else
2133 				end = mid;
2134 		}
2135 	}
2136 	/* If we're off the end of the array, we're done */
2137 	if (index >= cgrp->pids_length)
2138 		return NULL;
2139 	/* Update the abstract position to be the actual pid that we found */
2140 	iter = cgrp->tasks_pids + index;
2141 	*pos = *iter;
2142 	return iter;
2143 }
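/*
 * Worked example for the binary search above (illustrative): with
 * tasks_pids = {10, 20, 30} and a read resumed at *pos = 21 (i.e. the
 * last pid shown was 20), the loop narrows index to 2, so the
 * iterator resumes at pid 30 and *pos is rewritten to 30.
 */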
2144 
2145 static void cgroup_tasks_stop(struct seq_file *s, void *v)
2146 {
2147 	struct cgroup *cgrp = s->private;
2148 	up_read(&cgrp->pids_mutex);
2149 }
2150 
2151 static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
2152 {
2153 	struct cgroup *cgrp = s->private;
2154 	int *p = v;
2155 	int *end = cgrp->tasks_pids + cgrp->pids_length;
2156 
2157 	/*
2158 	 * Advance to the next pid in the array. If this goes off the
2159 	 * end, we're done
2160 	 */
2161 	p++;
2162 	if (p >= end) {
2163 		return NULL;
2164 	} else {
2165 		*pos = *p;
2166 		return p;
2167 	}
2168 }
2169 
2170 static int cgroup_tasks_show(struct seq_file *s, void *v)
2171 {
2172 	return seq_printf(s, "%d\n", *(int *)v);
2173 }
2174 
2175 static struct seq_operations cgroup_tasks_seq_operations = {
2176 	.start = cgroup_tasks_start,
2177 	.stop = cgroup_tasks_stop,
2178 	.next = cgroup_tasks_next,
2179 	.show = cgroup_tasks_show,
2180 };
2181 
2182 static void release_cgroup_pid_array(struct cgroup *cgrp)
2183 {
2184 	down_write(&cgrp->pids_mutex);
2185 	BUG_ON(!cgrp->pids_use_count);
2186 	if (!--cgrp->pids_use_count) {
2187 		kfree(cgrp->tasks_pids);
2188 		cgrp->tasks_pids = NULL;
2189 		cgrp->pids_length = 0;
2190 	}
2191 	up_write(&cgrp->pids_mutex);
2192 }
2193 
2194 static int cgroup_tasks_release(struct inode *inode, struct file *file)
2195 {
2196 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2197 
2198 	if (!(file->f_mode & FMODE_READ))
2199 		return 0;
2200 
2201 	release_cgroup_pid_array(cgrp);
2202 	return seq_release(inode, file);
2203 }
2204 
2205 static struct file_operations cgroup_tasks_operations = {
2206 	.read = seq_read,
2207 	.llseek = seq_lseek,
2208 	.write = cgroup_file_write,
2209 	.release = cgroup_tasks_release,
2210 };
2211 
2212 /*
2213  * Handle an open on 'tasks' file.  Prepare an array containing the
2214  * process id's of tasks currently attached to the cgroup being opened.
2215  */
2216 
2217 static int cgroup_tasks_open(struct inode *unused, struct file *file)
2218 {
2219 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2220 	pid_t *pidarray;
2221 	int npids;
2222 	int retval;
2223 
2224 	/* Nothing to do for write-only files */
2225 	if (!(file->f_mode & FMODE_READ))
2226 		return 0;
2227 
2228 	/*
2229 	 * If cgroup gets more users after we read count, we won't have
2230 	 * enough space - tough.  This race is indistinguishable to the
2231 	 * caller from the case that the additional cgroup users didn't
2232 	 * show up until sometime later on.
2233 	 */
2234 	npids = cgroup_task_count(cgrp);
2235 	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
2236 	if (!pidarray)
2237 		return -ENOMEM;
2238 	npids = pid_array_load(pidarray, npids, cgrp);
2239 	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
2240 
2241 	/*
2242 	 * Store the array in the cgroup, freeing the old
2243 	 * array if necessary
2244 	 */
2245 	down_write(&cgrp->pids_mutex);
2246 	kfree(cgrp->tasks_pids);
2247 	cgrp->tasks_pids = pidarray;
2248 	cgrp->pids_length = npids;
2249 	cgrp->pids_use_count++;
2250 	up_write(&cgrp->pids_mutex);
2251 
2252 	file->f_op = &cgroup_tasks_operations;
2253 
2254 	retval = seq_open(file, &cgroup_tasks_seq_operations);
2255 	if (retval) {
2256 		release_cgroup_pid_array(cgrp);
2257 		return retval;
2258 	}
2259 	((struct seq_file *)file->private_data)->private = cgrp;
2260 	return 0;
2261 }
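/*
 * From userspace, each open() of a cgroup's "tasks" file thus gets a
 * snapshot of the attached pids, sorted ascending. Illustrative
 * reader (a sketch; the /dev/cgroup mount point and "mygroup" are
 * assumptions):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/dev/cgroup/mygroup/tasks", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */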
2262 
2263 static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
2264 					    struct cftype *cft)
2265 {
2266 	return notify_on_release(cgrp);
2267 }
2268 
2269 static int cgroup_write_notify_on_release(struct cgroup *cgrp,
2270 					  struct cftype *cft,
2271 					  u64 val)
2272 {
2273 	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
2274 	if (val)
2275 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
2276 	else
2277 		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
2278 	return 0;
2279 }
2280 
2281 /*
2282  * for the common functions, 'private' gives the type of file
2283  */
2284 static struct cftype files[] = {
2285 	{
2286 		.name = "tasks",
2287 		.open = cgroup_tasks_open,
2288 		.write_u64 = cgroup_tasks_write,
2289 		.release = cgroup_tasks_release,
2290 		.private = FILE_TASKLIST,
2291 	},
2292 
2293 	{
2294 		.name = "notify_on_release",
2295 		.read_u64 = cgroup_read_notify_on_release,
2296 		.write_u64 = cgroup_write_notify_on_release,
2297 		.private = FILE_NOTIFY_ON_RELEASE,
2298 	},
2299 };
2300 
2301 static struct cftype cft_release_agent = {
2302 	.name = "release_agent",
2303 	.read_seq_string = cgroup_release_agent_show,
2304 	.write_string = cgroup_release_agent_write,
2305 	.max_write_len = PATH_MAX,
2306 	.private = FILE_RELEASE_AGENT,
2307 };
2308 
2309 static int cgroup_populate_dir(struct cgroup *cgrp)
2310 {
2311 	int err;
2312 	struct cgroup_subsys *ss;
2313 
2314 	/* First clear out any existing files */
2315 	cgroup_clear_directory(cgrp->dentry);
2316 
2317 	err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
2318 	if (err < 0)
2319 		return err;
2320 
2321 	if (cgrp == cgrp->top_cgroup) {
2322 		if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
2323 			return err;
2324 	}
2325 
2326 	for_each_subsys(cgrp->root, ss) {
2327 		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
2328 			return err;
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static void init_cgroup_css(struct cgroup_subsys_state *css,
2335 			       struct cgroup_subsys *ss,
2336 			       struct cgroup *cgrp)
2337 {
2338 	css->cgroup = cgrp;
2339 	atomic_set(&css->refcnt, 1);
2340 	css->flags = 0;
2341 	if (cgrp == dummytop)
2342 		set_bit(CSS_ROOT, &css->flags);
2343 	BUG_ON(cgrp->subsys[ss->subsys_id]);
2344 	cgrp->subsys[ss->subsys_id] = css;
2345 }
2346 
2347 static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
2348 {
2349 	/* We need to take each hierarchy_mutex in a consistent order */
2350 	int i;
2351 
2352 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2353 		struct cgroup_subsys *ss = subsys[i];
2354 		if (ss->root == root)
2355 			mutex_lock(&ss->hierarchy_mutex);
2356 	}
2357 }
2358 
2359 static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
2360 {
2361 	int i;
2362 
2363 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2364 		struct cgroup_subsys *ss = subsys[i];
2365 		if (ss->root == root)
2366 			mutex_unlock(&ss->hierarchy_mutex);
2367 	}
2368 }
2369 
2370 /*
2371  * cgroup_create - create a cgroup
2372  * @parent: cgroup that will be parent of the new cgroup
2373  * @dentry: dentry of the new cgroup
2374  * @mode: mode to set on new inode
2375  *
2376  * Must be called with the mutex on the parent inode held
2377  */
2378 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2379 			     int mode)
2380 {
2381 	struct cgroup *cgrp;
2382 	struct cgroupfs_root *root = parent->root;
2383 	int err = 0;
2384 	struct cgroup_subsys *ss;
2385 	struct super_block *sb = root->sb;
2386 
2387 	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
2388 	if (!cgrp)
2389 		return -ENOMEM;
2390 
2391 	/* Grab a reference on the superblock so the hierarchy doesn't
2392 	 * get deleted on unmount if there are child cgroups.  This
2393 	 * can be done outside cgroup_mutex, since the sb can't
2394 	 * disappear while someone has an open control file on the
2395 	 * fs */
2396 	atomic_inc(&sb->s_active);
2397 
2398 	mutex_lock(&cgroup_mutex);
2399 
2400 	init_cgroup_housekeeping(cgrp);
2401 
2402 	cgrp->parent = parent;
2403 	cgrp->root = parent->root;
2404 	cgrp->top_cgroup = parent->top_cgroup;
2405 
2406 	if (notify_on_release(parent))
2407 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
2408 
2409 	for_each_subsys(root, ss) {
2410 		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
2411 		if (IS_ERR(css)) {
2412 			err = PTR_ERR(css);
2413 			goto err_destroy;
2414 		}
2415 		init_cgroup_css(css, ss, cgrp);
2416 	}
2417 
2418 	cgroup_lock_hierarchy(root);
2419 	list_add(&cgrp->sibling, &cgrp->parent->children);
2420 	cgroup_unlock_hierarchy(root);
2421 	root->number_of_cgroups++;
2422 
2423 	err = cgroup_create_dir(cgrp, dentry, mode);
2424 	if (err < 0)
2425 		goto err_remove;
2426 
2427 	/* The cgroup directory was pre-locked for us */
2428 	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
2429 
2430 	err = cgroup_populate_dir(cgrp);
2431 	/* If err < 0, we have a half-filled directory - oh well ;) */
2432 
2433 	mutex_unlock(&cgroup_mutex);
2434 	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
2435 
2436 	return 0;
2437 
2438  err_remove:
2439 
2440 	cgroup_lock_hierarchy(root);
2441 	list_del(&cgrp->sibling);
2442 	cgroup_unlock_hierarchy(root);
2443 	root->number_of_cgroups--;
2444 
2445  err_destroy:
2446 
2447 	for_each_subsys(root, ss) {
2448 		if (cgrp->subsys[ss->subsys_id])
2449 			ss->destroy(ss, cgrp);
2450 	}
2451 
2452 	mutex_unlock(&cgroup_mutex);
2453 
2454 	/* Release the reference count that we took on the superblock */
2455 	deactivate_super(sb);
2456 
2457 	kfree(cgrp);
2458 	return err;
2459 }
2460 
2461 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2462 {
2463 	struct cgroup *c_parent = dentry->d_parent->d_fsdata;
2464 
2465 	/* the vfs holds inode->i_mutex already */
2466 	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
2467 }
2468 
2469 static int cgroup_has_css_refs(struct cgroup *cgrp)
2470 {
2471 	/* Check the reference count on each subsystem. Since we
2472 	 * already established that there are no tasks in the
2473 	 * cgroup, if the css refcount is also 1, then there should
2474 	 * be no outstanding references, so the subsystem is safe to
2475 	 * destroy. We scan across all subsystems rather than using
2476 	 * the per-hierarchy linked list of mounted subsystems since
2477 	 * we can be called via check_for_release() with no
2478 	 * synchronization other than RCU, and the subsystem linked
2479 	 * list isn't RCU-safe */
2480 	int i;
2481 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2482 		struct cgroup_subsys *ss = subsys[i];
2483 		struct cgroup_subsys_state *css;
2484 		/* Skip subsystems not in this hierarchy */
2485 		if (ss->root != cgrp->root)
2486 			continue;
2487 		css = cgrp->subsys[ss->subsys_id];
2488 		/* When called from check_for_release() it's possible
2489 		 * that by this point the cgroup has been removed
2490 		 * and the css deleted. But a false-positive doesn't
2491 		 * matter, since it can only happen if the cgroup
2492 		 * has been deleted and hence no longer needs the
2493 		 * release agent to be called anyway. */
2494 		if (css && (atomic_read(&css->refcnt) > 1))
2495 			return 1;
2496 	}
2497 	return 0;
2498 }
2499 
2500 /*
2501  * Atomically mark all (or else none) of the cgroup's CSS objects as
2502  * CSS_REMOVED. Return true on success, or false if the cgroup has
2503  * busy subsystems. Call with cgroup_mutex held
2504  */
2505 
2506 static int cgroup_clear_css_refs(struct cgroup *cgrp)
2507 {
2508 	struct cgroup_subsys *ss;
2509 	unsigned long flags;
2510 	bool failed = false;
2511 	local_irq_save(flags);
2512 	for_each_subsys(cgrp->root, ss) {
2513 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2514 		int refcnt;
2515 		while (1) {
2516 			/* We can only remove a CSS with a refcnt==1 */
2517 			refcnt = atomic_read(&css->refcnt);
2518 			if (refcnt > 1) {
2519 				failed = true;
2520 				goto done;
2521 			}
2522 			BUG_ON(!refcnt);
2523 			/*
2524 			 * Drop the refcnt to 0 while we check other
2525 			 * subsystems. This will cause any racing
2526 			 * css_tryget() to spin until we set the
2527 			 * CSS_REMOVED bits or abort
2528 			 */
2529 			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
2530 				break;
2531 			cpu_relax();
2532 		}
2533 	}
2534  done:
2535 	for_each_subsys(cgrp->root, ss) {
2536 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2537 		if (failed) {
2538 			/*
2539 			 * Restore old refcnt if we previously managed
2540 			 * to clear it from 1 to 0
2541 			 */
2542 			if (!atomic_read(&css->refcnt))
2543 				atomic_set(&css->refcnt, 1);
2544 		} else {
2545 			/* Commit the fact that the CSS is removed */
2546 			set_bit(CSS_REMOVED, &css->flags);
2547 		}
2548 	}
2549 	local_irq_restore(flags);
2550 	return !failed;
2551 }
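/*
 * Illustrative timeline for the protocol above (a sketch): suppose two
 * subsystems A and B are attached to the hierarchy.
 *
 *   1. A's css refcnt is 1: the cmpxchg drops it 1 -> 0, and any
 *      racing css_tryget() callers now spin.
 *   2. B's css refcnt is 2: B is busy, so failed is set.
 *   3. The second pass restores A's refcnt from 0 back to 1, the
 *      function returns false, and the rmdir fails with -EBUSY.
 *
 * Only if every css drops to 0 does the second pass set CSS_REMOVED on
 * all of them, committing the removal atomically.
 */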
2552 
2553 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2554 {
2555 	struct cgroup *cgrp = dentry->d_fsdata;
2556 	struct dentry *d;
2557 	struct cgroup *parent;
2558 
2559 	/* the VFS already holds i_mutex on both the parent and victim inodes */
2560 
2561 	mutex_lock(&cgroup_mutex);
2562 	if (atomic_read(&cgrp->count) != 0) {
2563 		mutex_unlock(&cgroup_mutex);
2564 		return -EBUSY;
2565 	}
2566 	if (!list_empty(&cgrp->children)) {
2567 		mutex_unlock(&cgroup_mutex);
2568 		return -EBUSY;
2569 	}
2570 	mutex_unlock(&cgroup_mutex);
2571 
2572 	/*
2573 	 * Call pre_destroy handlers of subsys. Notify subsystems
2574 	 * that rmdir() request comes.
2575 	 */
2576 	cgroup_call_pre_destroy(cgrp);
2577 
2578 	mutex_lock(&cgroup_mutex);
2579 	parent = cgrp->parent;
2580 
2581 	if (atomic_read(&cgrp->count)
2582 	    || !list_empty(&cgrp->children)
2583 	    || !cgroup_clear_css_refs(cgrp)) {
2584 		mutex_unlock(&cgroup_mutex);
2585 		return -EBUSY;
2586 	}
2587 
2588 	spin_lock(&release_list_lock);
2589 	set_bit(CGRP_REMOVED, &cgrp->flags);
2590 	if (!list_empty(&cgrp->release_list))
2591 		list_del(&cgrp->release_list);
2592 	spin_unlock(&release_list_lock);
2593 
2594 	cgroup_lock_hierarchy(cgrp->root);
2595 	/* delete this cgroup from parent->children */
2596 	list_del(&cgrp->sibling);
2597 	cgroup_unlock_hierarchy(cgrp->root);
2598 
2599 	spin_lock(&cgrp->dentry->d_lock);
2600 	d = dget(cgrp->dentry);
2601 	spin_unlock(&d->d_lock);
2602 
2603 	cgroup_d_remove_dir(d);
2604 	dput(d);
2605 
2606 	set_bit(CGRP_RELEASABLE, &parent->flags);
2607 	check_for_release(parent);
2608 
2609 	mutex_unlock(&cgroup_mutex);
2610 	return 0;
2611 }
2612 
2613 static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
2614 {
2615 	struct cgroup_subsys_state *css;
2616 
2617 	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
2618 
2619 	/* Create the top cgroup state for this subsystem */
2620 	list_add(&ss->sibling, &rootnode.subsys_list);
2621 	ss->root = &rootnode;
2622 	css = ss->create(ss, dummytop);
2623 	/* We don't handle early failures gracefully */
2624 	BUG_ON(IS_ERR(css));
2625 	init_cgroup_css(css, ss, dummytop);
2626 
2627 	/* Update the init_css_set to contain a subsys
2628 	 * pointer to this state - since the subsystem is
2629 	 * newly registered, all tasks and hence the
2630 	 * init_css_set is in the subsystem's top cgroup. */
2631 	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
2632 
2633 	need_forkexit_callback |= ss->fork || ss->exit;
2634 
2635 	/* At system boot, before all subsystems have been
2636 	 * registered, no tasks have been forked, so we don't
2637 	 * need to invoke fork callbacks here. */
2638 	BUG_ON(!list_empty(&init_task.tasks));
2639 
2640 	mutex_init(&ss->hierarchy_mutex);
2641 	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
2642 	ss->active = 1;
2643 }
2644 
2645 /**
2646  * cgroup_init_early - cgroup initialization at system boot
2647  *
2648  * Initialize cgroups at system boot, and initialize any
2649  * subsystems that request early init.
2650  */
2651 int __init cgroup_init_early(void)
2652 {
2653 	int i;
2654 	atomic_set(&init_css_set.refcount, 1);
2655 	INIT_LIST_HEAD(&init_css_set.cg_links);
2656 	INIT_LIST_HEAD(&init_css_set.tasks);
2657 	INIT_HLIST_NODE(&init_css_set.hlist);
2658 	css_set_count = 1;
2659 	init_cgroup_root(&rootnode);
2660 	root_count = 1;
2661 	init_task.cgroups = &init_css_set;
2662 
2663 	init_css_set_link.cg = &init_css_set;
2664 	list_add(&init_css_set_link.cgrp_link_list,
2665 		 &rootnode.top_cgroup.css_sets);
2666 	list_add(&init_css_set_link.cg_link_list,
2667 		 &init_css_set.cg_links);
2668 
2669 	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
2670 		INIT_HLIST_HEAD(&css_set_table[i]);
2671 
2672 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2673 		struct cgroup_subsys *ss = subsys[i];
2674 
2675 		BUG_ON(!ss->name);
2676 		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
2677 		BUG_ON(!ss->create);
2678 		BUG_ON(!ss->destroy);
2679 		if (ss->subsys_id != i) {
2680 			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
2681 			       ss->name, ss->subsys_id);
2682 			BUG();
2683 		}
2684 
2685 		if (ss->early_init)
2686 			cgroup_init_subsys(ss);
2687 	}
2688 	return 0;
2689 }
2690 
2691 /**
2692  * cgroup_init - cgroup initialization
2693  *
2694  * Register cgroup filesystem and /proc file, and initialize
2695  * any subsystems that didn't request early init.
2696  */
2697 int __init cgroup_init(void)
2698 {
2699 	int err;
2700 	int i;
2701 	struct hlist_head *hhead;
2702 
2703 	err = bdi_init(&cgroup_backing_dev_info);
2704 	if (err)
2705 		return err;
2706 
2707 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2708 		struct cgroup_subsys *ss = subsys[i];
2709 		if (!ss->early_init)
2710 			cgroup_init_subsys(ss);
2711 	}
2712 
2713 	/* Add init_css_set to the hash table */
2714 	hhead = css_set_hash(init_css_set.subsys);
2715 	hlist_add_head(&init_css_set.hlist, hhead);
2716 
2717 	err = register_filesystem(&cgroup_fs_type);
2718 	if (err < 0)
2719 		goto out;
2720 
2721 	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
2722 
2723 out:
2724 	if (err)
2725 		bdi_destroy(&cgroup_backing_dev_info);
2726 
2727 	return err;
2728 }
2729 
2730 /*
2731  * proc_cgroup_show()
2732  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
2733  *  - Used for /proc/<pid>/cgroup.
2734  *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
2735  *    doesn't really matter if tsk->cgroup changes after we read it,
2736  *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
2737  *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
2738  *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
2739  *    cgroup to top_cgroup.
2740  */
2741 
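/*
 * Example output (illustrative), given the seq_printf() format below -
 * one "subsys_bits:subsys_names:path" line per active hierarchy:
 *
 *	2:cpuset:/batch/daemons
 *	4:ns:/
 */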
2742 /* TODO: Use a proper seq_file iterator */
2743 static int proc_cgroup_show(struct seq_file *m, void *v)
2744 {
2745 	struct pid *pid;
2746 	struct task_struct *tsk;
2747 	char *buf;
2748 	int retval;
2749 	struct cgroupfs_root *root;
2750 
2751 	retval = -ENOMEM;
2752 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2753 	if (!buf)
2754 		goto out;
2755 
2756 	retval = -ESRCH;
2757 	pid = m->private;
2758 	tsk = get_pid_task(pid, PIDTYPE_PID);
2759 	if (!tsk)
2760 		goto out_free;
2761 
2762 	retval = 0;
2763 
2764 	mutex_lock(&cgroup_mutex);
2765 
2766 	for_each_active_root(root) {
2767 		struct cgroup_subsys *ss;
2768 		struct cgroup *cgrp;
2769 		int subsys_id;
2770 		int count = 0;
2771 
2772 		seq_printf(m, "%lu:", root->subsys_bits);
2773 		for_each_subsys(root, ss)
2774 			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
2775 		seq_putc(m, ':');
2776 		get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
2777 		cgrp = task_cgroup(tsk, subsys_id);
2778 		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
2779 		if (retval < 0)
2780 			goto out_unlock;
2781 		seq_puts(m, buf);
2782 		seq_putc(m, '\n');
2783 	}
2784 
2785 out_unlock:
2786 	mutex_unlock(&cgroup_mutex);
2787 	put_task_struct(tsk);
2788 out_free:
2789 	kfree(buf);
2790 out:
2791 	return retval;
2792 }
2793 
2794 static int cgroup_open(struct inode *inode, struct file *file)
2795 {
2796 	struct pid *pid = PROC_I(inode)->pid;
2797 	return single_open(file, proc_cgroup_show, pid);
2798 }
2799 
2800 struct file_operations proc_cgroup_operations = {
2801 	.open		= cgroup_open,
2802 	.read		= seq_read,
2803 	.llseek		= seq_lseek,
2804 	.release	= single_release,
2805 };
2806 
2807 /* Display information about each subsystem and each hierarchy */
2808 static int proc_cgroupstats_show(struct seq_file *m, void *v)
2809 {
2810 	int i;
2811 
2812 	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
2813 	mutex_lock(&cgroup_mutex);
2814 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2815 		struct cgroup_subsys *ss = subsys[i];
2816 		seq_printf(m, "%s\t%lu\t%d\t%d\n",
2817 			   ss->name, ss->root->subsys_bits,
2818 			   ss->root->number_of_cgroups, !ss->disabled);
2819 	}
2820 	mutex_unlock(&cgroup_mutex);
2821 	return 0;
2822 }
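/*
 * Example /proc/cgroups output (illustrative), matching the header and
 * seq_printf() format above; fields are tab-separated, and the
 * "hierarchy" column is the root's subsys_bits value:
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	4	1
 *	ns	0	1	1
 */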
2823 
2824 static int cgroupstats_open(struct inode *inode, struct file *file)
2825 {
2826 	return single_open(file, proc_cgroupstats_show, NULL);
2827 }
2828 
2829 static struct file_operations proc_cgroupstats_operations = {
2830 	.open = cgroupstats_open,
2831 	.read = seq_read,
2832 	.llseek = seq_lseek,
2833 	.release = single_release,
2834 };
2835 
2836 /**
2837  * cgroup_fork - attach a newly forked task to its parent's cgroup.
2838  * @child: pointer to task_struct of the newly forked child process.
2839  *
2840  * Description: A task inherits its parent's cgroup at fork().
2841  *
2842  * A pointer to the shared css_set was automatically copied in
2843  * fork.c by dup_task_struct().  However, we ignore that copy, since
2844  * it was not made under the protection of RCU or cgroup_mutex, so
2845  * might no longer be a valid css_set pointer.  cgroup_attach_task() might
2846  * have already changed current->cgroups, allowing the previously
2847  * referenced css_set to be removed and freed.
2848  *
2849  * At the point that cgroup_fork() is called, 'current' is the parent
2850  * task, and the passed argument 'child' points to the child task.
2851  */
2852 void cgroup_fork(struct task_struct *child)
2853 {
2854 	task_lock(current);
2855 	child->cgroups = current->cgroups;
2856 	get_css_set(child->cgroups);
2857 	task_unlock(current);
2858 	INIT_LIST_HEAD(&child->cg_list);
2859 }
2860 
2861 /**
2862  * cgroup_fork_callbacks - run fork callbacks
2863  * @child: the new task
2864  *
2865  * Called on a new task shortly before adding it to the
2866  * tasklist. No need to take any locks since no one can
2867  * be operating on this task.
2868  */
2869 void cgroup_fork_callbacks(struct task_struct *child)
2870 {
2871 	if (need_forkexit_callback) {
2872 		int i;
2873 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2874 			struct cgroup_subsys *ss = subsys[i];
2875 			if (ss->fork)
2876 				ss->fork(ss, child);
2877 		}
2878 	}
2879 }
2880 
2881 /**
2882  * cgroup_post_fork - called on a new task after adding it to the task list
2883  * @child: the task in question
2884  *
2885  * Adds the task to the list running through its css_set if necessary.
2886  * Has to be after the task is visible on the task list in case we race
2887  * with the first call to cgroup_iter_start() - to guarantee that the
2888  * new task ends up on its list.
2889  */
2890 void cgroup_post_fork(struct task_struct *child)
2891 {
2892 	if (use_task_css_set_links) {
2893 		write_lock(&css_set_lock);
2894 		task_lock(child);
2895 		if (list_empty(&child->cg_list))
2896 			list_add(&child->cg_list, &child->cgroups->tasks);
2897 		task_unlock(child);
2898 		write_unlock(&css_set_lock);
2899 	}
2900 }
2901 /**
2902  * cgroup_exit - detach cgroup from exiting task
2903  * @tsk: pointer to task_struct of exiting process
2904  * @run_callbacks: run exit callbacks?
2905  *
2906  * Description: Detach cgroup from @tsk and release it.
2907  *
2908  * Note that cgroups marked notify_on_release force every task in
2909  * them to take the global cgroup_mutex mutex when exiting.
2910  * This could impact scaling on very large systems.  Be reluctant to
2911  * use notify_on_release cgroups where very high task exit scaling
2912  * is required on large systems.
2913  *
2914  * the_top_cgroup_hack:
2915  *
2916  *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
2917  *
2918  *    We call cgroup_exit() while the task is still competent to
2919  *    handle notify_on_release(), then leave the task attached to the
2920  *    root cgroup in each hierarchy for the remainder of its exit.
2921  *
2922  *    To do this properly, we would increment the reference count on
2923  *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
2924  *    code we would add a second cgroup function call, to drop that
2925  *    reference.  This would just create an unnecessary hot spot on
2926  *    the top_cgroup reference count, to no avail.
2927  *
2928  *    Normally, holding a reference to a cgroup without bumping its
2929  *    count is unsafe.   The cgroup could go away, or someone could
2930  *    attach us to a different cgroup, decrementing the count on
2931  *    the first cgroup that we never incremented.  But in this case,
2932  *    top_cgroup isn't going away, and either the task has PF_EXITING set,
2933  *    which wards off any cgroup_attach_task() attempts, or the task is a
2934  *    failed fork, never visible to cgroup_attach_task().
2935  */
2936 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2937 {
2938 	int i;
2939 	struct css_set *cg;
2940 
2941 	if (run_callbacks && need_forkexit_callback) {
2942 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2943 			struct cgroup_subsys *ss = subsys[i];
2944 			if (ss->exit)
2945 				ss->exit(ss, tsk);
2946 		}
2947 	}
2948 
2949 	/*
2950 	 * Unlink from the css_set task list if necessary.
2951 	 * Optimistically check cg_list before taking
2952 	 * css_set_lock
2953 	 */
2954 	if (!list_empty(&tsk->cg_list)) {
2955 		write_lock(&css_set_lock);
2956 		if (!list_empty(&tsk->cg_list))
2957 			list_del(&tsk->cg_list);
2958 		write_unlock(&css_set_lock);
2959 	}
2960 
2961 	/* Reassign the task to the init_css_set. */
2962 	task_lock(tsk);
2963 	cg = tsk->cgroups;
2964 	tsk->cgroups = &init_css_set;
2965 	task_unlock(tsk);
2966 	if (cg)
2967 		put_css_set_taskexit(cg);
2968 }
2969 
2970 /**
2971  * cgroup_clone - clone the cgroup the given subsystem is attached to
2972  * @tsk: the task to be moved
2973  * @subsys: the given subsystem
2974  * @nodename: the name for the new cgroup
2975  *
2976  * Duplicate the current cgroup in the hierarchy that the given
2977  * subsystem is attached to, and move this task into the new
2978  * child.
2979  */
2980 int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2981 							char *nodename)
2982 {
2983 	struct dentry *dentry;
2984 	int ret = 0;
2985 	struct cgroup *parent, *child;
2986 	struct inode *inode;
2987 	struct css_set *cg;
2988 	struct cgroupfs_root *root;
2989 	struct cgroup_subsys *ss;
2990 
2991 	/* We shouldn't be called by an unregistered subsystem */
2992 	BUG_ON(!subsys->active);
2993 
2994 	/* First figure out what hierarchy and cgroup we're dealing
2995 	 * with, and pin them so we can drop cgroup_mutex */
2996 	mutex_lock(&cgroup_mutex);
2997  again:
2998 	root = subsys->root;
2999 	if (root == &rootnode) {
3000 		mutex_unlock(&cgroup_mutex);
3001 		return 0;
3002 	}
3003 
3004 	/* Pin the hierarchy */
3005 	if (!atomic_inc_not_zero(&root->sb->s_active)) {
3006 		/* We race with the final deactivate_super() */
3007 		mutex_unlock(&cgroup_mutex);
3008 		return 0;
3009 	}
3010 
3011 	/* Keep the cgroup alive */
3012 	task_lock(tsk);
3013 	parent = task_cgroup(tsk, subsys->subsys_id);
3014 	cg = tsk->cgroups;
3015 	get_css_set(cg);
3016 	task_unlock(tsk);
3017 
3018 	mutex_unlock(&cgroup_mutex);
3019 
3020 	/* Now do the VFS work to create a cgroup */
3021 	inode = parent->dentry->d_inode;
3022 
3023 	/* Hold the parent directory mutex across this operation to
3024 	 * stop anyone else deleting the new cgroup */
3025 	mutex_lock(&inode->i_mutex);
3026 	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
3027 	if (IS_ERR(dentry)) {
3028 		printk(KERN_INFO
3029 		       "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
3030 		       PTR_ERR(dentry));
3031 		ret = PTR_ERR(dentry);
3032 		goto out_release;
3033 	}
3034 
3035 	/* Create the cgroup directory, which also creates the cgroup */
3036 	ret = vfs_mkdir(inode, dentry, 0755);
3037 	child = __d_cgrp(dentry);
3038 	dput(dentry);
3039 	if (ret) {
3040 		printk(KERN_INFO
3041 		       "Failed to create cgroup %s: %d\n", nodename,
3042 		       ret);
3043 		goto out_release;
3044 	}
3045 
3046 	/* The cgroup now exists. Retake cgroup_mutex and check
3047 	 * that we're still in the same state that we thought we
3048 	 * were. */
3049 	mutex_lock(&cgroup_mutex);
3050 	if ((root != subsys->root) ||
3051 	    (parent != task_cgroup(tsk, subsys->subsys_id))) {
3052 		/* Aargh, we raced ... */
3053 		mutex_unlock(&inode->i_mutex);
3054 		put_css_set(cg);
3055 
3056 		deactivate_super(root->sb);
3057 		/* The cgroup is still accessible in the VFS, but
3058 		 * we're not going to try to rmdir() it at this
3059 		 * point. */
3060 		printk(KERN_INFO
3061 		       "Race in cgroup_clone() - leaking cgroup %s\n",
3062 		       nodename);
3063 		goto again;
3064 	}
3065 
3066 	/* do any required auto-setup */
3067 	for_each_subsys(root, ss) {
3068 		if (ss->post_clone)
3069 			ss->post_clone(ss, child);
3070 	}
3071 
3072 	/* All seems fine. Finish by moving the task into the new cgroup */
3073 	ret = cgroup_attach_task(child, tsk);
3074 	mutex_unlock(&cgroup_mutex);
3075 
3076  out_release:
3077 	mutex_unlock(&inode->i_mutex);
3078 
3079 	mutex_lock(&cgroup_mutex);
3080 	put_css_set(cg);
3081 	mutex_unlock(&cgroup_mutex);
3082 	deactivate_super(root->sb);
3083 	return ret;
3084 }
3085 
3086 /**
3087  * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
3088  * @cgrp: the cgroup in question
3089  *
3090  * See if @cgrp is a descendant of the current task's cgroup in
3091  * the appropriate hierarchy.
3092  *
3093  * If we are sending in dummytop, then presumably we are creating
3094  * the top cgroup in the subsystem.
3095  *
3096  * Called only by the ns (nsproxy) cgroup.
3097  */
3098 int cgroup_is_descendant(const struct cgroup *cgrp)
3099 {
3100 	int ret;
3101 	struct cgroup *target;
3102 	int subsys_id;
3103 
3104 	if (cgrp == dummytop)
3105 		return 1;
3106 
3107 	get_first_subsys(cgrp, NULL, &subsys_id);
3108 	target = task_cgroup(current, subsys_id);
3109 	while (cgrp != target && cgrp != cgrp->top_cgroup)
3110 		cgrp = cgrp->parent;
3111 	ret = (cgrp == target);
3112 	return ret;
3113 }
3114 
3115 static void check_for_release(struct cgroup *cgrp)
3116 {
3117 	/* All of these checks rely on RCU to keep the cgroup
3118 	 * structure alive */
3119 	if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
3120 	    && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
3121 		/* Control Group is currently removable. If it's not
3122 		 * already queued for a userspace notification, queue
3123 		 * it now */
3124 		int need_schedule_work = 0;
3125 		spin_lock(&release_list_lock);
3126 		if (!cgroup_is_removed(cgrp) &&
3127 		    list_empty(&cgrp->release_list)) {
3128 			list_add(&cgrp->release_list, &release_list);
3129 			need_schedule_work = 1;
3130 		}
3131 		spin_unlock(&release_list_lock);
3132 		if (need_schedule_work)
3133 			schedule_work(&release_agent_work);
3134 	}
3135 }
3136 
3137 void __css_put(struct cgroup_subsys_state *css)
3138 {
3139 	struct cgroup *cgrp = css->cgroup;
3140 	rcu_read_lock();
3141 	if ((atomic_dec_return(&css->refcnt) == 1) &&
3142 	    notify_on_release(cgrp)) {
3143 		set_bit(CGRP_RELEASABLE, &cgrp->flags);
3144 		check_for_release(cgrp);
3145 	}
3146 	rcu_read_unlock();
3147 }
3148 
3149 /*
3150  * Notify userspace when a cgroup is released, by running the
3151  * configured release agent with the name of the cgroup (path
3152  * relative to the root of cgroup file system) as the argument.
3153  *
3154  * Most likely, this user command will try to rmdir this cgroup.
3155  *
3156  * This races with the possibility that some other task will be
3157  * attached to this cgroup before it is removed, or that some other
3158  * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
3159  * The presumed 'rmdir' will fail quietly if this cgroup is back in
3160  * use, and this cgroup will be reprieved from its death sentence,
3161  * to continue to serve a useful existence.  Next time it's released,
3162  * we will get notified again, if it still has 'notify_on_release' set.
3163  *
3164  * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
3165  * means only wait until the task is successfully execve()'d.  The
3166  * separate release agent task is forked by call_usermodehelper(),
3167  * then control in this thread returns here, without waiting for the
3168  * release agent task.  We don't bother to wait because the caller of
3169  * this routine has no use for the exit status of the release agent
3170  * task, so no sense holding our caller up for that.
3171  */
3172 static void cgroup_release_agent(struct work_struct *work)
3173 {
3174 	BUG_ON(work != &release_agent_work);
3175 	mutex_lock(&cgroup_mutex);
3176 	spin_lock(&release_list_lock);
3177 	while (!list_empty(&release_list)) {
3178 		char *argv[3], *envp[3];
3179 		int i;
3180 		char *pathbuf = NULL, *agentbuf = NULL;
3181 		struct cgroup *cgrp = list_entry(release_list.next,
3182 						    struct cgroup,
3183 						    release_list);
3184 		list_del_init(&cgrp->release_list);
3185 		spin_unlock(&release_list_lock);
3186 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
3187 		if (!pathbuf)
3188 			goto continue_free;
3189 		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
3190 			goto continue_free;
3191 		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
3192 		if (!agentbuf)
3193 			goto continue_free;
3194 
3195 		i = 0;
3196 		argv[i++] = agentbuf;
3197 		argv[i++] = pathbuf;
3198 		argv[i] = NULL;
3199 
3200 		i = 0;
3201 		/* minimal command environment */
3202 		envp[i++] = "HOME=/";
3203 		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3204 		envp[i] = NULL;
3205 
3206 		/* Drop the lock while we invoke the usermode helper,
3207 		 * since the exec could involve hitting disk and hence
3208 		 * be a slow process */
3209 		mutex_unlock(&cgroup_mutex);
3210 		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
3211 		mutex_lock(&cgroup_mutex);
3212  continue_free:
3213 		kfree(pathbuf);
3214 		kfree(agentbuf);
3215 		spin_lock(&release_list_lock);
3216 	}
3217 	spin_unlock(&release_list_lock);
3218 	mutex_unlock(&cgroup_mutex);
3219 }
3220 
3221 static int __init cgroup_disable(char *str)
3222 {
3223 	int i;
3224 	char *token;
3225 
3226 	while ((token = strsep(&str, ",")) != NULL) {
3227 		if (!*token)
3228 			continue;
3229 
3230 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
3231 			struct cgroup_subsys *ss = subsys[i];
3232 
3233 			if (!strcmp(token, ss->name)) {
3234 				ss->disabled = 1;
3235 				printk(KERN_INFO "Disabling %s control group"
3236 					" subsystem\n", ss->name);
3237 				break;
3238 			}
3239 		}
3240 	}
3241 	return 1;
3242 }
3243 __setup("cgroup_disable=", cgroup_disable);
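/*
 * Usage note (illustrative): subsystems can be turned off by name from
 * the kernel command line, e.g.
 *
 *	cgroup_disable=cpuset,ns
 *
 * Each named subsystem gets ->disabled set, which is reported in the
 * "enabled" column of /proc/cgroups.
 */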
3244