• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Generic process-grouping system.
3  *
4  *  Based originally on the cpuset system, extracted by Paul Menage
5  *  Copyright (C) 2006 Google, Inc
6  *
7  *  Notifications support
8  *  Copyright (C) 2009 Nokia Corporation
9  *  Author: Kirill A. Shutemov
10  *
11  *  Copyright notices from the original cpuset code:
12  *  --------------------------------------------------
13  *  Copyright (C) 2003 BULL SA.
14  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
15  *
16  *  Portions derived from Patrick Mochel's sysfs code.
17  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
18  *
19  *  2003-10-10 Written by Simon Derr.
20  *  2003-10-22 Updates by Stephen Hemminger.
21  *  2004 May-July Rework by Paul Jackson.
22  *  ---------------------------------------------------
23  *
24  *  This file is subject to the terms and conditions of the GNU General Public
25  *  License.  See the file COPYING in the main directory of the Linux
26  *  distribution for more details.
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/cgroup.h>
32 #include <linux/cred.h>
33 #include <linux/ctype.h>
34 #include <linux/errno.h>
35 #include <linux/init_task.h>
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/magic.h>
39 #include <linux/mm.h>
40 #include <linux/mutex.h>
41 #include <linux/mount.h>
42 #include <linux/pagemap.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rcupdate.h>
45 #include <linux/sched.h>
46 #include <linux/slab.h>
47 #include <linux/spinlock.h>
48 #include <linux/percpu-rwsem.h>
49 #include <linux/string.h>
50 #include <linux/sort.h>
51 #include <linux/kmod.h>
52 #include <linux/delayacct.h>
53 #include <linux/cgroupstats.h>
54 #include <linux/hashtable.h>
55 #include <linux/pid_namespace.h>
56 #include <linux/idr.h>
57 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
58 #include <linux/kthread.h>
59 #include <linux/delay.h>
60 #include <linux/cpuset.h>
61 #include <linux/atomic.h>
62 
63 /*
64  * pidlists linger the following amount before being destroyed.  The goal
65  * is avoiding frequent destruction in the middle of consecutive read calls
66  * Expiring in the middle is a performance problem not a correctness one.
67  * 1 sec should be enough.
68  */
69 #define CGROUP_PIDLIST_DESTROY_DELAY	HZ
70 
71 #define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
72 					 MAX_CFTYPE_NAME + 2)
73 
74 /*
75  * cgroup_mutex is the master lock.  Any modification to cgroup or its
76  * hierarchy must be performed while holding it.
77  *
78  * css_set_lock protects task->cgroups pointer, the list of css_set
79  * objects, and the chain of tasks off each css_set.
80  *
81  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
82  * cgroup.h can use them for lockdep annotations.
83  */
84 #ifdef CONFIG_PROVE_RCU
85 DEFINE_MUTEX(cgroup_mutex);
86 DEFINE_SPINLOCK(css_set_lock);
87 EXPORT_SYMBOL_GPL(cgroup_mutex);
88 EXPORT_SYMBOL_GPL(css_set_lock);
89 #else
90 static DEFINE_MUTEX(cgroup_mutex);
91 static DEFINE_SPINLOCK(css_set_lock);
92 #endif
93 
94 /*
95  * Protects cgroup_idr and css_idr so that IDs can be released without
96  * grabbing cgroup_mutex.
97  */
98 static DEFINE_SPINLOCK(cgroup_idr_lock);
99 
100 /*
101  * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
102  * against file removal/re-creation across css hiding.
103  */
104 static DEFINE_SPINLOCK(cgroup_file_kn_lock);
105 
106 /*
107  * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
108  * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
109  */
110 static DEFINE_SPINLOCK(release_agent_path_lock);
111 
112 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
113 
114 #define cgroup_assert_mutex_or_rcu_locked()				\
115 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
116 			   !lockdep_is_held(&cgroup_mutex),		\
117 			   "cgroup_mutex or RCU read lock required");
118 
119 /*
120  * cgroup destruction makes heavy use of work items and there can be a lot
121  * of concurrent destructions.  Use a separate workqueue so that cgroup
122  * destruction work items don't end up filling up max_active of system_wq
123  * which may lead to deadlock.
124  */
125 static struct workqueue_struct *cgroup_destroy_wq;
126 
127 /*
128  * pidlist destructions need to be flushed on cgroup destruction.  Use a
129  * separate workqueue as flush domain.
130  */
131 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
132 
133 /* generate an array of cgroup subsystem pointers */
134 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
135 static struct cgroup_subsys *cgroup_subsys[] = {
136 #include <linux/cgroup_subsys.h>
137 };
138 #undef SUBSYS
139 
140 /* array of cgroup subsystem names */
141 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
142 static const char *cgroup_subsys_name[] = {
143 #include <linux/cgroup_subsys.h>
144 };
145 #undef SUBSYS
146 
147 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
148 #define SUBSYS(_x)								\
149 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
150 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
151 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
152 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
153 #include <linux/cgroup_subsys.h>
154 #undef SUBSYS
155 
156 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
157 static struct static_key_true *cgroup_subsys_enabled_key[] = {
158 #include <linux/cgroup_subsys.h>
159 };
160 #undef SUBSYS
161 
162 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
163 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
164 #include <linux/cgroup_subsys.h>
165 };
166 #undef SUBSYS
167 
168 /*
169  * The default hierarchy, reserved for the subsystems that are otherwise
170  * unattached - it never has more than a single cgroup, and all tasks are
171  * part of that cgroup.
172  */
173 struct cgroup_root cgrp_dfl_root;
174 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
175 
176 /*
177  * The default hierarchy always exists but is hidden until mounted for the
178  * first time.  This is for backward compatibility.
179  */
180 static bool cgrp_dfl_root_visible;
181 
182 /* some controllers are not supported in the default hierarchy */
183 static unsigned long cgrp_dfl_root_inhibit_ss_mask;
184 
185 /* The list of hierarchy roots */
186 
187 static LIST_HEAD(cgroup_roots);
188 static int cgroup_root_count;
189 
190 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
191 static DEFINE_IDR(cgroup_hierarchy_idr);
192 
193 /*
194  * Assign a monotonically increasing serial number to csses.  It guarantees
195  * cgroups with bigger numbers are newer than those with smaller numbers.
196  * Also, as csses are always appended to the parent's ->children list, it
197  * guarantees that sibling csses are always sorted in the ascending serial
198  * number order on the list.  Protected by cgroup_mutex.
199  */
200 static u64 css_serial_nr_next = 1;
201 
202 /*
203  * These bitmask flags indicate whether tasks in the fork and exit paths have
204  * fork/exit handlers to call. This avoids us having to do extra work in the
205  * fork/exit path to check which subsystems have fork/exit callbacks.
206  */
207 static unsigned long have_fork_callback __read_mostly;
208 static unsigned long have_exit_callback __read_mostly;
209 static unsigned long have_free_callback __read_mostly;
210 
211 /* Ditto for the can_fork callback. */
212 static unsigned long have_canfork_callback __read_mostly;
213 
214 static struct cftype cgroup_dfl_base_files[];
215 static struct cftype cgroup_legacy_base_files[];
216 
217 static int rebind_subsystems(struct cgroup_root *dst_root,
218 			     unsigned long ss_mask);
219 static void css_task_iter_advance(struct css_task_iter *it);
220 static int cgroup_destroy_locked(struct cgroup *cgrp);
221 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
222 		      bool visible);
223 static void css_release(struct percpu_ref *ref);
224 static void kill_css(struct cgroup_subsys_state *css);
225 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
226 			      struct cgroup *cgrp, struct cftype cfts[],
227 			      bool is_add);
228 
229 /**
230  * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
231  * @ssid: subsys ID of interest
232  *
233  * cgroup_subsys_enabled() can only be used with literal subsys names which
234  * is fine for individual subsystems but unsuitable for cgroup core.  This
235  * is slower static_key_enabled() based test indexed by @ssid.
236  */
cgroup_ssid_enabled(int ssid)237 static bool cgroup_ssid_enabled(int ssid)
238 {
239 	if (CGROUP_SUBSYS_COUNT == 0)
240 		return false;
241 
242 	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
243 }
244 
245 /**
246  * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
247  * @cgrp: the cgroup of interest
248  *
249  * The default hierarchy is the v2 interface of cgroup and this function
250  * can be used to test whether a cgroup is on the default hierarchy for
251  * cases where a subsystem should behave differnetly depending on the
252  * interface version.
253  *
254  * The set of behaviors which change on the default hierarchy are still
255  * being determined and the mount option is prefixed with __DEVEL__.
256  *
257  * List of changed behaviors:
258  *
259  * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
260  *   and "name" are disallowed.
261  *
262  * - When mounting an existing superblock, mount options should match.
263  *
264  * - Remount is disallowed.
265  *
266  * - rename(2) is disallowed.
267  *
268  * - "tasks" is removed.  Everything should be at process granularity.  Use
269  *   "cgroup.procs" instead.
270  *
271  * - "cgroup.procs" is not sorted.  pids will be unique unless they got
272  *   recycled inbetween reads.
273  *
274  * - "release_agent" and "notify_on_release" are removed.  Replacement
275  *   notification mechanism will be implemented.
276  *
277  * - "cgroup.clone_children" is removed.
278  *
279  * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
280  *   and its descendants contain no task; otherwise, 1.  The file also
281  *   generates kernfs notification which can be monitored through poll and
282  *   [di]notify when the value of the file changes.
283  *
284  * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
285  *   take masks of ancestors with non-empty cpus/mems, instead of being
286  *   moved to an ancestor.
287  *
288  * - cpuset: a task can be moved into an empty cpuset, and again it takes
289  *   masks of ancestors.
290  *
291  * - memcg: use_hierarchy is on by default and the cgroup file for the flag
292  *   is not created.
293  *
294  * - blkcg: blk-throttle becomes properly hierarchical.
295  *
296  * - debug: disallowed on the default hierarchy.
297  */
cgroup_on_dfl(const struct cgroup * cgrp)298 static bool cgroup_on_dfl(const struct cgroup *cgrp)
299 {
300 	return cgrp->root == &cgrp_dfl_root;
301 }
302 
303 /* IDR wrappers which synchronize using cgroup_idr_lock */
cgroup_idr_alloc(struct idr * idr,void * ptr,int start,int end,gfp_t gfp_mask)304 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
305 			    gfp_t gfp_mask)
306 {
307 	int ret;
308 
309 	idr_preload(gfp_mask);
310 	spin_lock_bh(&cgroup_idr_lock);
311 	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
312 	spin_unlock_bh(&cgroup_idr_lock);
313 	idr_preload_end();
314 	return ret;
315 }
316 
cgroup_idr_replace(struct idr * idr,void * ptr,int id)317 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
318 {
319 	void *ret;
320 
321 	spin_lock_bh(&cgroup_idr_lock);
322 	ret = idr_replace(idr, ptr, id);
323 	spin_unlock_bh(&cgroup_idr_lock);
324 	return ret;
325 }
326 
cgroup_idr_remove(struct idr * idr,int id)327 static void cgroup_idr_remove(struct idr *idr, int id)
328 {
329 	spin_lock_bh(&cgroup_idr_lock);
330 	idr_remove(idr, id);
331 	spin_unlock_bh(&cgroup_idr_lock);
332 }
333 
cgroup_parent(struct cgroup * cgrp)334 static struct cgroup *cgroup_parent(struct cgroup *cgrp)
335 {
336 	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
337 
338 	if (parent_css)
339 		return container_of(parent_css, struct cgroup, self);
340 	return NULL;
341 }
342 
343 /**
344  * cgroup_css - obtain a cgroup's css for the specified subsystem
345  * @cgrp: the cgroup of interest
346  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
347  *
348  * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
349  * function must be called either under cgroup_mutex or rcu_read_lock() and
350  * the caller is responsible for pinning the returned css if it wants to
351  * keep accessing it outside the said locks.  This function may return
352  * %NULL if @cgrp doesn't have @subsys_id enabled.
353  */
cgroup_css(struct cgroup * cgrp,struct cgroup_subsys * ss)354 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
355 					      struct cgroup_subsys *ss)
356 {
357 	if (ss)
358 		return rcu_dereference_check(cgrp->subsys[ss->id],
359 					lockdep_is_held(&cgroup_mutex));
360 	else
361 		return &cgrp->self;
362 }
363 
364 /**
365  * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
366  * @cgrp: the cgroup of interest
367  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
368  *
369  * Similar to cgroup_css() but returns the effective css, which is defined
370  * as the matching css of the nearest ancestor including self which has @ss
371  * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
372  * function is guaranteed to return non-NULL css.
373  */
cgroup_e_css(struct cgroup * cgrp,struct cgroup_subsys * ss)374 static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
375 						struct cgroup_subsys *ss)
376 {
377 	lockdep_assert_held(&cgroup_mutex);
378 
379 	if (!ss)
380 		return &cgrp->self;
381 
382 	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
383 		return NULL;
384 
385 	/*
386 	 * This function is used while updating css associations and thus
387 	 * can't test the csses directly.  Use ->child_subsys_mask.
388 	 */
389 	while (cgroup_parent(cgrp) &&
390 	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
391 		cgrp = cgroup_parent(cgrp);
392 
393 	return cgroup_css(cgrp, ss);
394 }
395 
396 /**
397  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
398  * @cgrp: the cgroup of interest
399  * @ss: the subsystem of interest
400  *
401  * Find and get the effective css of @cgrp for @ss.  The effective css is
402  * defined as the matching css of the nearest ancestor including self which
403  * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
404  * the root css is returned, so this function always returns a valid css.
405  * The returned css must be put using css_put().
406  */
cgroup_get_e_css(struct cgroup * cgrp,struct cgroup_subsys * ss)407 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
408 					     struct cgroup_subsys *ss)
409 {
410 	struct cgroup_subsys_state *css;
411 
412 	rcu_read_lock();
413 
414 	do {
415 		css = cgroup_css(cgrp, ss);
416 
417 		if (css && css_tryget_online(css))
418 			goto out_unlock;
419 		cgrp = cgroup_parent(cgrp);
420 	} while (cgrp);
421 
422 	css = init_css_set.subsys[ss->id];
423 	css_get(css);
424 out_unlock:
425 	rcu_read_unlock();
426 	return css;
427 }
428 
429 /* convenient tests for these bits */
cgroup_is_dead(const struct cgroup * cgrp)430 static inline bool cgroup_is_dead(const struct cgroup *cgrp)
431 {
432 	return !(cgrp->self.flags & CSS_ONLINE);
433 }
434 
cgroup_get(struct cgroup * cgrp)435 static void cgroup_get(struct cgroup *cgrp)
436 {
437 	WARN_ON_ONCE(cgroup_is_dead(cgrp));
438 	css_get(&cgrp->self);
439 }
440 
cgroup_tryget(struct cgroup * cgrp)441 static bool cgroup_tryget(struct cgroup *cgrp)
442 {
443 	return css_tryget(&cgrp->self);
444 }
445 
cgroup_put(struct cgroup * cgrp)446 static void cgroup_put(struct cgroup *cgrp)
447 {
448 	css_put(&cgrp->self);
449 }
450 
of_css(struct kernfs_open_file * of)451 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
452 {
453 	struct cgroup *cgrp = of->kn->parent->priv;
454 	struct cftype *cft = of_cft(of);
455 
456 	/*
457 	 * This is open and unprotected implementation of cgroup_css().
458 	 * seq_css() is only called from a kernfs file operation which has
459 	 * an active reference on the file.  Because all the subsystem
460 	 * files are drained before a css is disassociated with a cgroup,
461 	 * the matching css from the cgroup's subsys table is guaranteed to
462 	 * be and stay valid until the enclosing operation is complete.
463 	 */
464 	if (cft->ss)
465 		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
466 	else
467 		return &cgrp->self;
468 }
469 EXPORT_SYMBOL_GPL(of_css);
470 
471 /**
472  * cgroup_is_descendant - test ancestry
473  * @cgrp: the cgroup to be tested
474  * @ancestor: possible ancestor of @cgrp
475  *
476  * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
477  * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
478  * and @ancestor are accessible.
479  */
cgroup_is_descendant(struct cgroup * cgrp,struct cgroup * ancestor)480 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
481 {
482 	while (cgrp) {
483 		if (cgrp == ancestor)
484 			return true;
485 		cgrp = cgroup_parent(cgrp);
486 	}
487 	return false;
488 }
489 
notify_on_release(const struct cgroup * cgrp)490 static int notify_on_release(const struct cgroup *cgrp)
491 {
492 	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
493 }
494 
495 /**
496  * for_each_css - iterate all css's of a cgroup
497  * @css: the iteration cursor
498  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
499  * @cgrp: the target cgroup to iterate css's of
500  *
501  * Should be called under cgroup_[tree_]mutex.
502  */
503 #define for_each_css(css, ssid, cgrp)					\
504 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
505 		if (!((css) = rcu_dereference_check(			\
506 				(cgrp)->subsys[(ssid)],			\
507 				lockdep_is_held(&cgroup_mutex)))) { }	\
508 		else
509 
510 /**
511  * for_each_e_css - iterate all effective css's of a cgroup
512  * @css: the iteration cursor
513  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
514  * @cgrp: the target cgroup to iterate css's of
515  *
516  * Should be called under cgroup_[tree_]mutex.
517  */
518 #define for_each_e_css(css, ssid, cgrp)					\
519 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
520 		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
521 			;						\
522 		else
523 
524 /**
525  * for_each_subsys - iterate all enabled cgroup subsystems
526  * @ss: the iteration cursor
527  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
528  */
529 #define for_each_subsys(ss, ssid)					\
530 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
531 	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
532 
533 /**
534  * for_each_subsys_which - filter for_each_subsys with a bitmask
535  * @ss: the iteration cursor
536  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
537  * @ss_maskp: a pointer to the bitmask
538  *
539  * The block will only run for cases where the ssid-th bit (1 << ssid) of
540  * mask is set to 1.
541  */
542 #define for_each_subsys_which(ss, ssid, ss_maskp)			\
543 	if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */	\
544 		(ssid) = 0;						\
545 	else								\
546 		for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT)	\
547 			if (((ss) = cgroup_subsys[ssid]) && false)	\
548 				break;					\
549 			else
550 
551 /* iterate across the hierarchies */
552 #define for_each_root(root)						\
553 	list_for_each_entry((root), &cgroup_roots, root_list)
554 
555 /* iterate over child cgrps, lock should be held throughout iteration */
556 #define cgroup_for_each_live_child(child, cgrp)				\
557 	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
558 		if (({ lockdep_assert_held(&cgroup_mutex);		\
559 		       cgroup_is_dead(child); }))			\
560 			;						\
561 		else
562 
563 static void cgroup_release_agent(struct work_struct *work);
564 static void check_for_release(struct cgroup *cgrp);
565 
566 /*
567  * A cgroup can be associated with multiple css_sets as different tasks may
568  * belong to different cgroups on different hierarchies.  In the other
569  * direction, a css_set is naturally associated with multiple cgroups.
570  * This M:N relationship is represented by the following link structure
571  * which exists for each association and allows traversing the associations
572  * from both sides.
573  */
574 struct cgrp_cset_link {
575 	/* the cgroup and css_set this link associates */
576 	struct cgroup		*cgrp;
577 	struct css_set		*cset;
578 
579 	/* list of cgrp_cset_links anchored at cgrp->cset_links */
580 	struct list_head	cset_link;
581 
582 	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
583 	struct list_head	cgrp_link;
584 };
585 
586 /*
587  * The default css_set - used by init and its children prior to any
588  * hierarchies being mounted. It contains a pointer to the root state
589  * for each subsystem. Also used to anchor the list of css_sets. Not
590  * reference-counted, to improve performance when child cgroups
591  * haven't been created.
592  */
593 struct css_set init_css_set = {
594 	.refcount		= ATOMIC_INIT(1),
595 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
596 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
597 	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
598 	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
599 	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
600 	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
601 };
602 
603 static int css_set_count	= 1;	/* 1 for init_css_set */
604 
605 /**
606  * css_set_populated - does a css_set contain any tasks?
607  * @cset: target css_set
608  */
css_set_populated(struct css_set * cset)609 static bool css_set_populated(struct css_set *cset)
610 {
611 	lockdep_assert_held(&css_set_lock);
612 
613 	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
614 }
615 
616 /**
617  * cgroup_update_populated - updated populated count of a cgroup
618  * @cgrp: the target cgroup
619  * @populated: inc or dec populated count
620  *
621  * One of the css_sets associated with @cgrp is either getting its first
622  * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
623  * count is propagated towards root so that a given cgroup's populated_cnt
624  * is zero iff the cgroup and all its descendants don't contain any tasks.
625  *
626  * @cgrp's interface file "cgroup.populated" is zero if
627  * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
628  * changes from or to zero, userland is notified that the content of the
629  * interface file has changed.  This can be used to detect when @cgrp and
630  * its descendants become populated or empty.
631  */
cgroup_update_populated(struct cgroup * cgrp,bool populated)632 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
633 {
634 	lockdep_assert_held(&css_set_lock);
635 
636 	do {
637 		bool trigger;
638 
639 		if (populated)
640 			trigger = !cgrp->populated_cnt++;
641 		else
642 			trigger = !--cgrp->populated_cnt;
643 
644 		if (!trigger)
645 			break;
646 
647 		check_for_release(cgrp);
648 		cgroup_file_notify(&cgrp->events_file);
649 
650 		cgrp = cgroup_parent(cgrp);
651 	} while (cgrp);
652 }
653 
654 /**
655  * css_set_update_populated - update populated state of a css_set
656  * @cset: target css_set
657  * @populated: whether @cset is populated or depopulated
658  *
659  * @cset is either getting the first task or losing the last.  Update the
660  * ->populated_cnt of all associated cgroups accordingly.
661  */
css_set_update_populated(struct css_set * cset,bool populated)662 static void css_set_update_populated(struct css_set *cset, bool populated)
663 {
664 	struct cgrp_cset_link *link;
665 
666 	lockdep_assert_held(&css_set_lock);
667 
668 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
669 		cgroup_update_populated(link->cgrp, populated);
670 }
671 
672 /**
673  * css_set_move_task - move a task from one css_set to another
674  * @task: task being moved
675  * @from_cset: css_set @task currently belongs to (may be NULL)
676  * @to_cset: new css_set @task is being moved to (may be NULL)
677  * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
678  *
679  * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
680  * css_set, @from_cset can be NULL.  If @task is being disassociated
681  * instead of moved, @to_cset can be NULL.
682  *
683  * This function automatically handles populated_cnt updates and
684  * css_task_iter adjustments but the caller is responsible for managing
685  * @from_cset and @to_cset's reference counts.
686  */
css_set_move_task(struct task_struct * task,struct css_set * from_cset,struct css_set * to_cset,bool use_mg_tasks)687 static void css_set_move_task(struct task_struct *task,
688 			      struct css_set *from_cset, struct css_set *to_cset,
689 			      bool use_mg_tasks)
690 {
691 	lockdep_assert_held(&css_set_lock);
692 
693 	if (from_cset) {
694 		struct css_task_iter *it, *pos;
695 
696 		WARN_ON_ONCE(list_empty(&task->cg_list));
697 
698 		/*
699 		 * @task is leaving, advance task iterators which are
700 		 * pointing to it so that they can resume at the next
701 		 * position.  Advancing an iterator might remove it from
702 		 * the list, use safe walk.  See css_task_iter_advance*()
703 		 * for details.
704 		 */
705 		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
706 					 iters_node)
707 			if (it->task_pos == &task->cg_list)
708 				css_task_iter_advance(it);
709 
710 		list_del_init(&task->cg_list);
711 		if (!css_set_populated(from_cset))
712 			css_set_update_populated(from_cset, false);
713 	} else {
714 		WARN_ON_ONCE(!list_empty(&task->cg_list));
715 	}
716 
717 	if (to_cset) {
718 		/*
719 		 * We are synchronized through cgroup_threadgroup_rwsem
720 		 * against PF_EXITING setting such that we can't race
721 		 * against cgroup_exit() changing the css_set to
722 		 * init_css_set and dropping the old one.
723 		 */
724 		WARN_ON_ONCE(task->flags & PF_EXITING);
725 
726 		if (!css_set_populated(to_cset))
727 			css_set_update_populated(to_cset, true);
728 		rcu_assign_pointer(task->cgroups, to_cset);
729 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
730 							     &to_cset->tasks);
731 	}
732 }
733 
734 /*
735  * hash table for cgroup groups. This improves the performance to find
736  * an existing css_set. This hash doesn't (currently) take into
737  * account cgroups in empty hierarchies.
738  */
739 #define CSS_SET_HASH_BITS	7
740 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
741 
css_set_hash(struct cgroup_subsys_state * css[])742 static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
743 {
744 	unsigned long key = 0UL;
745 	struct cgroup_subsys *ss;
746 	int i;
747 
748 	for_each_subsys(ss, i)
749 		key += (unsigned long)css[i];
750 	key = (key >> 16) ^ key;
751 
752 	return key;
753 }
754 
put_css_set_locked(struct css_set * cset)755 static void put_css_set_locked(struct css_set *cset)
756 {
757 	struct cgrp_cset_link *link, *tmp_link;
758 	struct cgroup_subsys *ss;
759 	int ssid;
760 
761 	lockdep_assert_held(&css_set_lock);
762 
763 	if (!atomic_dec_and_test(&cset->refcount))
764 		return;
765 
766 	/* This css_set is dead. unlink it and release cgroup and css refs */
767 	for_each_subsys(ss, ssid) {
768 		list_del(&cset->e_cset_node[ssid]);
769 		css_put(cset->subsys[ssid]);
770 	}
771 	hash_del(&cset->hlist);
772 	css_set_count--;
773 
774 	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
775 		list_del(&link->cset_link);
776 		list_del(&link->cgrp_link);
777 		if (cgroup_parent(link->cgrp))
778 			cgroup_put(link->cgrp);
779 		kfree(link);
780 	}
781 
782 	kfree_rcu(cset, rcu_head);
783 }
784 
put_css_set(struct css_set * cset)785 static void put_css_set(struct css_set *cset)
786 {
787 	unsigned long flags;
788 
789 	/*
790 	 * Ensure that the refcount doesn't hit zero while any readers
791 	 * can see it. Similar to atomic_dec_and_lock(), but for an
792 	 * rwlock
793 	 */
794 	if (atomic_add_unless(&cset->refcount, -1, 1))
795 		return;
796 
797 	spin_lock_irqsave(&css_set_lock, flags);
798 	put_css_set_locked(cset);
799 	spin_unlock_irqrestore(&css_set_lock, flags);
800 }
801 
802 /*
803  * refcounted get/put for css_set objects
804  */
get_css_set(struct css_set * cset)805 static inline void get_css_set(struct css_set *cset)
806 {
807 	atomic_inc(&cset->refcount);
808 }
809 
810 /**
811  * compare_css_sets - helper function for find_existing_css_set().
812  * @cset: candidate css_set being tested
813  * @old_cset: existing css_set for a task
814  * @new_cgrp: cgroup that's being entered by the task
815  * @template: desired set of css pointers in css_set (pre-calculated)
816  *
817  * Returns true if "cset" matches "old_cset" except for the hierarchy
818  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
819  */
compare_css_sets(struct css_set * cset,struct css_set * old_cset,struct cgroup * new_cgrp,struct cgroup_subsys_state * template[])820 static bool compare_css_sets(struct css_set *cset,
821 			     struct css_set *old_cset,
822 			     struct cgroup *new_cgrp,
823 			     struct cgroup_subsys_state *template[])
824 {
825 	struct list_head *l1, *l2;
826 
827 	/*
828 	 * On the default hierarchy, there can be csets which are
829 	 * associated with the same set of cgroups but different csses.
830 	 * Let's first ensure that csses match.
831 	 */
832 	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
833 		return false;
834 
835 	/*
836 	 * Compare cgroup pointers in order to distinguish between
837 	 * different cgroups in hierarchies.  As different cgroups may
838 	 * share the same effective css, this comparison is always
839 	 * necessary.
840 	 */
841 	l1 = &cset->cgrp_links;
842 	l2 = &old_cset->cgrp_links;
843 	while (1) {
844 		struct cgrp_cset_link *link1, *link2;
845 		struct cgroup *cgrp1, *cgrp2;
846 
847 		l1 = l1->next;
848 		l2 = l2->next;
849 		/* See if we reached the end - both lists are equal length. */
850 		if (l1 == &cset->cgrp_links) {
851 			BUG_ON(l2 != &old_cset->cgrp_links);
852 			break;
853 		} else {
854 			BUG_ON(l2 == &old_cset->cgrp_links);
855 		}
856 		/* Locate the cgroups associated with these links. */
857 		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
858 		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
859 		cgrp1 = link1->cgrp;
860 		cgrp2 = link2->cgrp;
861 		/* Hierarchies should be linked in the same order. */
862 		BUG_ON(cgrp1->root != cgrp2->root);
863 
864 		/*
865 		 * If this hierarchy is the hierarchy of the cgroup
866 		 * that's changing, then we need to check that this
867 		 * css_set points to the new cgroup; if it's any other
868 		 * hierarchy, then this css_set should point to the
869 		 * same cgroup as the old css_set.
870 		 */
871 		if (cgrp1->root == new_cgrp->root) {
872 			if (cgrp1 != new_cgrp)
873 				return false;
874 		} else {
875 			if (cgrp1 != cgrp2)
876 				return false;
877 		}
878 	}
879 	return true;
880 }
881 
882 /**
883  * find_existing_css_set - init css array and find the matching css_set
884  * @old_cset: the css_set that we're using before the cgroup transition
885  * @cgrp: the cgroup that we're moving into
886  * @template: out param for the new set of csses, should be clear on entry
887  */
find_existing_css_set(struct css_set * old_cset,struct cgroup * cgrp,struct cgroup_subsys_state * template[])888 static struct css_set *find_existing_css_set(struct css_set *old_cset,
889 					struct cgroup *cgrp,
890 					struct cgroup_subsys_state *template[])
891 {
892 	struct cgroup_root *root = cgrp->root;
893 	struct cgroup_subsys *ss;
894 	struct css_set *cset;
895 	unsigned long key;
896 	int i;
897 
898 	/*
899 	 * Build the set of subsystem state objects that we want to see in the
900 	 * new css_set. while subsystems can change globally, the entries here
901 	 * won't change, so no need for locking.
902 	 */
903 	for_each_subsys(ss, i) {
904 		if (root->subsys_mask & (1UL << i)) {
905 			/*
906 			 * @ss is in this hierarchy, so we want the
907 			 * effective css from @cgrp.
908 			 */
909 			template[i] = cgroup_e_css(cgrp, ss);
910 		} else {
911 			/*
912 			 * @ss is not in this hierarchy, so we don't want
913 			 * to change the css.
914 			 */
915 			template[i] = old_cset->subsys[i];
916 		}
917 	}
918 
919 	key = css_set_hash(template);
920 	hash_for_each_possible(css_set_table, cset, hlist, key) {
921 		if (!compare_css_sets(cset, old_cset, cgrp, template))
922 			continue;
923 
924 		/* This css_set matches what we need */
925 		return cset;
926 	}
927 
928 	/* No existing cgroup group matched */
929 	return NULL;
930 }
931 
free_cgrp_cset_links(struct list_head * links_to_free)932 static void free_cgrp_cset_links(struct list_head *links_to_free)
933 {
934 	struct cgrp_cset_link *link, *tmp_link;
935 
936 	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
937 		list_del(&link->cset_link);
938 		kfree(link);
939 	}
940 }
941 
942 /**
943  * allocate_cgrp_cset_links - allocate cgrp_cset_links
944  * @count: the number of links to allocate
945  * @tmp_links: list_head the allocated links are put on
946  *
947  * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
948  * through ->cset_link.  Returns 0 on success or -errno.
949  */
allocate_cgrp_cset_links(int count,struct list_head * tmp_links)950 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
951 {
952 	struct cgrp_cset_link *link;
953 	int i;
954 
955 	INIT_LIST_HEAD(tmp_links);
956 
957 	for (i = 0; i < count; i++) {
958 		link = kzalloc(sizeof(*link), GFP_KERNEL);
959 		if (!link) {
960 			free_cgrp_cset_links(tmp_links);
961 			return -ENOMEM;
962 		}
963 		list_add(&link->cset_link, tmp_links);
964 	}
965 	return 0;
966 }
967 
968 /**
969  * link_css_set - a helper function to link a css_set to a cgroup
970  * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
971  * @cset: the css_set to be linked
972  * @cgrp: the destination cgroup
973  */
link_css_set(struct list_head * tmp_links,struct css_set * cset,struct cgroup * cgrp)974 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
975 			 struct cgroup *cgrp)
976 {
977 	struct cgrp_cset_link *link;
978 
979 	BUG_ON(list_empty(tmp_links));
980 
981 	if (cgroup_on_dfl(cgrp))
982 		cset->dfl_cgrp = cgrp;
983 
984 	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
985 	link->cset = cset;
986 	link->cgrp = cgrp;
987 
988 	/*
989 	 * Always add links to the tail of the lists so that the lists are
990 	 * in choronological order.
991 	 */
992 	list_move_tail(&link->cset_link, &cgrp->cset_links);
993 	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
994 
995 	if (cgroup_parent(cgrp))
996 		cgroup_get(cgrp);
997 }
998 
999 /**
1000  * find_css_set - return a new css_set with one cgroup updated
1001  * @old_cset: the baseline css_set
1002  * @cgrp: the cgroup to be updated
1003  *
1004  * Return a new css_set that's equivalent to @old_cset, but with @cgrp
1005  * substituted into the appropriate hierarchy.
1006  */
find_css_set(struct css_set * old_cset,struct cgroup * cgrp)1007 static struct css_set *find_css_set(struct css_set *old_cset,
1008 				    struct cgroup *cgrp)
1009 {
1010 	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
1011 	struct css_set *cset;
1012 	struct list_head tmp_links;
1013 	struct cgrp_cset_link *link;
1014 	struct cgroup_subsys *ss;
1015 	unsigned long key;
1016 	int ssid;
1017 
1018 	lockdep_assert_held(&cgroup_mutex);
1019 
1020 	/* First see if we already have a cgroup group that matches
1021 	 * the desired set */
1022 	spin_lock_irq(&css_set_lock);
1023 	cset = find_existing_css_set(old_cset, cgrp, template);
1024 	if (cset)
1025 		get_css_set(cset);
1026 	spin_unlock_irq(&css_set_lock);
1027 
1028 	if (cset)
1029 		return cset;
1030 
1031 	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
1032 	if (!cset)
1033 		return NULL;
1034 
1035 	/* Allocate all the cgrp_cset_link objects that we'll need */
1036 	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
1037 		kfree(cset);
1038 		return NULL;
1039 	}
1040 
1041 	atomic_set(&cset->refcount, 1);
1042 	INIT_LIST_HEAD(&cset->cgrp_links);
1043 	INIT_LIST_HEAD(&cset->tasks);
1044 	INIT_LIST_HEAD(&cset->mg_tasks);
1045 	INIT_LIST_HEAD(&cset->mg_preload_node);
1046 	INIT_LIST_HEAD(&cset->mg_node);
1047 	INIT_LIST_HEAD(&cset->task_iters);
1048 	INIT_HLIST_NODE(&cset->hlist);
1049 
1050 	/* Copy the set of subsystem state objects generated in
1051 	 * find_existing_css_set() */
1052 	memcpy(cset->subsys, template, sizeof(cset->subsys));
1053 
1054 	spin_lock_irq(&css_set_lock);
1055 	/* Add reference counts and links from the new css_set. */
1056 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
1057 		struct cgroup *c = link->cgrp;
1058 
1059 		if (c->root == cgrp->root)
1060 			c = cgrp;
1061 		link_css_set(&tmp_links, cset, c);
1062 	}
1063 
1064 	BUG_ON(!list_empty(&tmp_links));
1065 
1066 	css_set_count++;
1067 
1068 	/* Add @cset to the hash table */
1069 	key = css_set_hash(cset->subsys);
1070 	hash_add(css_set_table, &cset->hlist, key);
1071 
1072 	for_each_subsys(ss, ssid) {
1073 		struct cgroup_subsys_state *css = cset->subsys[ssid];
1074 
1075 		list_add_tail(&cset->e_cset_node[ssid],
1076 			      &css->cgroup->e_csets[ssid]);
1077 		css_get(css);
1078 	}
1079 
1080 	spin_unlock_irq(&css_set_lock);
1081 
1082 	return cset;
1083 }
1084 
cgroup_root_from_kf(struct kernfs_root * kf_root)1085 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
1086 {
1087 	struct cgroup *root_cgrp = kf_root->kn->priv;
1088 
1089 	return root_cgrp->root;
1090 }
1091 
cgroup_init_root_id(struct cgroup_root * root)1092 static int cgroup_init_root_id(struct cgroup_root *root)
1093 {
1094 	int id;
1095 
1096 	lockdep_assert_held(&cgroup_mutex);
1097 
1098 	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1099 	if (id < 0)
1100 		return id;
1101 
1102 	root->hierarchy_id = id;
1103 	return 0;
1104 }
1105 
cgroup_exit_root_id(struct cgroup_root * root)1106 static void cgroup_exit_root_id(struct cgroup_root *root)
1107 {
1108 	lockdep_assert_held(&cgroup_mutex);
1109 
1110 	if (root->hierarchy_id) {
1111 		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1112 		root->hierarchy_id = 0;
1113 	}
1114 }
1115 
cgroup_free_root(struct cgroup_root * root)1116 static void cgroup_free_root(struct cgroup_root *root)
1117 {
1118 	if (root) {
1119 		/* hierarchy ID should already have been released */
1120 		WARN_ON_ONCE(root->hierarchy_id);
1121 
1122 		idr_destroy(&root->cgroup_idr);
1123 		kfree(root);
1124 	}
1125 }
1126 
cgroup_destroy_root(struct cgroup_root * root)1127 static void cgroup_destroy_root(struct cgroup_root *root)
1128 {
1129 	struct cgroup *cgrp = &root->cgrp;
1130 	struct cgrp_cset_link *link, *tmp_link;
1131 
1132 	mutex_lock(&cgroup_mutex);
1133 
1134 	BUG_ON(atomic_read(&root->nr_cgrps));
1135 	BUG_ON(!list_empty(&cgrp->self.children));
1136 
1137 	/* Rebind all subsystems back to the default hierarchy */
1138 	rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);
1139 
1140 	/*
1141 	 * Release all the links from cset_links to this hierarchy's
1142 	 * root cgroup
1143 	 */
1144 	spin_lock_irq(&css_set_lock);
1145 
1146 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1147 		list_del(&link->cset_link);
1148 		list_del(&link->cgrp_link);
1149 		kfree(link);
1150 	}
1151 
1152 	spin_unlock_irq(&css_set_lock);
1153 
1154 	if (!list_empty(&root->root_list)) {
1155 		list_del(&root->root_list);
1156 		cgroup_root_count--;
1157 	}
1158 
1159 	cgroup_exit_root_id(root);
1160 
1161 	mutex_unlock(&cgroup_mutex);
1162 
1163 	kernfs_destroy_root(root->kf_root);
1164 	cgroup_free_root(root);
1165 }
1166 
1167 /* look up cgroup associated with given css_set on the specified hierarchy */
cset_cgroup_from_root(struct css_set * cset,struct cgroup_root * root)1168 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1169 					    struct cgroup_root *root)
1170 {
1171 	struct cgroup *res = NULL;
1172 
1173 	lockdep_assert_held(&cgroup_mutex);
1174 	lockdep_assert_held(&css_set_lock);
1175 
1176 	if (cset == &init_css_set) {
1177 		res = &root->cgrp;
1178 	} else {
1179 		struct cgrp_cset_link *link;
1180 
1181 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1182 			struct cgroup *c = link->cgrp;
1183 
1184 			if (c->root == root) {
1185 				res = c;
1186 				break;
1187 			}
1188 		}
1189 	}
1190 
1191 	BUG_ON(!res);
1192 	return res;
1193 }
1194 
1195 /*
1196  * Return the cgroup for "task" from the given hierarchy. Must be
1197  * called with cgroup_mutex and css_set_lock held.
1198  */
task_cgroup_from_root(struct task_struct * task,struct cgroup_root * root)1199 static struct cgroup *task_cgroup_from_root(struct task_struct *task,
1200 					    struct cgroup_root *root)
1201 {
1202 	/*
1203 	 * No need to lock the task - since we hold cgroup_mutex the
1204 	 * task can't change groups, so the only thing that can happen
1205 	 * is that it exits and its css is set back to init_css_set.
1206 	 */
1207 	return cset_cgroup_from_root(task_css_set(task), root);
1208 }
1209 
1210 /*
1211  * A task must hold cgroup_mutex to modify cgroups.
1212  *
1213  * Any task can increment and decrement the count field without lock.
1214  * So in general, code holding cgroup_mutex can't rely on the count
1215  * field not changing.  However, if the count goes to zero, then only
1216  * cgroup_attach_task() can increment it again.  Because a count of zero
1217  * means that no tasks are currently attached, therefore there is no
1218  * way a task attached to that cgroup can fork (the other way to
1219  * increment the count).  So code holding cgroup_mutex can safely
1220  * assume that if the count is zero, it will stay zero. Similarly, if
1221  * a task holds cgroup_mutex on a cgroup with zero count, it
1222  * knows that the cgroup won't be removed, as cgroup_rmdir()
1223  * needs that mutex.
1224  *
1225  * A cgroup can only be deleted if both its 'count' of using tasks
1226  * is zero, and its list of 'children' cgroups is empty.  Since all
1227  * tasks in the system use _some_ cgroup, and since there is always at
1228  * least one task in the system (init, pid == 1), therefore, root cgroup
1229  * always has either children cgroups and/or using tasks.  So we don't
1230  * need a special hack to ensure that root cgroup cannot be deleted.
1231  *
1232  * P.S.  One more locking exception.  RCU is used to guard the
1233  * update of a tasks cgroup pointer by cgroup_attach_task()
1234  */
1235 
1236 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1237 static const struct file_operations proc_cgroupstats_operations;
1238 
cgroup_file_name(struct cgroup * cgrp,const struct cftype * cft,char * buf)1239 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1240 			      char *buf)
1241 {
1242 	struct cgroup_subsys *ss = cft->ss;
1243 
1244 	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1245 	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
1246 		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
1247 			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1248 			 cft->name);
1249 	else
1250 		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1251 	return buf;
1252 }
1253 
1254 /**
1255  * cgroup_file_mode - deduce file mode of a control file
1256  * @cft: the control file in question
1257  *
1258  * S_IRUGO for read, S_IWUSR for write.
1259  */
cgroup_file_mode(const struct cftype * cft)1260 static umode_t cgroup_file_mode(const struct cftype *cft)
1261 {
1262 	umode_t mode = 0;
1263 
1264 	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1265 		mode |= S_IRUGO;
1266 
1267 	if (cft->write_u64 || cft->write_s64 || cft->write) {
1268 		if (cft->flags & CFTYPE_WORLD_WRITABLE)
1269 			mode |= S_IWUGO;
1270 		else
1271 			mode |= S_IWUSR;
1272 	}
1273 
1274 	return mode;
1275 }
1276 
1277 /**
1278  * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
1279  * @cgrp: the target cgroup
1280  * @subtree_control: the new subtree_control mask to consider
1281  *
1282  * On the default hierarchy, a subsystem may request other subsystems to be
1283  * enabled together through its ->depends_on mask.  In such cases, more
1284  * subsystems than specified in "cgroup.subtree_control" may be enabled.
1285  *
1286  * This function calculates which subsystems need to be enabled if
1287  * @subtree_control is to be applied to @cgrp.  The returned mask is always
1288  * a superset of @subtree_control and follows the usual hierarchy rules.
1289  */
cgroup_calc_child_subsys_mask(struct cgroup * cgrp,unsigned long subtree_control)1290 static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
1291 						  unsigned long subtree_control)
1292 {
1293 	struct cgroup *parent = cgroup_parent(cgrp);
1294 	unsigned long cur_ss_mask = subtree_control;
1295 	struct cgroup_subsys *ss;
1296 	int ssid;
1297 
1298 	lockdep_assert_held(&cgroup_mutex);
1299 
1300 	if (!cgroup_on_dfl(cgrp))
1301 		return cur_ss_mask;
1302 
1303 	while (true) {
1304 		unsigned long new_ss_mask = cur_ss_mask;
1305 
1306 		for_each_subsys_which(ss, ssid, &cur_ss_mask)
1307 			new_ss_mask |= ss->depends_on;
1308 
1309 		/*
1310 		 * Mask out subsystems which aren't available.  This can
1311 		 * happen only if some depended-upon subsystems were bound
1312 		 * to non-default hierarchies.
1313 		 */
1314 		if (parent)
1315 			new_ss_mask &= parent->child_subsys_mask;
1316 		else
1317 			new_ss_mask &= cgrp->root->subsys_mask;
1318 
1319 		if (new_ss_mask == cur_ss_mask)
1320 			break;
1321 		cur_ss_mask = new_ss_mask;
1322 	}
1323 
1324 	return cur_ss_mask;
1325 }
1326 
1327 /**
1328  * cgroup_refresh_child_subsys_mask - update child_subsys_mask
1329  * @cgrp: the target cgroup
1330  *
1331  * Update @cgrp->child_subsys_mask according to the current
1332  * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
1333  */
cgroup_refresh_child_subsys_mask(struct cgroup * cgrp)1334 static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
1335 {
1336 	cgrp->child_subsys_mask =
1337 		cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
1338 }
1339 
1340 /**
1341  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1342  * @kn: the kernfs_node being serviced
1343  *
1344  * This helper undoes cgroup_kn_lock_live() and should be invoked before
1345  * the method finishes if locking succeeded.  Note that once this function
1346  * returns the cgroup returned by cgroup_kn_lock_live() may become
1347  * inaccessible any time.  If the caller intends to continue to access the
1348  * cgroup, it should pin it before invoking this function.
1349  */
cgroup_kn_unlock(struct kernfs_node * kn)1350 static void cgroup_kn_unlock(struct kernfs_node *kn)
1351 {
1352 	struct cgroup *cgrp;
1353 
1354 	if (kernfs_type(kn) == KERNFS_DIR)
1355 		cgrp = kn->priv;
1356 	else
1357 		cgrp = kn->parent->priv;
1358 
1359 	mutex_unlock(&cgroup_mutex);
1360 
1361 	kernfs_unbreak_active_protection(kn);
1362 	cgroup_put(cgrp);
1363 }
1364 
1365 /**
1366  * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1367  * @kn: the kernfs_node being serviced
1368  *
1369  * This helper is to be used by a cgroup kernfs method currently servicing
1370  * @kn.  It breaks the active protection, performs cgroup locking and
1371  * verifies that the associated cgroup is alive.  Returns the cgroup if
1372  * alive; otherwise, %NULL.  A successful return should be undone by a
1373  * matching cgroup_kn_unlock() invocation.
1374  *
1375  * Any cgroup kernfs method implementation which requires locking the
1376  * associated cgroup should use this helper.  It avoids nesting cgroup
1377  * locking under kernfs active protection and allows all kernfs operations
1378  * including self-removal.
1379  */
cgroup_kn_lock_live(struct kernfs_node * kn)1380 static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
1381 {
1382 	struct cgroup *cgrp;
1383 
1384 	if (kernfs_type(kn) == KERNFS_DIR)
1385 		cgrp = kn->priv;
1386 	else
1387 		cgrp = kn->parent->priv;
1388 
1389 	/*
1390 	 * We're gonna grab cgroup_mutex which nests outside kernfs
1391 	 * active_ref.  cgroup liveliness check alone provides enough
1392 	 * protection against removal.  Ensure @cgrp stays accessible and
1393 	 * break the active_ref protection.
1394 	 */
1395 	if (!cgroup_tryget(cgrp))
1396 		return NULL;
1397 	kernfs_break_active_protection(kn);
1398 
1399 	mutex_lock(&cgroup_mutex);
1400 
1401 	if (!cgroup_is_dead(cgrp))
1402 		return cgrp;
1403 
1404 	cgroup_kn_unlock(kn);
1405 	return NULL;
1406 }
1407 
cgroup_rm_file(struct cgroup * cgrp,const struct cftype * cft)1408 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1409 {
1410 	char name[CGROUP_FILE_NAME_MAX];
1411 
1412 	lockdep_assert_held(&cgroup_mutex);
1413 
1414 	if (cft->file_offset) {
1415 		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1416 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
1417 
1418 		spin_lock_irq(&cgroup_file_kn_lock);
1419 		cfile->kn = NULL;
1420 		spin_unlock_irq(&cgroup_file_kn_lock);
1421 	}
1422 
1423 	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1424 }
1425 
1426 /**
1427  * css_clear_dir - remove subsys files in a cgroup directory
1428  * @css: taget css
1429  * @cgrp_override: specify if target cgroup is different from css->cgroup
1430  */
css_clear_dir(struct cgroup_subsys_state * css,struct cgroup * cgrp_override)1431 static void css_clear_dir(struct cgroup_subsys_state *css,
1432 			  struct cgroup *cgrp_override)
1433 {
1434 	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
1435 	struct cftype *cfts;
1436 
1437 	list_for_each_entry(cfts, &css->ss->cfts, node)
1438 		cgroup_addrm_files(css, cgrp, cfts, false);
1439 }
1440 
1441 /**
1442  * css_populate_dir - create subsys files in a cgroup directory
1443  * @css: target css
1444  * @cgrp_overried: specify if target cgroup is different from css->cgroup
1445  *
1446  * On failure, no file is added.
1447  */
css_populate_dir(struct cgroup_subsys_state * css,struct cgroup * cgrp_override)1448 static int css_populate_dir(struct cgroup_subsys_state *css,
1449 			    struct cgroup *cgrp_override)
1450 {
1451 	struct cgroup *cgrp = cgrp_override ?: css->cgroup;
1452 	struct cftype *cfts, *failed_cfts;
1453 	int ret;
1454 
1455 	if (!css->ss) {
1456 		if (cgroup_on_dfl(cgrp))
1457 			cfts = cgroup_dfl_base_files;
1458 		else
1459 			cfts = cgroup_legacy_base_files;
1460 
1461 		return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
1462 	}
1463 
1464 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1465 		ret = cgroup_addrm_files(css, cgrp, cfts, true);
1466 		if (ret < 0) {
1467 			failed_cfts = cfts;
1468 			goto err;
1469 		}
1470 	}
1471 	return 0;
1472 err:
1473 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1474 		if (cfts == failed_cfts)
1475 			break;
1476 		cgroup_addrm_files(css, cgrp, cfts, false);
1477 	}
1478 	return ret;
1479 }
1480 
rebind_subsystems(struct cgroup_root * dst_root,unsigned long ss_mask)1481 static int rebind_subsystems(struct cgroup_root *dst_root,
1482 			     unsigned long ss_mask)
1483 {
1484 	struct cgroup *dcgrp = &dst_root->cgrp;
1485 	struct cgroup_subsys *ss;
1486 	unsigned long tmp_ss_mask;
1487 	int ssid, i, ret;
1488 
1489 	lockdep_assert_held(&cgroup_mutex);
1490 
1491 	for_each_subsys_which(ss, ssid, &ss_mask) {
1492 		/* if @ss has non-root csses attached to it, can't move */
1493 		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
1494 			return -EBUSY;
1495 
1496 		/* can't move between two non-dummy roots either */
1497 		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1498 			return -EBUSY;
1499 	}
1500 
1501 	/* skip creating root files on dfl_root for inhibited subsystems */
1502 	tmp_ss_mask = ss_mask;
1503 	if (dst_root == &cgrp_dfl_root)
1504 		tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;
1505 
1506 	for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
1507 		struct cgroup *scgrp = &ss->root->cgrp;
1508 		int tssid;
1509 
1510 		ret = css_populate_dir(cgroup_css(scgrp, ss), dcgrp);
1511 		if (!ret)
1512 			continue;
1513 
1514 		/*
1515 		 * Rebinding back to the default root is not allowed to
1516 		 * fail.  Using both default and non-default roots should
1517 		 * be rare.  Moving subsystems back and forth even more so.
1518 		 * Just warn about it and continue.
1519 		 */
1520 		if (dst_root == &cgrp_dfl_root) {
1521 			if (cgrp_dfl_root_visible) {
1522 				pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
1523 					ret, ss_mask);
1524 				pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
1525 			}
1526 			continue;
1527 		}
1528 
1529 		for_each_subsys_which(ss, tssid, &tmp_ss_mask) {
1530 			if (tssid == ssid)
1531 				break;
1532 			css_clear_dir(cgroup_css(scgrp, ss), dcgrp);
1533 		}
1534 		return ret;
1535 	}
1536 
1537 	/*
1538 	 * Nothing can fail from this point on.  Remove files for the
1539 	 * removed subsystems and rebind each subsystem.
1540 	 */
1541 	for_each_subsys_which(ss, ssid, &ss_mask) {
1542 		struct cgroup_root *src_root = ss->root;
1543 		struct cgroup *scgrp = &src_root->cgrp;
1544 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1545 		struct css_set *cset;
1546 
1547 		WARN_ON(!css || cgroup_css(dcgrp, ss));
1548 
1549 		css_clear_dir(css, NULL);
1550 
1551 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1552 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
1553 		ss->root = dst_root;
1554 		css->cgroup = dcgrp;
1555 
1556 		spin_lock_irq(&css_set_lock);
1557 		hash_for_each(css_set_table, i, cset, hlist)
1558 			list_move_tail(&cset->e_cset_node[ss->id],
1559 				       &dcgrp->e_csets[ss->id]);
1560 		spin_unlock_irq(&css_set_lock);
1561 
1562 		src_root->subsys_mask &= ~(1 << ssid);
1563 		scgrp->subtree_control &= ~(1 << ssid);
1564 		cgroup_refresh_child_subsys_mask(scgrp);
1565 
1566 		/* default hierarchy doesn't enable controllers by default */
1567 		dst_root->subsys_mask |= 1 << ssid;
1568 		if (dst_root == &cgrp_dfl_root) {
1569 			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1570 		} else {
1571 			dcgrp->subtree_control |= 1 << ssid;
1572 			cgroup_refresh_child_subsys_mask(dcgrp);
1573 			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1574 		}
1575 
1576 		if (ss->bind)
1577 			ss->bind(css);
1578 	}
1579 
1580 	kernfs_activate(dcgrp->kn);
1581 	return 0;
1582 }
1583 
cgroup_show_options(struct seq_file * seq,struct kernfs_root * kf_root)1584 static int cgroup_show_options(struct seq_file *seq,
1585 			       struct kernfs_root *kf_root)
1586 {
1587 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1588 	struct cgroup_subsys *ss;
1589 	int ssid;
1590 
1591 	if (root != &cgrp_dfl_root)
1592 		for_each_subsys(ss, ssid)
1593 			if (root->subsys_mask & (1 << ssid))
1594 				seq_show_option(seq, ss->legacy_name, NULL);
1595 	if (root->flags & CGRP_ROOT_NOPREFIX)
1596 		seq_puts(seq, ",noprefix");
1597 	if (root->flags & CGRP_ROOT_XATTR)
1598 		seq_puts(seq, ",xattr");
1599 
1600 	spin_lock(&release_agent_path_lock);
1601 	if (strlen(root->release_agent_path))
1602 		seq_show_option(seq, "release_agent",
1603 				root->release_agent_path);
1604 	spin_unlock(&release_agent_path_lock);
1605 
1606 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
1607 		seq_puts(seq, ",clone_children");
1608 	if (strlen(root->name))
1609 		seq_show_option(seq, "name", root->name);
1610 	return 0;
1611 }
1612 
1613 struct cgroup_sb_opts {
1614 	unsigned long subsys_mask;
1615 	unsigned int flags;
1616 	char *release_agent;
1617 	bool cpuset_clone_children;
1618 	char *name;
1619 	/* User explicitly requested empty subsystem */
1620 	bool none;
1621 };
1622 
parse_cgroupfs_options(char * data,struct cgroup_sb_opts * opts)1623 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
1624 {
1625 	char *token, *o = data;
1626 	bool all_ss = false, one_ss = false;
1627 	unsigned long mask = -1UL;
1628 	struct cgroup_subsys *ss;
1629 	int nr_opts = 0;
1630 	int i;
1631 
1632 #ifdef CONFIG_CPUSETS
1633 	mask = ~(1U << cpuset_cgrp_id);
1634 #endif
1635 
1636 	memset(opts, 0, sizeof(*opts));
1637 
1638 	while ((token = strsep(&o, ",")) != NULL) {
1639 		nr_opts++;
1640 
1641 		if (!*token)
1642 			return -EINVAL;
1643 		if (!strcmp(token, "none")) {
1644 			/* Explicitly have no subsystems */
1645 			opts->none = true;
1646 			continue;
1647 		}
1648 		if (!strcmp(token, "all")) {
1649 			/* Mutually exclusive option 'all' + subsystem name */
1650 			if (one_ss)
1651 				return -EINVAL;
1652 			all_ss = true;
1653 			continue;
1654 		}
1655 		if (!strcmp(token, "__DEVEL__sane_behavior")) {
1656 			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
1657 			continue;
1658 		}
1659 		if (!strcmp(token, "noprefix")) {
1660 			opts->flags |= CGRP_ROOT_NOPREFIX;
1661 			continue;
1662 		}
1663 		if (!strcmp(token, "clone_children")) {
1664 			opts->cpuset_clone_children = true;
1665 			continue;
1666 		}
1667 		if (!strcmp(token, "xattr")) {
1668 			opts->flags |= CGRP_ROOT_XATTR;
1669 			continue;
1670 		}
1671 		if (!strncmp(token, "release_agent=", 14)) {
1672 			/* Specifying two release agents is forbidden */
1673 			if (opts->release_agent)
1674 				return -EINVAL;
1675 			opts->release_agent =
1676 				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
1677 			if (!opts->release_agent)
1678 				return -ENOMEM;
1679 			continue;
1680 		}
1681 		if (!strncmp(token, "name=", 5)) {
1682 			const char *name = token + 5;
1683 			/* Can't specify an empty name */
1684 			if (!strlen(name))
1685 				return -EINVAL;
1686 			/* Must match [\w.-]+ */
1687 			for (i = 0; i < strlen(name); i++) {
1688 				char c = name[i];
1689 				if (isalnum(c))
1690 					continue;
1691 				if ((c == '.') || (c == '-') || (c == '_'))
1692 					continue;
1693 				return -EINVAL;
1694 			}
1695 			/* Specifying two names is forbidden */
1696 			if (opts->name)
1697 				return -EINVAL;
1698 			opts->name = kstrndup(name,
1699 					      MAX_CGROUP_ROOT_NAMELEN - 1,
1700 					      GFP_KERNEL);
1701 			if (!opts->name)
1702 				return -ENOMEM;
1703 
1704 			continue;
1705 		}
1706 
1707 		for_each_subsys(ss, i) {
1708 			if (strcmp(token, ss->legacy_name))
1709 				continue;
1710 			if (!cgroup_ssid_enabled(i))
1711 				continue;
1712 
1713 			/* Mutually exclusive option 'all' + subsystem name */
1714 			if (all_ss)
1715 				return -EINVAL;
1716 			opts->subsys_mask |= (1 << i);
1717 			one_ss = true;
1718 
1719 			break;
1720 		}
1721 		if (i == CGROUP_SUBSYS_COUNT)
1722 			return -ENOENT;
1723 	}
1724 
1725 	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
1726 		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
1727 		if (nr_opts != 1) {
1728 			pr_err("sane_behavior: no other mount options allowed\n");
1729 			return -EINVAL;
1730 		}
1731 		return 0;
1732 	}
1733 
1734 	/*
1735 	 * If the 'all' option was specified select all the subsystems,
1736 	 * otherwise if 'none', 'name=' and a subsystem name options were
1737 	 * not specified, let's default to 'all'
1738 	 */
1739 	if (all_ss || (!one_ss && !opts->none && !opts->name))
1740 		for_each_subsys(ss, i)
1741 			if (cgroup_ssid_enabled(i))
1742 				opts->subsys_mask |= (1 << i);
1743 
1744 	/*
1745 	 * We either have to specify by name or by subsystems. (So all
1746 	 * empty hierarchies must have a name).
1747 	 */
1748 	if (!opts->subsys_mask && !opts->name)
1749 		return -EINVAL;
1750 
1751 	/*
1752 	 * Option noprefix was introduced just for backward compatibility
1753 	 * with the old cpuset, so we allow noprefix only if mounting just
1754 	 * the cpuset subsystem.
1755 	 */
1756 	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
1757 		return -EINVAL;
1758 
1759 	/* Can't specify "none" and some subsystems */
1760 	if (opts->subsys_mask && opts->none)
1761 		return -EINVAL;
1762 
1763 	return 0;
1764 }
1765 
cgroup_remount(struct kernfs_root * kf_root,int * flags,char * data)1766 static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
1767 {
1768 	int ret = 0;
1769 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1770 	struct cgroup_sb_opts opts;
1771 	unsigned long added_mask, removed_mask;
1772 
1773 	if (root == &cgrp_dfl_root) {
1774 		pr_err("remount is not allowed\n");
1775 		return -EINVAL;
1776 	}
1777 
1778 	mutex_lock(&cgroup_mutex);
1779 
1780 	/* See what subsystems are wanted */
1781 	ret = parse_cgroupfs_options(data, &opts);
1782 	if (ret)
1783 		goto out_unlock;
1784 
1785 	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1786 		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1787 			task_tgid_nr(current), current->comm);
1788 
1789 	added_mask = opts.subsys_mask & ~root->subsys_mask;
1790 	removed_mask = root->subsys_mask & ~opts.subsys_mask;
1791 
1792 	/* Don't allow flags or name to change at remount */
1793 	if ((opts.flags ^ root->flags) ||
1794 	    (opts.name && strcmp(opts.name, root->name))) {
1795 		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1796 		       opts.flags, opts.name ?: "", root->flags, root->name);
1797 		ret = -EINVAL;
1798 		goto out_unlock;
1799 	}
1800 
1801 	/* remounting is not allowed for populated hierarchies */
1802 	if (!list_empty(&root->cgrp.self.children)) {
1803 		ret = -EBUSY;
1804 		goto out_unlock;
1805 	}
1806 
1807 	ret = rebind_subsystems(root, added_mask);
1808 	if (ret)
1809 		goto out_unlock;
1810 
1811 	rebind_subsystems(&cgrp_dfl_root, removed_mask);
1812 
1813 	if (opts.release_agent) {
1814 		spin_lock(&release_agent_path_lock);
1815 		strcpy(root->release_agent_path, opts.release_agent);
1816 		spin_unlock(&release_agent_path_lock);
1817 	}
1818  out_unlock:
1819 	kfree(opts.release_agent);
1820 	kfree(opts.name);
1821 	mutex_unlock(&cgroup_mutex);
1822 	return ret;
1823 }
1824 
1825 /*
1826  * To reduce the fork() overhead for systems that are not actually using
1827  * their cgroups capability, we don't maintain the lists running through
1828  * each css_set to its tasks until we see the list actually used - in other
1829  * words after the first mount.
1830  */
1831 static bool use_task_css_set_links __read_mostly;
1832 
cgroup_enable_task_cg_lists(void)1833 static void cgroup_enable_task_cg_lists(void)
1834 {
1835 	struct task_struct *p, *g;
1836 
1837 	spin_lock_irq(&css_set_lock);
1838 
1839 	if (use_task_css_set_links)
1840 		goto out_unlock;
1841 
1842 	use_task_css_set_links = true;
1843 
1844 	/*
1845 	 * We need tasklist_lock because RCU is not safe against
1846 	 * while_each_thread(). Besides, a forking task that has passed
1847 	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
1848 	 * is not guaranteed to have its child immediately visible in the
1849 	 * tasklist if we walk through it with RCU.
1850 	 */
1851 	read_lock(&tasklist_lock);
1852 	do_each_thread(g, p) {
1853 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
1854 			     task_css_set(p) != &init_css_set);
1855 
1856 		/*
1857 		 * We should check if the process is exiting, otherwise
1858 		 * it will race with cgroup_exit() in that the list
1859 		 * entry won't be deleted though the process has exited.
1860 		 * Do it while holding siglock so that we don't end up
1861 		 * racing against cgroup_exit().
1862 		 *
1863 		 * Interrupts were already disabled while acquiring
1864 		 * the css_set_lock, so we do not need to disable it
1865 		 * again when acquiring the sighand->siglock here.
1866 		 */
1867 		spin_lock(&p->sighand->siglock);
1868 		if (!(p->flags & PF_EXITING)) {
1869 			struct css_set *cset = task_css_set(p);
1870 
1871 			if (!css_set_populated(cset))
1872 				css_set_update_populated(cset, true);
1873 			list_add_tail(&p->cg_list, &cset->tasks);
1874 			get_css_set(cset);
1875 		}
1876 		spin_unlock(&p->sighand->siglock);
1877 	} while_each_thread(g, p);
1878 	read_unlock(&tasklist_lock);
1879 out_unlock:
1880 	spin_unlock_irq(&css_set_lock);
1881 }
1882 
init_cgroup_housekeeping(struct cgroup * cgrp)1883 static void init_cgroup_housekeeping(struct cgroup *cgrp)
1884 {
1885 	struct cgroup_subsys *ss;
1886 	int ssid;
1887 
1888 	INIT_LIST_HEAD(&cgrp->self.sibling);
1889 	INIT_LIST_HEAD(&cgrp->self.children);
1890 	INIT_LIST_HEAD(&cgrp->cset_links);
1891 	INIT_LIST_HEAD(&cgrp->pidlists);
1892 	mutex_init(&cgrp->pidlist_mutex);
1893 	cgrp->self.cgroup = cgrp;
1894 	cgrp->self.flags |= CSS_ONLINE;
1895 
1896 	for_each_subsys(ss, ssid)
1897 		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1898 
1899 	init_waitqueue_head(&cgrp->offline_waitq);
1900 	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
1901 }
1902 
init_cgroup_root(struct cgroup_root * root,struct cgroup_sb_opts * opts)1903 static void init_cgroup_root(struct cgroup_root *root,
1904 			     struct cgroup_sb_opts *opts)
1905 {
1906 	struct cgroup *cgrp = &root->cgrp;
1907 
1908 	INIT_LIST_HEAD(&root->root_list);
1909 	atomic_set(&root->nr_cgrps, 1);
1910 	cgrp->root = root;
1911 	init_cgroup_housekeeping(cgrp);
1912 	idr_init(&root->cgroup_idr);
1913 
1914 	root->flags = opts->flags;
1915 	if (opts->release_agent)
1916 		strcpy(root->release_agent_path, opts->release_agent);
1917 	if (opts->name)
1918 		strcpy(root->name, opts->name);
1919 	if (opts->cpuset_clone_children)
1920 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1921 }
1922 
cgroup_setup_root(struct cgroup_root * root,unsigned long ss_mask)1923 static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
1924 {
1925 	LIST_HEAD(tmp_links);
1926 	struct cgroup *root_cgrp = &root->cgrp;
1927 	struct css_set *cset;
1928 	int i, ret;
1929 
1930 	lockdep_assert_held(&cgroup_mutex);
1931 
1932 	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
1933 	if (ret < 0)
1934 		goto out;
1935 	root_cgrp->id = ret;
1936 
1937 	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
1938 			      GFP_KERNEL);
1939 	if (ret)
1940 		goto out;
1941 
1942 	/*
1943 	 * We're accessing css_set_count without locking css_set_lock here,
1944 	 * but that's OK - it can only be increased by someone holding
1945 	 * cgroup_lock, and that's us. The worst that can happen is that we
1946 	 * have some link structures left over
1947 	 */
1948 	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
1949 	if (ret)
1950 		goto cancel_ref;
1951 
1952 	ret = cgroup_init_root_id(root);
1953 	if (ret)
1954 		goto cancel_ref;
1955 
1956 	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
1957 					   KERNFS_ROOT_CREATE_DEACTIVATED,
1958 					   root_cgrp);
1959 	if (IS_ERR(root->kf_root)) {
1960 		ret = PTR_ERR(root->kf_root);
1961 		goto exit_root_id;
1962 	}
1963 	root_cgrp->kn = root->kf_root->kn;
1964 
1965 	ret = css_populate_dir(&root_cgrp->self, NULL);
1966 	if (ret)
1967 		goto destroy_root;
1968 
1969 	ret = rebind_subsystems(root, ss_mask);
1970 	if (ret)
1971 		goto destroy_root;
1972 
1973 	/*
1974 	 * There must be no failure case after here, since rebinding takes
1975 	 * care of subsystems' refcounts, which are explicitly dropped in
1976 	 * the failure exit path.
1977 	 */
1978 	list_add(&root->root_list, &cgroup_roots);
1979 	cgroup_root_count++;
1980 
1981 	/*
1982 	 * Link the root cgroup in this hierarchy into all the css_set
1983 	 * objects.
1984 	 */
1985 	spin_lock_irq(&css_set_lock);
1986 	hash_for_each(css_set_table, i, cset, hlist) {
1987 		link_css_set(&tmp_links, cset, root_cgrp);
1988 		if (css_set_populated(cset))
1989 			cgroup_update_populated(root_cgrp, true);
1990 	}
1991 	spin_unlock_irq(&css_set_lock);
1992 
1993 	BUG_ON(!list_empty(&root_cgrp->self.children));
1994 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
1995 
1996 	kernfs_activate(root_cgrp->kn);
1997 	ret = 0;
1998 	goto out;
1999 
2000 destroy_root:
2001 	kernfs_destroy_root(root->kf_root);
2002 	root->kf_root = NULL;
2003 exit_root_id:
2004 	cgroup_exit_root_id(root);
2005 cancel_ref:
2006 	percpu_ref_exit(&root_cgrp->self.refcnt);
2007 out:
2008 	free_cgrp_cset_links(&tmp_links);
2009 	return ret;
2010 }
2011 
cgroup_mount(struct file_system_type * fs_type,int flags,const char * unused_dev_name,void * data)2012 static struct dentry *cgroup_mount(struct file_system_type *fs_type,
2013 			 int flags, const char *unused_dev_name,
2014 			 void *data)
2015 {
2016 	struct super_block *pinned_sb = NULL;
2017 	struct cgroup_subsys *ss;
2018 	struct cgroup_root *root;
2019 	struct cgroup_sb_opts opts;
2020 	struct dentry *dentry;
2021 	int ret;
2022 	int i;
2023 	bool new_sb;
2024 
2025 	/*
2026 	 * The first time anyone tries to mount a cgroup, enable the list
2027 	 * linking each css_set to its tasks and fix up all existing tasks.
2028 	 */
2029 	if (!use_task_css_set_links)
2030 		cgroup_enable_task_cg_lists();
2031 
2032 	mutex_lock(&cgroup_mutex);
2033 
2034 	/* First find the desired set of subsystems */
2035 	ret = parse_cgroupfs_options(data, &opts);
2036 	if (ret)
2037 		goto out_unlock;
2038 
2039 	/* look for a matching existing root */
2040 	if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
2041 		cgrp_dfl_root_visible = true;
2042 		root = &cgrp_dfl_root;
2043 		cgroup_get(&root->cgrp);
2044 		ret = 0;
2045 		goto out_unlock;
2046 	}
2047 
2048 	/*
2049 	 * Destruction of cgroup root is asynchronous, so subsystems may
2050 	 * still be dying after the previous unmount.  Let's drain the
2051 	 * dying subsystems.  We just need to ensure that the ones
2052 	 * unmounted previously finish dying and don't care about new ones
2053 	 * starting.  Testing ref liveliness is good enough.
2054 	 */
2055 	for_each_subsys(ss, i) {
2056 		if (!(opts.subsys_mask & (1 << i)) ||
2057 		    ss->root == &cgrp_dfl_root)
2058 			continue;
2059 
2060 		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
2061 			mutex_unlock(&cgroup_mutex);
2062 			msleep(10);
2063 			ret = restart_syscall();
2064 			goto out_free;
2065 		}
2066 		cgroup_put(&ss->root->cgrp);
2067 	}
2068 
2069 	for_each_root(root) {
2070 		bool name_match = false;
2071 
2072 		if (root == &cgrp_dfl_root)
2073 			continue;
2074 
2075 		/*
2076 		 * If we asked for a name then it must match.  Also, if
2077 		 * name matches but sybsys_mask doesn't, we should fail.
2078 		 * Remember whether name matched.
2079 		 */
2080 		if (opts.name) {
2081 			if (strcmp(opts.name, root->name))
2082 				continue;
2083 			name_match = true;
2084 		}
2085 
2086 		/*
2087 		 * If we asked for subsystems (or explicitly for no
2088 		 * subsystems) then they must match.
2089 		 */
2090 		if ((opts.subsys_mask || opts.none) &&
2091 		    (opts.subsys_mask != root->subsys_mask)) {
2092 			if (!name_match)
2093 				continue;
2094 			ret = -EBUSY;
2095 			goto out_unlock;
2096 		}
2097 
2098 		if (root->flags ^ opts.flags)
2099 			pr_warn("new mount options do not match the existing superblock, will be ignored\n");
2100 
2101 		/*
2102 		 * We want to reuse @root whose lifetime is governed by its
2103 		 * ->cgrp.  Let's check whether @root is alive and keep it
2104 		 * that way.  As cgroup_kill_sb() can happen anytime, we
2105 		 * want to block it by pinning the sb so that @root doesn't
2106 		 * get killed before mount is complete.
2107 		 *
2108 		 * With the sb pinned, tryget_live can reliably indicate
2109 		 * whether @root can be reused.  If it's being killed,
2110 		 * drain it.  We can use wait_queue for the wait but this
2111 		 * path is super cold.  Let's just sleep a bit and retry.
2112 		 */
2113 		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
2114 		if (IS_ERR(pinned_sb) ||
2115 		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
2116 			mutex_unlock(&cgroup_mutex);
2117 			if (!IS_ERR_OR_NULL(pinned_sb))
2118 				deactivate_super(pinned_sb);
2119 			msleep(10);
2120 			ret = restart_syscall();
2121 			goto out_free;
2122 		}
2123 
2124 		ret = 0;
2125 		goto out_unlock;
2126 	}
2127 
2128 	/*
2129 	 * No such thing, create a new one.  name= matching without subsys
2130 	 * specification is allowed for already existing hierarchies but we
2131 	 * can't create new one without subsys specification.
2132 	 */
2133 	if (!opts.subsys_mask && !opts.none) {
2134 		ret = -EINVAL;
2135 		goto out_unlock;
2136 	}
2137 
2138 	root = kzalloc(sizeof(*root), GFP_KERNEL);
2139 	if (!root) {
2140 		ret = -ENOMEM;
2141 		goto out_unlock;
2142 	}
2143 
2144 	init_cgroup_root(root, &opts);
2145 
2146 	ret = cgroup_setup_root(root, opts.subsys_mask);
2147 	if (ret)
2148 		cgroup_free_root(root);
2149 
2150 out_unlock:
2151 	mutex_unlock(&cgroup_mutex);
2152 out_free:
2153 	kfree(opts.release_agent);
2154 	kfree(opts.name);
2155 
2156 	if (ret)
2157 		return ERR_PTR(ret);
2158 
2159 	dentry = kernfs_mount(fs_type, flags, root->kf_root,
2160 				CGROUP_SUPER_MAGIC, &new_sb);
2161 	if (IS_ERR(dentry) || !new_sb)
2162 		cgroup_put(&root->cgrp);
2163 
2164 	/*
2165 	 * If @pinned_sb, we're reusing an existing root and holding an
2166 	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
2167 	 */
2168 	if (pinned_sb) {
2169 		WARN_ON(new_sb);
2170 		deactivate_super(pinned_sb);
2171 	}
2172 
2173 	return dentry;
2174 }
2175 
cgroup_kill_sb(struct super_block * sb)2176 static void cgroup_kill_sb(struct super_block *sb)
2177 {
2178 	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
2179 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
2180 
2181 	/*
2182 	 * If @root doesn't have any mounts or children, start killing it.
2183 	 * This prevents new mounts by disabling percpu_ref_tryget_live().
2184 	 * cgroup_mount() may wait for @root's release.
2185 	 *
2186 	 * And don't kill the default root.
2187 	 */
2188 	if (!list_empty(&root->cgrp.self.children) ||
2189 	    root == &cgrp_dfl_root)
2190 		cgroup_put(&root->cgrp);
2191 	else
2192 		percpu_ref_kill(&root->cgrp.self.refcnt);
2193 
2194 	kernfs_kill_sb(sb);
2195 }
2196 
2197 static struct file_system_type cgroup_fs_type = {
2198 	.name = "cgroup",
2199 	.mount = cgroup_mount,
2200 	.kill_sb = cgroup_kill_sb,
2201 };
2202 
2203 /**
2204  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
2205  * @task: target task
2206  * @buf: the buffer to write the path into
2207  * @buflen: the length of the buffer
2208  *
2209  * Determine @task's cgroup on the first (the one with the lowest non-zero
2210  * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
2211  * function grabs cgroup_mutex and shouldn't be used inside locks used by
2212  * cgroup controller callbacks.
2213  *
2214  * Return value is the same as kernfs_path().
2215  */
task_cgroup_path(struct task_struct * task,char * buf,size_t buflen)2216 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2217 {
2218 	struct cgroup_root *root;
2219 	struct cgroup *cgrp;
2220 	int hierarchy_id = 1;
2221 	char *path = NULL;
2222 
2223 	mutex_lock(&cgroup_mutex);
2224 	spin_lock_irq(&css_set_lock);
2225 
2226 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2227 
2228 	if (root) {
2229 		cgrp = task_cgroup_from_root(task, root);
2230 		path = cgroup_path(cgrp, buf, buflen);
2231 	} else {
2232 		/* if no hierarchy exists, everyone is in "/" */
2233 		if (strlcpy(buf, "/", buflen) < buflen)
2234 			path = buf;
2235 	}
2236 
2237 	spin_unlock_irq(&css_set_lock);
2238 	mutex_unlock(&cgroup_mutex);
2239 	return path;
2240 }
2241 EXPORT_SYMBOL_GPL(task_cgroup_path);
2242 
2243 /* used to track tasks and other necessary states during migration */
2244 struct cgroup_taskset {
2245 	/* the src and dst cset list running through cset->mg_node */
2246 	struct list_head	src_csets;
2247 	struct list_head	dst_csets;
2248 
2249 	/* the subsys currently being processed */
2250 	int			ssid;
2251 
2252 	/*
2253 	 * Fields for cgroup_taskset_*() iteration.
2254 	 *
2255 	 * Before migration is committed, the target migration tasks are on
2256 	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
2257 	 * the csets on ->dst_csets.  ->csets point to either ->src_csets
2258 	 * or ->dst_csets depending on whether migration is committed.
2259 	 *
2260 	 * ->cur_csets and ->cur_task point to the current task position
2261 	 * during iteration.
2262 	 */
2263 	struct list_head	*csets;
2264 	struct css_set		*cur_cset;
2265 	struct task_struct	*cur_task;
2266 };
2267 
2268 #define CGROUP_TASKSET_INIT(tset)	(struct cgroup_taskset){	\
2269 	.src_csets		= LIST_HEAD_INIT(tset.src_csets),	\
2270 	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),	\
2271 	.csets			= &tset.src_csets,			\
2272 }
2273 
2274 /**
2275  * cgroup_taskset_add - try to add a migration target task to a taskset
2276  * @task: target task
2277  * @tset: target taskset
2278  *
2279  * Add @task, which is a migration target, to @tset.  This function becomes
2280  * noop if @task doesn't need to be migrated.  @task's css_set should have
2281  * been added as a migration source and @task->cg_list will be moved from
2282  * the css_set's tasks list to mg_tasks one.
2283  */
cgroup_taskset_add(struct task_struct * task,struct cgroup_taskset * tset)2284 static void cgroup_taskset_add(struct task_struct *task,
2285 			       struct cgroup_taskset *tset)
2286 {
2287 	struct css_set *cset;
2288 
2289 	lockdep_assert_held(&css_set_lock);
2290 
2291 	/* @task either already exited or can't exit until the end */
2292 	if (task->flags & PF_EXITING)
2293 		return;
2294 
2295 	/* leave @task alone if post_fork() hasn't linked it yet */
2296 	if (list_empty(&task->cg_list))
2297 		return;
2298 
2299 	cset = task_css_set(task);
2300 	if (!cset->mg_src_cgrp)
2301 		return;
2302 
2303 	list_move_tail(&task->cg_list, &cset->mg_tasks);
2304 	if (list_empty(&cset->mg_node))
2305 		list_add_tail(&cset->mg_node, &tset->src_csets);
2306 	if (list_empty(&cset->mg_dst_cset->mg_node))
2307 		list_move_tail(&cset->mg_dst_cset->mg_node,
2308 			       &tset->dst_csets);
2309 }
2310 
2311 /**
2312  * cgroup_taskset_first - reset taskset and return the first task
2313  * @tset: taskset of interest
2314  * @dst_cssp: output variable for the destination css
2315  *
2316  * @tset iteration is initialized and the first task is returned.
2317  */
cgroup_taskset_first(struct cgroup_taskset * tset,struct cgroup_subsys_state ** dst_cssp)2318 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2319 					 struct cgroup_subsys_state **dst_cssp)
2320 {
2321 	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2322 	tset->cur_task = NULL;
2323 
2324 	return cgroup_taskset_next(tset, dst_cssp);
2325 }
2326 
2327 /**
2328  * cgroup_taskset_next - iterate to the next task in taskset
2329  * @tset: taskset of interest
2330  * @dst_cssp: output variable for the destination css
2331  *
2332  * Return the next task in @tset.  Iteration must have been initialized
2333  * with cgroup_taskset_first().
2334  */
cgroup_taskset_next(struct cgroup_taskset * tset,struct cgroup_subsys_state ** dst_cssp)2335 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2336 					struct cgroup_subsys_state **dst_cssp)
2337 {
2338 	struct css_set *cset = tset->cur_cset;
2339 	struct task_struct *task = tset->cur_task;
2340 
2341 	while (&cset->mg_node != tset->csets) {
2342 		if (!task)
2343 			task = list_first_entry(&cset->mg_tasks,
2344 						struct task_struct, cg_list);
2345 		else
2346 			task = list_next_entry(task, cg_list);
2347 
2348 		if (&task->cg_list != &cset->mg_tasks) {
2349 			tset->cur_cset = cset;
2350 			tset->cur_task = task;
2351 
2352 			/*
2353 			 * This function may be called both before and
2354 			 * after cgroup_taskset_migrate().  The two cases
2355 			 * can be distinguished by looking at whether @cset
2356 			 * has its ->mg_dst_cset set.
2357 			 */
2358 			if (cset->mg_dst_cset)
2359 				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2360 			else
2361 				*dst_cssp = cset->subsys[tset->ssid];
2362 
2363 			return task;
2364 		}
2365 
2366 		cset = list_next_entry(cset, mg_node);
2367 		task = NULL;
2368 	}
2369 
2370 	return NULL;
2371 }
2372 
2373 /**
2374  * cgroup_taskset_migrate - migrate a taskset to a cgroup
2375  * @tset: taget taskset
2376  * @dst_cgrp: destination cgroup
2377  *
2378  * Migrate tasks in @tset to @dst_cgrp.  This function fails iff one of the
2379  * ->can_attach callbacks fails and guarantees that either all or none of
2380  * the tasks in @tset are migrated.  @tset is consumed regardless of
2381  * success.
2382  */
cgroup_taskset_migrate(struct cgroup_taskset * tset,struct cgroup * dst_cgrp)2383 static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2384 				  struct cgroup *dst_cgrp)
2385 {
2386 	struct cgroup_subsys_state *css, *failed_css = NULL;
2387 	struct task_struct *task, *tmp_task;
2388 	struct css_set *cset, *tmp_cset;
2389 	int i, ret;
2390 
2391 	/* methods shouldn't be called if no task is actually migrating */
2392 	if (list_empty(&tset->src_csets))
2393 		return 0;
2394 
2395 	/* check that we can legitimately attach to the cgroup */
2396 	for_each_e_css(css, i, dst_cgrp) {
2397 		if (css->ss->can_attach) {
2398 			tset->ssid = i;
2399 			ret = css->ss->can_attach(tset);
2400 			if (ret) {
2401 				failed_css = css;
2402 				goto out_cancel_attach;
2403 			}
2404 		}
2405 	}
2406 
2407 	/*
2408 	 * Now that we're guaranteed success, proceed to move all tasks to
2409 	 * the new cgroup.  There are no failure cases after here, so this
2410 	 * is the commit point.
2411 	 */
2412 	spin_lock_irq(&css_set_lock);
2413 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
2414 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2415 			struct css_set *from_cset = task_css_set(task);
2416 			struct css_set *to_cset = cset->mg_dst_cset;
2417 
2418 			get_css_set(to_cset);
2419 			css_set_move_task(task, from_cset, to_cset, true);
2420 			put_css_set_locked(from_cset);
2421 		}
2422 	}
2423 	spin_unlock_irq(&css_set_lock);
2424 
2425 	/*
2426 	 * Migration is committed, all target tasks are now on dst_csets.
2427 	 * Nothing is sensitive to fork() after this point.  Notify
2428 	 * controllers that migration is complete.
2429 	 */
2430 	tset->csets = &tset->dst_csets;
2431 
2432 	for_each_e_css(css, i, dst_cgrp) {
2433 		if (css->ss->attach) {
2434 			tset->ssid = i;
2435 			css->ss->attach(tset);
2436 		}
2437 	}
2438 
2439 	ret = 0;
2440 	goto out_release_tset;
2441 
2442 out_cancel_attach:
2443 	for_each_e_css(css, i, dst_cgrp) {
2444 		if (css == failed_css)
2445 			break;
2446 		if (css->ss->cancel_attach) {
2447 			tset->ssid = i;
2448 			css->ss->cancel_attach(tset);
2449 		}
2450 	}
2451 out_release_tset:
2452 	spin_lock_irq(&css_set_lock);
2453 	list_splice_init(&tset->dst_csets, &tset->src_csets);
2454 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2455 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2456 		list_del_init(&cset->mg_node);
2457 	}
2458 	spin_unlock_irq(&css_set_lock);
2459 	return ret;
2460 }
2461 
2462 /**
2463  * cgroup_migrate_finish - cleanup after attach
2464  * @preloaded_csets: list of preloaded css_sets
2465  *
2466  * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
2467  * those functions for details.
2468  */
cgroup_migrate_finish(struct list_head * preloaded_csets)2469 static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2470 {
2471 	struct css_set *cset, *tmp_cset;
2472 
2473 	lockdep_assert_held(&cgroup_mutex);
2474 
2475 	spin_lock_irq(&css_set_lock);
2476 	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
2477 		cset->mg_src_cgrp = NULL;
2478 		cset->mg_dst_cset = NULL;
2479 		list_del_init(&cset->mg_preload_node);
2480 		put_css_set_locked(cset);
2481 	}
2482 	spin_unlock_irq(&css_set_lock);
2483 }
2484 
2485 /**
2486  * cgroup_migrate_add_src - add a migration source css_set
2487  * @src_cset: the source css_set to add
2488  * @dst_cgrp: the destination cgroup
2489  * @preloaded_csets: list of preloaded css_sets
2490  *
2491  * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
2492  * @src_cset and add it to @preloaded_csets, which should later be cleaned
2493  * up by cgroup_migrate_finish().
2494  *
2495  * This function may be called without holding cgroup_threadgroup_rwsem
2496  * even if the target is a process.  Threads may be created and destroyed
2497  * but as long as cgroup_mutex is not dropped, no new css_set can be put
2498  * into play and the preloaded css_sets are guaranteed to cover all
2499  * migrations.
2500  */
cgroup_migrate_add_src(struct css_set * src_cset,struct cgroup * dst_cgrp,struct list_head * preloaded_csets)2501 static void cgroup_migrate_add_src(struct css_set *src_cset,
2502 				   struct cgroup *dst_cgrp,
2503 				   struct list_head *preloaded_csets)
2504 {
2505 	struct cgroup *src_cgrp;
2506 
2507 	lockdep_assert_held(&cgroup_mutex);
2508 	lockdep_assert_held(&css_set_lock);
2509 
2510 	/*
2511 	 * If ->dead, @src_set is associated with one or more dead cgroups
2512 	 * and doesn't contain any migratable tasks.  Ignore it early so
2513 	 * that the rest of migration path doesn't get confused by it.
2514 	 */
2515 	if (src_cset->dead)
2516 		return;
2517 
2518 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2519 
2520 	if (!list_empty(&src_cset->mg_preload_node))
2521 		return;
2522 
2523 	WARN_ON(src_cset->mg_src_cgrp);
2524 	WARN_ON(!list_empty(&src_cset->mg_tasks));
2525 	WARN_ON(!list_empty(&src_cset->mg_node));
2526 
2527 	src_cset->mg_src_cgrp = src_cgrp;
2528 	get_css_set(src_cset);
2529 	list_add(&src_cset->mg_preload_node, preloaded_csets);
2530 }
2531 
2532 /**
2533  * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2534  * @dst_cgrp: the destination cgroup (may be %NULL)
2535  * @preloaded_csets: list of preloaded source css_sets
2536  *
2537  * Tasks are about to be moved to @dst_cgrp and all the source css_sets
2538  * have been preloaded to @preloaded_csets.  This function looks up and
2539  * pins all destination css_sets, links each to its source, and append them
2540  * to @preloaded_csets.  If @dst_cgrp is %NULL, the destination of each
2541  * source css_set is assumed to be its cgroup on the default hierarchy.
2542  *
2543  * This function must be called after cgroup_migrate_add_src() has been
2544  * called on each migration source css_set.  After migration is performed
2545  * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2546  * @preloaded_csets.
2547  */
cgroup_migrate_prepare_dst(struct cgroup * dst_cgrp,struct list_head * preloaded_csets)2548 static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
2549 				      struct list_head *preloaded_csets)
2550 {
2551 	LIST_HEAD(csets);
2552 	struct css_set *src_cset, *tmp_cset;
2553 
2554 	lockdep_assert_held(&cgroup_mutex);
2555 
2556 	/*
2557 	 * Except for the root, child_subsys_mask must be zero for a cgroup
2558 	 * with tasks so that child cgroups don't compete against tasks.
2559 	 */
2560 	if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
2561 	    dst_cgrp->child_subsys_mask)
2562 		return -EBUSY;
2563 
2564 	/* look up the dst cset for each src cset and link it to src */
2565 	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
2566 		struct css_set *dst_cset;
2567 
2568 		dst_cset = find_css_set(src_cset,
2569 					dst_cgrp ?: src_cset->dfl_cgrp);
2570 		if (!dst_cset)
2571 			goto err;
2572 
2573 		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2574 
2575 		/*
2576 		 * If src cset equals dst, it's noop.  Drop the src.
2577 		 * cgroup_migrate() will skip the cset too.  Note that we
2578 		 * can't handle src == dst as some nodes are used by both.
2579 		 */
2580 		if (src_cset == dst_cset) {
2581 			src_cset->mg_src_cgrp = NULL;
2582 			list_del_init(&src_cset->mg_preload_node);
2583 			put_css_set(src_cset);
2584 			put_css_set(dst_cset);
2585 			continue;
2586 		}
2587 
2588 		src_cset->mg_dst_cset = dst_cset;
2589 
2590 		if (list_empty(&dst_cset->mg_preload_node))
2591 			list_add(&dst_cset->mg_preload_node, &csets);
2592 		else
2593 			put_css_set(dst_cset);
2594 	}
2595 
2596 	list_splice_tail(&csets, preloaded_csets);
2597 	return 0;
2598 err:
2599 	cgroup_migrate_finish(&csets);
2600 	return -ENOMEM;
2601 }
2602 
2603 /**
2604  * cgroup_migrate - migrate a process or task to a cgroup
2605  * @leader: the leader of the process or the task to migrate
2606  * @threadgroup: whether @leader points to the whole process or a single task
2607  * @cgrp: the destination cgroup
2608  *
2609  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
2610  * process, the caller must be holding cgroup_threadgroup_rwsem.  The
2611  * caller is also responsible for invoking cgroup_migrate_add_src() and
2612  * cgroup_migrate_prepare_dst() on the targets before invoking this
2613  * function and following up with cgroup_migrate_finish().
2614  *
2615  * As long as a controller's ->can_attach() doesn't fail, this function is
2616  * guaranteed to succeed.  This means that, excluding ->can_attach()
2617  * failure, when migrating multiple targets, the success or failure can be
2618  * decided for all targets by invoking group_migrate_prepare_dst() before
2619  * actually starting migrating.
2620  */
cgroup_migrate(struct task_struct * leader,bool threadgroup,struct cgroup * cgrp)2621 static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2622 			  struct cgroup *cgrp)
2623 {
2624 	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
2625 	struct task_struct *task;
2626 
2627 	/*
2628 	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
2629 	 * already PF_EXITING could be freed from underneath us unless we
2630 	 * take an rcu_read_lock.
2631 	 */
2632 	spin_lock_irq(&css_set_lock);
2633 	rcu_read_lock();
2634 	task = leader;
2635 	do {
2636 		cgroup_taskset_add(task, &tset);
2637 		if (!threadgroup)
2638 			break;
2639 	} while_each_thread(leader, task);
2640 	rcu_read_unlock();
2641 	spin_unlock_irq(&css_set_lock);
2642 
2643 	return cgroup_taskset_migrate(&tset, cgrp);
2644 }
2645 
2646 /**
2647  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2648  * @dst_cgrp: the cgroup to attach to
2649  * @leader: the task or the leader of the threadgroup to be attached
2650  * @threadgroup: attach the whole threadgroup?
2651  *
2652  * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2653  */
cgroup_attach_task(struct cgroup * dst_cgrp,struct task_struct * leader,bool threadgroup)2654 static int cgroup_attach_task(struct cgroup *dst_cgrp,
2655 			      struct task_struct *leader, bool threadgroup)
2656 {
2657 	LIST_HEAD(preloaded_csets);
2658 	struct task_struct *task;
2659 	int ret;
2660 
2661 	/* look up all src csets */
2662 	spin_lock_irq(&css_set_lock);
2663 	rcu_read_lock();
2664 	task = leader;
2665 	do {
2666 		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
2667 				       &preloaded_csets);
2668 		if (!threadgroup)
2669 			break;
2670 	} while_each_thread(leader, task);
2671 	rcu_read_unlock();
2672 	spin_unlock_irq(&css_set_lock);
2673 
2674 	/* prepare dst csets and commit */
2675 	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
2676 	if (!ret)
2677 		ret = cgroup_migrate(leader, threadgroup, dst_cgrp);
2678 
2679 	cgroup_migrate_finish(&preloaded_csets);
2680 	return ret;
2681 }
2682 
cgroup_procs_write_permission(struct task_struct * task,struct cgroup * dst_cgrp,struct kernfs_open_file * of)2683 static int cgroup_procs_write_permission(struct task_struct *task,
2684 					 struct cgroup *dst_cgrp,
2685 					 struct kernfs_open_file *of)
2686 {
2687 	const struct cred *cred = current_cred();
2688 	const struct cred *tcred = get_task_cred(task);
2689 	int ret = 0;
2690 
2691 	/*
2692 	 * even if we're attaching all tasks in the thread group, we only
2693 	 * need to check permissions on one of them.
2694 	 */
2695 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
2696 	    !uid_eq(cred->euid, tcred->uid) &&
2697 	    !uid_eq(cred->euid, tcred->suid) &&
2698 	    !ns_capable(tcred->user_ns, CAP_SYS_NICE))
2699 		ret = -EACCES;
2700 
2701 	if (!ret && cgroup_on_dfl(dst_cgrp)) {
2702 		struct super_block *sb = of->file->f_path.dentry->d_sb;
2703 		struct cgroup *cgrp;
2704 		struct inode *inode;
2705 
2706 		spin_lock_irq(&css_set_lock);
2707 		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2708 		spin_unlock_irq(&css_set_lock);
2709 
2710 		while (!cgroup_is_descendant(dst_cgrp, cgrp))
2711 			cgrp = cgroup_parent(cgrp);
2712 
2713 		ret = -ENOMEM;
2714 		inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
2715 		if (inode) {
2716 			ret = inode_permission(inode, MAY_WRITE);
2717 			iput(inode);
2718 		}
2719 	}
2720 
2721 	put_cred(tcred);
2722 	return ret;
2723 }
2724 
2725 /*
2726  * Find the task_struct of the task to attach by vpid and pass it along to the
2727  * function to attach either it or all tasks in its threadgroup. Will lock
2728  * cgroup_mutex and threadgroup.
2729  */
__cgroup_procs_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,bool threadgroup)2730 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2731 				    size_t nbytes, loff_t off, bool threadgroup)
2732 {
2733 	struct task_struct *tsk;
2734 	struct cgroup_subsys *ss;
2735 	struct cgroup *cgrp;
2736 	pid_t pid;
2737 	int ssid, ret;
2738 
2739 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2740 		return -EINVAL;
2741 
2742 	cgrp = cgroup_kn_lock_live(of->kn);
2743 	if (!cgrp)
2744 		return -ENODEV;
2745 
2746 	percpu_down_write(&cgroup_threadgroup_rwsem);
2747 	rcu_read_lock();
2748 	if (pid) {
2749 		tsk = find_task_by_vpid(pid);
2750 		if (!tsk) {
2751 			ret = -ESRCH;
2752 			goto out_unlock_rcu;
2753 		}
2754 	} else {
2755 		tsk = current;
2756 	}
2757 
2758 	if (threadgroup)
2759 		tsk = tsk->group_leader;
2760 
2761 	/*
2762 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2763 	 * If userland migrates such a kthread to a non-root cgroup, it can
2764 	 * become trapped in a cpuset, or RT kthread may be born in a
2765 	 * cgroup with no rt_runtime allocated.  Just say no.
2766 	 */
2767 	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2768 		ret = -EINVAL;
2769 		goto out_unlock_rcu;
2770 	}
2771 
2772 	get_task_struct(tsk);
2773 	rcu_read_unlock();
2774 
2775 	ret = cgroup_procs_write_permission(tsk, cgrp, of);
2776 	if (!ret)
2777 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2778 
2779 	put_task_struct(tsk);
2780 	goto out_unlock_threadgroup;
2781 
2782 out_unlock_rcu:
2783 	rcu_read_unlock();
2784 out_unlock_threadgroup:
2785 	percpu_up_write(&cgroup_threadgroup_rwsem);
2786 	for_each_subsys(ss, ssid)
2787 		if (ss->post_attach)
2788 			ss->post_attach();
2789 	cgroup_kn_unlock(of->kn);
2790 	return ret ?: nbytes;
2791 }
2792 
2793 /**
2794  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
2795  * @from: attach to all cgroups of a given task
2796  * @tsk: the task to be attached
2797  */
cgroup_attach_task_all(struct task_struct * from,struct task_struct * tsk)2798 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2799 {
2800 	struct cgroup_root *root;
2801 	int retval = 0;
2802 
2803 	mutex_lock(&cgroup_mutex);
2804 	for_each_root(root) {
2805 		struct cgroup *from_cgrp;
2806 
2807 		if (root == &cgrp_dfl_root)
2808 			continue;
2809 
2810 		spin_lock_irq(&css_set_lock);
2811 		from_cgrp = task_cgroup_from_root(from, root);
2812 		spin_unlock_irq(&css_set_lock);
2813 
2814 		retval = cgroup_attach_task(from_cgrp, tsk, false);
2815 		if (retval)
2816 			break;
2817 	}
2818 	mutex_unlock(&cgroup_mutex);
2819 
2820 	return retval;
2821 }
2822 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
2823 
cgroup_tasks_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)2824 static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
2825 				  char *buf, size_t nbytes, loff_t off)
2826 {
2827 	return __cgroup_procs_write(of, buf, nbytes, off, false);
2828 }
2829 
cgroup_procs_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)2830 static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
2831 				  char *buf, size_t nbytes, loff_t off)
2832 {
2833 	return __cgroup_procs_write(of, buf, nbytes, off, true);
2834 }
2835 
cgroup_release_agent_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)2836 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
2837 					  char *buf, size_t nbytes, loff_t off)
2838 {
2839 	struct cgroup *cgrp;
2840 
2841 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
2842 
2843 	cgrp = cgroup_kn_lock_live(of->kn);
2844 	if (!cgrp)
2845 		return -ENODEV;
2846 	spin_lock(&release_agent_path_lock);
2847 	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
2848 		sizeof(cgrp->root->release_agent_path));
2849 	spin_unlock(&release_agent_path_lock);
2850 	cgroup_kn_unlock(of->kn);
2851 	return nbytes;
2852 }
2853 
cgroup_release_agent_show(struct seq_file * seq,void * v)2854 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
2855 {
2856 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2857 
2858 	spin_lock(&release_agent_path_lock);
2859 	seq_puts(seq, cgrp->root->release_agent_path);
2860 	spin_unlock(&release_agent_path_lock);
2861 	seq_putc(seq, '\n');
2862 	return 0;
2863 }
2864 
cgroup_sane_behavior_show(struct seq_file * seq,void * v)2865 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
2866 {
2867 	seq_puts(seq, "0\n");
2868 	return 0;
2869 }
2870 
cgroup_print_ss_mask(struct seq_file * seq,unsigned long ss_mask)2871 static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
2872 {
2873 	struct cgroup_subsys *ss;
2874 	bool printed = false;
2875 	int ssid;
2876 
2877 	for_each_subsys_which(ss, ssid, &ss_mask) {
2878 		if (printed)
2879 			seq_putc(seq, ' ');
2880 		seq_printf(seq, "%s", ss->name);
2881 		printed = true;
2882 	}
2883 	if (printed)
2884 		seq_putc(seq, '\n');
2885 }
2886 
2887 /* show controllers which are currently attached to the default hierarchy */
cgroup_root_controllers_show(struct seq_file * seq,void * v)2888 static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
2889 {
2890 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2891 
2892 	cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
2893 			     ~cgrp_dfl_root_inhibit_ss_mask);
2894 	return 0;
2895 }
2896 
2897 /* show controllers which are enabled from the parent */
cgroup_controllers_show(struct seq_file * seq,void * v)2898 static int cgroup_controllers_show(struct seq_file *seq, void *v)
2899 {
2900 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2901 
2902 	cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
2903 	return 0;
2904 }
2905 
2906 /* show controllers which are enabled for a given cgroup's children */
cgroup_subtree_control_show(struct seq_file * seq,void * v)2907 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
2908 {
2909 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2910 
2911 	cgroup_print_ss_mask(seq, cgrp->subtree_control);
2912 	return 0;
2913 }
2914 
2915 /**
2916  * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
2917  * @cgrp: root of the subtree to update csses for
2918  *
2919  * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
2920  * css associations need to be updated accordingly.  This function looks up
2921  * all css_sets which are attached to the subtree, creates the matching
2922  * updated css_sets and migrates the tasks to the new ones.
2923  */
cgroup_update_dfl_csses(struct cgroup * cgrp)2924 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2925 {
2926 	LIST_HEAD(preloaded_csets);
2927 	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
2928 	struct cgroup_subsys_state *css;
2929 	struct css_set *src_cset;
2930 	int ret;
2931 
2932 	lockdep_assert_held(&cgroup_mutex);
2933 
2934 	percpu_down_write(&cgroup_threadgroup_rwsem);
2935 
2936 	/* look up all csses currently attached to @cgrp's subtree */
2937 	spin_lock_irq(&css_set_lock);
2938 	css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
2939 		struct cgrp_cset_link *link;
2940 
2941 		/* self is not affected by child_subsys_mask change */
2942 		if (css->cgroup == cgrp)
2943 			continue;
2944 
2945 		list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
2946 			cgroup_migrate_add_src(link->cset, cgrp,
2947 					       &preloaded_csets);
2948 	}
2949 	spin_unlock_irq(&css_set_lock);
2950 
2951 	/* NULL dst indicates self on default hierarchy */
2952 	ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
2953 	if (ret)
2954 		goto out_finish;
2955 
2956 	spin_lock_irq(&css_set_lock);
2957 	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
2958 		struct task_struct *task, *ntask;
2959 
2960 		/* src_csets precede dst_csets, break on the first dst_cset */
2961 		if (!src_cset->mg_src_cgrp)
2962 			break;
2963 
2964 		/* all tasks in src_csets need to be migrated */
2965 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
2966 			cgroup_taskset_add(task, &tset);
2967 	}
2968 	spin_unlock_irq(&css_set_lock);
2969 
2970 	ret = cgroup_taskset_migrate(&tset, cgrp);
2971 out_finish:
2972 	cgroup_migrate_finish(&preloaded_csets);
2973 	percpu_up_write(&cgroup_threadgroup_rwsem);
2974 	return ret;
2975 }
2976 
2977 /* change the enabled child controllers for a cgroup in the default hierarchy */
cgroup_subtree_control_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)2978 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2979 					    char *buf, size_t nbytes,
2980 					    loff_t off)
2981 {
2982 	unsigned long enable = 0, disable = 0;
2983 	unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
2984 	struct cgroup *cgrp, *child;
2985 	struct cgroup_subsys *ss;
2986 	char *tok;
2987 	int ssid, ret;
2988 
2989 	/*
2990 	 * Parse input - space separated list of subsystem names prefixed
2991 	 * with either + or -.
2992 	 */
2993 	buf = strstrip(buf);
2994 	while ((tok = strsep(&buf, " "))) {
2995 		unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask;
2996 
2997 		if (tok[0] == '\0')
2998 			continue;
2999 		for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
3000 			if (!cgroup_ssid_enabled(ssid) ||
3001 			    strcmp(tok + 1, ss->name))
3002 				continue;
3003 
3004 			if (*tok == '+') {
3005 				enable |= 1 << ssid;
3006 				disable &= ~(1 << ssid);
3007 			} else if (*tok == '-') {
3008 				disable |= 1 << ssid;
3009 				enable &= ~(1 << ssid);
3010 			} else {
3011 				return -EINVAL;
3012 			}
3013 			break;
3014 		}
3015 		if (ssid == CGROUP_SUBSYS_COUNT)
3016 			return -EINVAL;
3017 	}
3018 
3019 	cgrp = cgroup_kn_lock_live(of->kn);
3020 	if (!cgrp)
3021 		return -ENODEV;
3022 
3023 	for_each_subsys(ss, ssid) {
3024 		if (enable & (1 << ssid)) {
3025 			if (cgrp->subtree_control & (1 << ssid)) {
3026 				enable &= ~(1 << ssid);
3027 				continue;
3028 			}
3029 
3030 			/* unavailable or not enabled on the parent? */
3031 			if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
3032 			    (cgroup_parent(cgrp) &&
3033 			     !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
3034 				ret = -ENOENT;
3035 				goto out_unlock;
3036 			}
3037 		} else if (disable & (1 << ssid)) {
3038 			if (!(cgrp->subtree_control & (1 << ssid))) {
3039 				disable &= ~(1 << ssid);
3040 				continue;
3041 			}
3042 
3043 			/* a child has it enabled? */
3044 			cgroup_for_each_live_child(child, cgrp) {
3045 				if (child->subtree_control & (1 << ssid)) {
3046 					ret = -EBUSY;
3047 					goto out_unlock;
3048 				}
3049 			}
3050 		}
3051 	}
3052 
3053 	if (!enable && !disable) {
3054 		ret = 0;
3055 		goto out_unlock;
3056 	}
3057 
3058 	/*
3059 	 * Except for the root, subtree_control must be zero for a cgroup
3060 	 * with tasks so that child cgroups don't compete against tasks.
3061 	 */
3062 	if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
3063 		ret = -EBUSY;
3064 		goto out_unlock;
3065 	}
3066 
3067 	/*
3068 	 * Update subsys masks and calculate what needs to be done.  More
3069 	 * subsystems than specified may need to be enabled or disabled
3070 	 * depending on subsystem dependencies.
3071 	 */
3072 	old_sc = cgrp->subtree_control;
3073 	old_ss = cgrp->child_subsys_mask;
3074 	new_sc = (old_sc | enable) & ~disable;
3075 	new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
3076 
3077 	css_enable = ~old_ss & new_ss;
3078 	css_disable = old_ss & ~new_ss;
3079 	enable |= css_enable;
3080 	disable |= css_disable;
3081 
3082 	/*
3083 	 * Because css offlining is asynchronous, userland might try to
3084 	 * re-enable the same controller while the previous instance is
3085 	 * still around.  In such cases, wait till it's gone using
3086 	 * offline_waitq.
3087 	 */
3088 	for_each_subsys_which(ss, ssid, &css_enable) {
3089 		cgroup_for_each_live_child(child, cgrp) {
3090 			DEFINE_WAIT(wait);
3091 
3092 			if (!cgroup_css(child, ss))
3093 				continue;
3094 
3095 			cgroup_get(child);
3096 			prepare_to_wait(&child->offline_waitq, &wait,
3097 					TASK_UNINTERRUPTIBLE);
3098 			cgroup_kn_unlock(of->kn);
3099 			schedule();
3100 			finish_wait(&child->offline_waitq, &wait);
3101 			cgroup_put(child);
3102 
3103 			return restart_syscall();
3104 		}
3105 	}
3106 
3107 	cgrp->subtree_control = new_sc;
3108 	cgrp->child_subsys_mask = new_ss;
3109 
3110 	/*
3111 	 * Create new csses or make the existing ones visible.  A css is
3112 	 * created invisible if it's being implicitly enabled through
3113 	 * dependency.  An invisible css is made visible when the userland
3114 	 * explicitly enables it.
3115 	 */
3116 	for_each_subsys(ss, ssid) {
3117 		if (!(enable & (1 << ssid)))
3118 			continue;
3119 
3120 		cgroup_for_each_live_child(child, cgrp) {
3121 			if (css_enable & (1 << ssid))
3122 				ret = create_css(child, ss,
3123 					cgrp->subtree_control & (1 << ssid));
3124 			else
3125 				ret = css_populate_dir(cgroup_css(child, ss),
3126 						       NULL);
3127 			if (ret)
3128 				goto err_undo_css;
3129 		}
3130 	}
3131 
3132 	/*
3133 	 * At this point, cgroup_e_css() results reflect the new csses
3134 	 * making the following cgroup_update_dfl_csses() properly update
3135 	 * css associations of all tasks in the subtree.
3136 	 */
3137 	ret = cgroup_update_dfl_csses(cgrp);
3138 	if (ret)
3139 		goto err_undo_css;
3140 
3141 	/*
3142 	 * All tasks are migrated out of disabled csses.  Kill or hide
3143 	 * them.  A css is hidden when the userland requests it to be
3144 	 * disabled while other subsystems are still depending on it.  The
3145 	 * css must not actively control resources and be in the vanilla
3146 	 * state if it's made visible again later.  Controllers which may
3147 	 * be depended upon should provide ->css_reset() for this purpose.
3148 	 */
3149 	for_each_subsys(ss, ssid) {
3150 		if (!(disable & (1 << ssid)))
3151 			continue;
3152 
3153 		cgroup_for_each_live_child(child, cgrp) {
3154 			struct cgroup_subsys_state *css = cgroup_css(child, ss);
3155 
3156 			if (css_disable & (1 << ssid)) {
3157 				kill_css(css);
3158 			} else {
3159 				css_clear_dir(css, NULL);
3160 				if (ss->css_reset)
3161 					ss->css_reset(css);
3162 			}
3163 		}
3164 	}
3165 
3166 	/*
3167 	 * The effective csses of all the descendants (excluding @cgrp) may
3168 	 * have changed.  Subsystems can optionally subscribe to this event
3169 	 * by implementing ->css_e_css_changed() which is invoked if any of
3170 	 * the effective csses seen from the css's cgroup may have changed.
3171 	 */
3172 	for_each_subsys(ss, ssid) {
3173 		struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
3174 		struct cgroup_subsys_state *css;
3175 
3176 		if (!ss->css_e_css_changed || !this_css)
3177 			continue;
3178 
3179 		css_for_each_descendant_pre(css, this_css)
3180 			if (css != this_css)
3181 				ss->css_e_css_changed(css);
3182 	}
3183 
3184 	kernfs_activate(cgrp->kn);
3185 	ret = 0;
3186 out_unlock:
3187 	cgroup_kn_unlock(of->kn);
3188 	return ret ?: nbytes;
3189 
3190 err_undo_css:
3191 	cgrp->subtree_control = old_sc;
3192 	cgrp->child_subsys_mask = old_ss;
3193 
3194 	for_each_subsys(ss, ssid) {
3195 		if (!(enable & (1 << ssid)))
3196 			continue;
3197 
3198 		cgroup_for_each_live_child(child, cgrp) {
3199 			struct cgroup_subsys_state *css = cgroup_css(child, ss);
3200 
3201 			if (!css)
3202 				continue;
3203 
3204 			if (css_enable & (1 << ssid))
3205 				kill_css(css);
3206 			else
3207 				css_clear_dir(css, NULL);
3208 		}
3209 	}
3210 	goto out_unlock;
3211 }
3212 
cgroup_events_show(struct seq_file * seq,void * v)3213 static int cgroup_events_show(struct seq_file *seq, void *v)
3214 {
3215 	seq_printf(seq, "populated %d\n",
3216 		   cgroup_is_populated(seq_css(seq)->cgroup));
3217 	return 0;
3218 }
3219 
cgroup_file_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)3220 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
3221 				 size_t nbytes, loff_t off)
3222 {
3223 	struct cgroup *cgrp = of->kn->parent->priv;
3224 	struct cftype *cft = of->kn->priv;
3225 	struct cgroup_subsys_state *css;
3226 	int ret;
3227 
3228 	if (cft->write)
3229 		return cft->write(of, buf, nbytes, off);
3230 
3231 	/*
3232 	 * kernfs guarantees that a file isn't deleted with operations in
3233 	 * flight, which means that the matching css is and stays alive and
3234 	 * doesn't need to be pinned.  The RCU locking is not necessary
3235 	 * either.  It's just for the convenience of using cgroup_css().
3236 	 */
3237 	rcu_read_lock();
3238 	css = cgroup_css(cgrp, cft->ss);
3239 	rcu_read_unlock();
3240 
3241 	if (cft->write_u64) {
3242 		unsigned long long v;
3243 		ret = kstrtoull(buf, 0, &v);
3244 		if (!ret)
3245 			ret = cft->write_u64(css, cft, v);
3246 	} else if (cft->write_s64) {
3247 		long long v;
3248 		ret = kstrtoll(buf, 0, &v);
3249 		if (!ret)
3250 			ret = cft->write_s64(css, cft, v);
3251 	} else {
3252 		ret = -EINVAL;
3253 	}
3254 
3255 	return ret ?: nbytes;
3256 }
3257 
cgroup_seqfile_start(struct seq_file * seq,loff_t * ppos)3258 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
3259 {
3260 	return seq_cft(seq)->seq_start(seq, ppos);
3261 }
3262 
cgroup_seqfile_next(struct seq_file * seq,void * v,loff_t * ppos)3263 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
3264 {
3265 	return seq_cft(seq)->seq_next(seq, v, ppos);
3266 }
3267 
cgroup_seqfile_stop(struct seq_file * seq,void * v)3268 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
3269 {
3270 	seq_cft(seq)->seq_stop(seq, v);
3271 }
3272 
cgroup_seqfile_show(struct seq_file * m,void * arg)3273 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
3274 {
3275 	struct cftype *cft = seq_cft(m);
3276 	struct cgroup_subsys_state *css = seq_css(m);
3277 
3278 	if (cft->seq_show)
3279 		return cft->seq_show(m, arg);
3280 
3281 	if (cft->read_u64)
3282 		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
3283 	else if (cft->read_s64)
3284 		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
3285 	else
3286 		return -EINVAL;
3287 	return 0;
3288 }
3289 
3290 static struct kernfs_ops cgroup_kf_single_ops = {
3291 	.atomic_write_len	= PAGE_SIZE,
3292 	.write			= cgroup_file_write,
3293 	.seq_show		= cgroup_seqfile_show,
3294 };
3295 
3296 static struct kernfs_ops cgroup_kf_ops = {
3297 	.atomic_write_len	= PAGE_SIZE,
3298 	.write			= cgroup_file_write,
3299 	.seq_start		= cgroup_seqfile_start,
3300 	.seq_next		= cgroup_seqfile_next,
3301 	.seq_stop		= cgroup_seqfile_stop,
3302 	.seq_show		= cgroup_seqfile_show,
3303 };
3304 
3305 /*
3306  * cgroup_rename - Only allow simple rename of directories in place.
3307  */
cgroup_rename(struct kernfs_node * kn,struct kernfs_node * new_parent,const char * new_name_str)3308 static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
3309 			 const char *new_name_str)
3310 {
3311 	struct cgroup *cgrp = kn->priv;
3312 	int ret;
3313 
3314 	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
3315 	if (strchr(new_name_str, '\n'))
3316 		return -EINVAL;
3317 
3318 	if (kernfs_type(kn) != KERNFS_DIR)
3319 		return -ENOTDIR;
3320 	if (kn->parent != new_parent)
3321 		return -EIO;
3322 
3323 	/*
3324 	 * This isn't a proper migration and its usefulness is very
3325 	 * limited.  Disallow on the default hierarchy.
3326 	 */
3327 	if (cgroup_on_dfl(cgrp))
3328 		return -EPERM;
3329 
3330 	/*
3331 	 * We're gonna grab cgroup_mutex which nests outside kernfs
3332 	 * active_ref.  kernfs_rename() doesn't require active_ref
3333 	 * protection.  Break them before grabbing cgroup_mutex.
3334 	 */
3335 	kernfs_break_active_protection(new_parent);
3336 	kernfs_break_active_protection(kn);
3337 
3338 	mutex_lock(&cgroup_mutex);
3339 
3340 	ret = kernfs_rename(kn, new_parent, new_name_str);
3341 
3342 	mutex_unlock(&cgroup_mutex);
3343 
3344 	kernfs_unbreak_active_protection(kn);
3345 	kernfs_unbreak_active_protection(new_parent);
3346 	return ret;
3347 }
3348 
3349 /* set uid and gid of cgroup dirs and files to that of the creator */
cgroup_kn_set_ugid(struct kernfs_node * kn)3350 static int cgroup_kn_set_ugid(struct kernfs_node *kn)
3351 {
3352 	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
3353 			       .ia_uid = current_fsuid(),
3354 			       .ia_gid = current_fsgid(), };
3355 
3356 	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
3357 	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
3358 		return 0;
3359 
3360 	return kernfs_setattr(kn, &iattr);
3361 }
3362 
cgroup_add_file(struct cgroup_subsys_state * css,struct cgroup * cgrp,struct cftype * cft)3363 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3364 			   struct cftype *cft)
3365 {
3366 	char name[CGROUP_FILE_NAME_MAX];
3367 	struct kernfs_node *kn;
3368 	struct lock_class_key *key = NULL;
3369 	int ret;
3370 
3371 #ifdef CONFIG_DEBUG_LOCK_ALLOC
3372 	key = &cft->lockdep_key;
3373 #endif
3374 	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
3375 				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
3376 				  NULL, key);
3377 	if (IS_ERR(kn))
3378 		return PTR_ERR(kn);
3379 
3380 	ret = cgroup_kn_set_ugid(kn);
3381 	if (ret) {
3382 		kernfs_remove(kn);
3383 		return ret;
3384 	}
3385 
3386 	if (cft->file_offset) {
3387 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
3388 
3389 		spin_lock_irq(&cgroup_file_kn_lock);
3390 		cfile->kn = kn;
3391 		spin_unlock_irq(&cgroup_file_kn_lock);
3392 	}
3393 
3394 	return 0;
3395 }
3396 
3397 /**
3398  * cgroup_addrm_files - add or remove files to a cgroup directory
3399  * @css: the target css
3400  * @cgrp: the target cgroup (usually css->cgroup)
3401  * @cfts: array of cftypes to be added
3402  * @is_add: whether to add or remove
3403  *
3404  * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3405  * For removals, this function never fails.
3406  */
cgroup_addrm_files(struct cgroup_subsys_state * css,struct cgroup * cgrp,struct cftype cfts[],bool is_add)3407 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
3408 			      struct cgroup *cgrp, struct cftype cfts[],
3409 			      bool is_add)
3410 {
3411 	struct cftype *cft, *cft_end = NULL;
3412 	int ret;
3413 
3414 	lockdep_assert_held(&cgroup_mutex);
3415 
3416 restart:
3417 	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
3418 		/* does cft->flags tell us to skip this file on @cgrp? */
3419 		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
3420 			continue;
3421 		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
3422 			continue;
3423 		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
3424 			continue;
3425 		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
3426 			continue;
3427 
3428 		if (is_add) {
3429 			ret = cgroup_add_file(css, cgrp, cft);
3430 			if (ret) {
3431 				pr_warn("%s: failed to add %s, err=%d\n",
3432 					__func__, cft->name, ret);
3433 				cft_end = cft;
3434 				is_add = false;
3435 				goto restart;
3436 			}
3437 		} else {
3438 			cgroup_rm_file(cgrp, cft);
3439 		}
3440 	}
3441 	return 0;
3442 }
3443 
cgroup_apply_cftypes(struct cftype * cfts,bool is_add)3444 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
3445 {
3446 	LIST_HEAD(pending);
3447 	struct cgroup_subsys *ss = cfts[0].ss;
3448 	struct cgroup *root = &ss->root->cgrp;
3449 	struct cgroup_subsys_state *css;
3450 	int ret = 0;
3451 
3452 	lockdep_assert_held(&cgroup_mutex);
3453 
3454 	/* add/rm files for all cgroups created before */
3455 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
3456 		struct cgroup *cgrp = css->cgroup;
3457 
3458 		if (cgroup_is_dead(cgrp))
3459 			continue;
3460 
3461 		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
3462 		if (ret)
3463 			break;
3464 	}
3465 
3466 	if (is_add && !ret)
3467 		kernfs_activate(root->kn);
3468 	return ret;
3469 }
3470 
cgroup_exit_cftypes(struct cftype * cfts)3471 static void cgroup_exit_cftypes(struct cftype *cfts)
3472 {
3473 	struct cftype *cft;
3474 
3475 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3476 		/* free copy for custom atomic_write_len, see init_cftypes() */
3477 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
3478 			kfree(cft->kf_ops);
3479 		cft->kf_ops = NULL;
3480 		cft->ss = NULL;
3481 
3482 		/* revert flags set by cgroup core while adding @cfts */
3483 		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
3484 	}
3485 }
3486 
cgroup_init_cftypes(struct cgroup_subsys * ss,struct cftype * cfts)3487 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3488 {
3489 	struct cftype *cft;
3490 
3491 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3492 		struct kernfs_ops *kf_ops;
3493 
3494 		WARN_ON(cft->ss || cft->kf_ops);
3495 
3496 		if (cft->seq_start)
3497 			kf_ops = &cgroup_kf_ops;
3498 		else
3499 			kf_ops = &cgroup_kf_single_ops;
3500 
3501 		/*
3502 		 * Ugh... if @cft wants a custom max_write_len, we need to
3503 		 * make a copy of kf_ops to set its atomic_write_len.
3504 		 */
3505 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
3506 			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
3507 			if (!kf_ops) {
3508 				cgroup_exit_cftypes(cfts);
3509 				return -ENOMEM;
3510 			}
3511 			kf_ops->atomic_write_len = cft->max_write_len;
3512 		}
3513 
3514 		cft->kf_ops = kf_ops;
3515 		cft->ss = ss;
3516 	}
3517 
3518 	return 0;
3519 }
3520 
cgroup_rm_cftypes_locked(struct cftype * cfts)3521 static int cgroup_rm_cftypes_locked(struct cftype *cfts)
3522 {
3523 	lockdep_assert_held(&cgroup_mutex);
3524 
3525 	if (!cfts || !cfts[0].ss)
3526 		return -ENOENT;
3527 
3528 	list_del(&cfts->node);
3529 	cgroup_apply_cftypes(cfts, false);
3530 	cgroup_exit_cftypes(cfts);
3531 	return 0;
3532 }
3533 
3534 /**
3535  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
3536  * @cfts: zero-length name terminated array of cftypes
3537  *
3538  * Unregister @cfts.  Files described by @cfts are removed from all
3539  * existing cgroups and all future cgroups won't have them either.  This
3540  * function can be called anytime whether @cfts' subsys is attached or not.
3541  *
3542  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
3543  * registered.
3544  */
cgroup_rm_cftypes(struct cftype * cfts)3545 int cgroup_rm_cftypes(struct cftype *cfts)
3546 {
3547 	int ret;
3548 
3549 	mutex_lock(&cgroup_mutex);
3550 	ret = cgroup_rm_cftypes_locked(cfts);
3551 	mutex_unlock(&cgroup_mutex);
3552 	return ret;
3553 }
3554 
3555 /**
3556  * cgroup_add_cftypes - add an array of cftypes to a subsystem
3557  * @ss: target cgroup subsystem
3558  * @cfts: zero-length name terminated array of cftypes
3559  *
3560  * Register @cfts to @ss.  Files described by @cfts are created for all
3561  * existing cgroups to which @ss is attached and all future cgroups will
3562  * have them too.  This function can be called anytime whether @ss is
3563  * attached or not.
3564  *
3565  * Returns 0 on successful registration, -errno on failure.  Note that this
3566  * function currently returns 0 as long as @cfts registration is successful
3567  * even if some file creation attempts on existing cgroups fail.
3568  */
cgroup_add_cftypes(struct cgroup_subsys * ss,struct cftype * cfts)3569 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3570 {
3571 	int ret;
3572 
3573 	if (!cgroup_ssid_enabled(ss->id))
3574 		return 0;
3575 
3576 	if (!cfts || cfts[0].name[0] == '\0')
3577 		return 0;
3578 
3579 	ret = cgroup_init_cftypes(ss, cfts);
3580 	if (ret)
3581 		return ret;
3582 
3583 	mutex_lock(&cgroup_mutex);
3584 
3585 	list_add_tail(&cfts->node, &ss->cfts);
3586 	ret = cgroup_apply_cftypes(cfts, true);
3587 	if (ret)
3588 		cgroup_rm_cftypes_locked(cfts);
3589 
3590 	mutex_unlock(&cgroup_mutex);
3591 	return ret;
3592 }
3593 
3594 /**
3595  * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
3596  * @ss: target cgroup subsystem
3597  * @cfts: zero-length name terminated array of cftypes
3598  *
3599  * Similar to cgroup_add_cftypes() but the added files are only used for
3600  * the default hierarchy.
3601  */
cgroup_add_dfl_cftypes(struct cgroup_subsys * ss,struct cftype * cfts)3602 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3603 {
3604 	struct cftype *cft;
3605 
3606 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3607 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
3608 	return cgroup_add_cftypes(ss, cfts);
3609 }
3610 
3611 /**
3612  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
3613  * @ss: target cgroup subsystem
3614  * @cfts: zero-length name terminated array of cftypes
3615  *
3616  * Similar to cgroup_add_cftypes() but the added files are only used for
3617  * the legacy hierarchies.
3618  */
cgroup_add_legacy_cftypes(struct cgroup_subsys * ss,struct cftype * cfts)3619 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3620 {
3621 	struct cftype *cft;
3622 
3623 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3624 		cft->flags |= __CFTYPE_NOT_ON_DFL;
3625 	return cgroup_add_cftypes(ss, cfts);
3626 }
3627 
3628 /**
3629  * cgroup_file_notify - generate a file modified event for a cgroup_file
3630  * @cfile: target cgroup_file
3631  *
3632  * @cfile must have been obtained by setting cftype->file_offset.
3633  */
cgroup_file_notify(struct cgroup_file * cfile)3634 void cgroup_file_notify(struct cgroup_file *cfile)
3635 {
3636 	unsigned long flags;
3637 
3638 	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3639 	if (cfile->kn)
3640 		kernfs_notify(cfile->kn);
3641 	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3642 }
3643 
3644 /**
3645  * cgroup_task_count - count the number of tasks in a cgroup.
3646  * @cgrp: the cgroup in question
3647  *
3648  * Return the number of tasks in the cgroup.
3649  */
cgroup_task_count(const struct cgroup * cgrp)3650 static int cgroup_task_count(const struct cgroup *cgrp)
3651 {
3652 	int count = 0;
3653 	struct cgrp_cset_link *link;
3654 
3655 	spin_lock_irq(&css_set_lock);
3656 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
3657 		count += atomic_read(&link->cset->refcount);
3658 	spin_unlock_irq(&css_set_lock);
3659 	return count;
3660 }
3661 
3662 /**
3663  * css_next_child - find the next child of a given css
3664  * @pos: the current position (%NULL to initiate traversal)
3665  * @parent: css whose children to walk
3666  *
3667  * This function returns the next child of @parent and should be called
3668  * under either cgroup_mutex or RCU read lock.  The only requirement is
3669  * that @parent and @pos are accessible.  The next sibling is guaranteed to
3670  * be returned regardless of their states.
3671  *
3672  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3673  * css which finished ->css_online() is guaranteed to be visible in the
3674  * future iterations and will stay visible until the last reference is put.
3675  * A css which hasn't finished ->css_online() or already finished
3676  * ->css_offline() may show up during traversal.  It's each subsystem's
3677  * responsibility to synchronize against on/offlining.
3678  */
css_next_child(struct cgroup_subsys_state * pos,struct cgroup_subsys_state * parent)3679 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
3680 					   struct cgroup_subsys_state *parent)
3681 {
3682 	struct cgroup_subsys_state *next;
3683 
3684 	cgroup_assert_mutex_or_rcu_locked();
3685 
3686 	/*
3687 	 * @pos could already have been unlinked from the sibling list.
3688 	 * Once a cgroup is removed, its ->sibling.next is no longer
3689 	 * updated when its next sibling changes.  CSS_RELEASED is set when
3690 	 * @pos is taken off list, at which time its next pointer is valid,
3691 	 * and, as releases are serialized, the one pointed to by the next
3692 	 * pointer is guaranteed to not have started release yet.  This
3693 	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
3694 	 * critical section, the one pointed to by its next pointer is
3695 	 * guaranteed to not have finished its RCU grace period even if we
3696 	 * have dropped rcu_read_lock() inbetween iterations.
3697 	 *
3698 	 * If @pos has CSS_RELEASED set, its next pointer can't be
3699 	 * dereferenced; however, as each css is given a monotonically
3700 	 * increasing unique serial number and always appended to the
3701 	 * sibling list, the next one can be found by walking the parent's
3702 	 * children until the first css with higher serial number than
3703 	 * @pos's.  While this path can be slower, it happens iff iteration
3704 	 * races against release and the race window is very small.
3705 	 */
3706 	if (!pos) {
3707 		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
3708 	} else if (likely(!(pos->flags & CSS_RELEASED))) {
3709 		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
3710 	} else {
3711 		list_for_each_entry_rcu(next, &parent->children, sibling)
3712 			if (next->serial_nr > pos->serial_nr)
3713 				break;
3714 	}
3715 
3716 	/*
3717 	 * @next, if not pointing to the head, can be dereferenced and is
3718 	 * the next sibling.
3719 	 */
3720 	if (&next->sibling != &parent->children)
3721 		return next;
3722 	return NULL;
3723 }
3724 
3725 /**
3726  * css_next_descendant_pre - find the next descendant for pre-order walk
3727  * @pos: the current position (%NULL to initiate traversal)
3728  * @root: css whose descendants to walk
3729  *
3730  * To be used by css_for_each_descendant_pre().  Find the next descendant
3731  * to visit for pre-order traversal of @root's descendants.  @root is
3732  * included in the iteration and the first node to be visited.
3733  *
3734  * While this function requires cgroup_mutex or RCU read locking, it
3735  * doesn't require the whole traversal to be contained in a single critical
3736  * section.  This function will return the correct next descendant as long
3737  * as both @pos and @root are accessible and @pos is a descendant of @root.
3738  *
3739  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3740  * css which finished ->css_online() is guaranteed to be visible in the
3741  * future iterations and will stay visible until the last reference is put.
3742  * A css which hasn't finished ->css_online() or already finished
3743  * ->css_offline() may show up during traversal.  It's each subsystem's
3744  * responsibility to synchronize against on/offlining.
3745  */
3746 struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state * pos,struct cgroup_subsys_state * root)3747 css_next_descendant_pre(struct cgroup_subsys_state *pos,
3748 			struct cgroup_subsys_state *root)
3749 {
3750 	struct cgroup_subsys_state *next;
3751 
3752 	cgroup_assert_mutex_or_rcu_locked();
3753 
3754 	/* if first iteration, visit @root */
3755 	if (!pos)
3756 		return root;
3757 
3758 	/* visit the first child if exists */
3759 	next = css_next_child(NULL, pos);
3760 	if (next)
3761 		return next;
3762 
3763 	/* no child, visit my or the closest ancestor's next sibling */
3764 	while (pos != root) {
3765 		next = css_next_child(pos, pos->parent);
3766 		if (next)
3767 			return next;
3768 		pos = pos->parent;
3769 	}
3770 
3771 	return NULL;
3772 }
3773 
3774 /**
3775  * css_rightmost_descendant - return the rightmost descendant of a css
3776  * @pos: css of interest
3777  *
3778  * Return the rightmost descendant of @pos.  If there's no descendant, @pos
3779  * is returned.  This can be used during pre-order traversal to skip
3780  * subtree of @pos.
3781  *
3782  * While this function requires cgroup_mutex or RCU read locking, it
3783  * doesn't require the whole traversal to be contained in a single critical
3784  * section.  This function will return the correct rightmost descendant as
3785  * long as @pos is accessible.
3786  */
3787 struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state * pos)3788 css_rightmost_descendant(struct cgroup_subsys_state *pos)
3789 {
3790 	struct cgroup_subsys_state *last, *tmp;
3791 
3792 	cgroup_assert_mutex_or_rcu_locked();
3793 
3794 	do {
3795 		last = pos;
3796 		/* ->prev isn't RCU safe, walk ->next till the end */
3797 		pos = NULL;
3798 		css_for_each_child(tmp, last)
3799 			pos = tmp;
3800 	} while (pos);
3801 
3802 	return last;
3803 }
3804 
3805 static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state * pos)3806 css_leftmost_descendant(struct cgroup_subsys_state *pos)
3807 {
3808 	struct cgroup_subsys_state *last;
3809 
3810 	do {
3811 		last = pos;
3812 		pos = css_next_child(NULL, pos);
3813 	} while (pos);
3814 
3815 	return last;
3816 }
3817 
3818 /**
3819  * css_next_descendant_post - find the next descendant for post-order walk
3820  * @pos: the current position (%NULL to initiate traversal)
3821  * @root: css whose descendants to walk
3822  *
3823  * To be used by css_for_each_descendant_post().  Find the next descendant
3824  * to visit for post-order traversal of @root's descendants.  @root is
3825  * included in the iteration and the last node to be visited.
3826  *
3827  * While this function requires cgroup_mutex or RCU read locking, it
3828  * doesn't require the whole traversal to be contained in a single critical
3829  * section.  This function will return the correct next descendant as long
3830  * as both @pos and @cgroup are accessible and @pos is a descendant of
3831  * @cgroup.
3832  *
3833  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3834  * css which finished ->css_online() is guaranteed to be visible in the
3835  * future iterations and will stay visible until the last reference is put.
3836  * A css which hasn't finished ->css_online() or already finished
3837  * ->css_offline() may show up during traversal.  It's each subsystem's
3838  * responsibility to synchronize against on/offlining.
3839  */
3840 struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state * pos,struct cgroup_subsys_state * root)3841 css_next_descendant_post(struct cgroup_subsys_state *pos,
3842 			 struct cgroup_subsys_state *root)
3843 {
3844 	struct cgroup_subsys_state *next;
3845 
3846 	cgroup_assert_mutex_or_rcu_locked();
3847 
3848 	/* if first iteration, visit leftmost descendant which may be @root */
3849 	if (!pos)
3850 		return css_leftmost_descendant(root);
3851 
3852 	/* if we visited @root, we're done */
3853 	if (pos == root)
3854 		return NULL;
3855 
3856 	/* if there's an unvisited sibling, visit its leftmost descendant */
3857 	next = css_next_child(pos, pos->parent);
3858 	if (next)
3859 		return css_leftmost_descendant(next);
3860 
3861 	/* no sibling left, visit parent */
3862 	return pos->parent;
3863 }
3864 
3865 /**
3866  * css_has_online_children - does a css have online children
3867  * @css: the target css
3868  *
3869  * Returns %true if @css has any online children; otherwise, %false.  This
3870  * function can be called from any context but the caller is responsible
3871  * for synchronizing against on/offlining as necessary.
3872  */
css_has_online_children(struct cgroup_subsys_state * css)3873 bool css_has_online_children(struct cgroup_subsys_state *css)
3874 {
3875 	struct cgroup_subsys_state *child;
3876 	bool ret = false;
3877 
3878 	rcu_read_lock();
3879 	css_for_each_child(child, css) {
3880 		if (child->flags & CSS_ONLINE) {
3881 			ret = true;
3882 			break;
3883 		}
3884 	}
3885 	rcu_read_unlock();
3886 	return ret;
3887 }
3888 
3889 /**
3890  * css_task_iter_advance_css_set - advance a task itererator to the next css_set
3891  * @it: the iterator to advance
3892  *
3893  * Advance @it to the next css_set to walk.
3894  */
css_task_iter_advance_css_set(struct css_task_iter * it)3895 static void css_task_iter_advance_css_set(struct css_task_iter *it)
3896 {
3897 	struct list_head *l = it->cset_pos;
3898 	struct cgrp_cset_link *link;
3899 	struct css_set *cset;
3900 
3901 	lockdep_assert_held(&css_set_lock);
3902 
3903 	/* Advance to the next non-empty css_set */
3904 	do {
3905 		l = l->next;
3906 		if (l == it->cset_head) {
3907 			it->cset_pos = NULL;
3908 			it->task_pos = NULL;
3909 			return;
3910 		}
3911 
3912 		if (it->ss) {
3913 			cset = container_of(l, struct css_set,
3914 					    e_cset_node[it->ss->id]);
3915 		} else {
3916 			link = list_entry(l, struct cgrp_cset_link, cset_link);
3917 			cset = link->cset;
3918 		}
3919 	} while (!css_set_populated(cset));
3920 
3921 	it->cset_pos = l;
3922 
3923 	if (!list_empty(&cset->tasks))
3924 		it->task_pos = cset->tasks.next;
3925 	else
3926 		it->task_pos = cset->mg_tasks.next;
3927 
3928 	it->tasks_head = &cset->tasks;
3929 	it->mg_tasks_head = &cset->mg_tasks;
3930 
3931 	/*
3932 	 * We don't keep css_sets locked across iteration steps and thus
3933 	 * need to take steps to ensure that iteration can be resumed after
3934 	 * the lock is re-acquired.  Iteration is performed at two levels -
3935 	 * css_sets and tasks in them.
3936 	 *
3937 	 * Once created, a css_set never leaves its cgroup lists, so a
3938 	 * pinned css_set is guaranteed to stay put and we can resume
3939 	 * iteration afterwards.
3940 	 *
3941 	 * Tasks may leave @cset across iteration steps.  This is resolved
3942 	 * by registering each iterator with the css_set currently being
3943 	 * walked and making css_set_move_task() advance iterators whose
3944 	 * next task is leaving.
3945 	 */
3946 	if (it->cur_cset) {
3947 		list_del(&it->iters_node);
3948 		put_css_set_locked(it->cur_cset);
3949 	}
3950 	get_css_set(cset);
3951 	it->cur_cset = cset;
3952 	list_add(&it->iters_node, &cset->task_iters);
3953 }
3954 
css_task_iter_advance(struct css_task_iter * it)3955 static void css_task_iter_advance(struct css_task_iter *it)
3956 {
3957 	struct list_head *l = it->task_pos;
3958 
3959 	lockdep_assert_held(&css_set_lock);
3960 	WARN_ON_ONCE(!l);
3961 
3962 	/*
3963 	 * Advance iterator to find next entry.  cset->tasks is consumed
3964 	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
3965 	 * next cset.
3966 	 */
3967 	l = l->next;
3968 
3969 	if (l == it->tasks_head)
3970 		l = it->mg_tasks_head->next;
3971 
3972 	if (l == it->mg_tasks_head)
3973 		css_task_iter_advance_css_set(it);
3974 	else
3975 		it->task_pos = l;
3976 }
3977 
3978 /**
3979  * css_task_iter_start - initiate task iteration
3980  * @css: the css to walk tasks of
3981  * @it: the task iterator to use
3982  *
3983  * Initiate iteration through the tasks of @css.  The caller can call
3984  * css_task_iter_next() to walk through the tasks until the function
3985  * returns NULL.  On completion of iteration, css_task_iter_end() must be
3986  * called.
3987  */
css_task_iter_start(struct cgroup_subsys_state * css,struct css_task_iter * it)3988 void css_task_iter_start(struct cgroup_subsys_state *css,
3989 			 struct css_task_iter *it)
3990 {
3991 	/* no one should try to iterate before mounting cgroups */
3992 	WARN_ON_ONCE(!use_task_css_set_links);
3993 
3994 	memset(it, 0, sizeof(*it));
3995 
3996 	spin_lock_irq(&css_set_lock);
3997 
3998 	it->ss = css->ss;
3999 
4000 	if (it->ss)
4001 		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
4002 	else
4003 		it->cset_pos = &css->cgroup->cset_links;
4004 
4005 	it->cset_head = it->cset_pos;
4006 
4007 	css_task_iter_advance_css_set(it);
4008 
4009 	spin_unlock_irq(&css_set_lock);
4010 }
4011 
4012 /**
4013  * css_task_iter_next - return the next task for the iterator
4014  * @it: the task iterator being iterated
4015  *
4016  * The "next" function for task iteration.  @it should have been
4017  * initialized via css_task_iter_start().  Returns NULL when the iteration
4018  * reaches the end.
4019  */
css_task_iter_next(struct css_task_iter * it)4020 struct task_struct *css_task_iter_next(struct css_task_iter *it)
4021 {
4022 	if (it->cur_task) {
4023 		put_task_struct(it->cur_task);
4024 		it->cur_task = NULL;
4025 	}
4026 
4027 	spin_lock_irq(&css_set_lock);
4028 
4029 	if (it->task_pos) {
4030 		it->cur_task = list_entry(it->task_pos, struct task_struct,
4031 					  cg_list);
4032 		get_task_struct(it->cur_task);
4033 		css_task_iter_advance(it);
4034 	}
4035 
4036 	spin_unlock_irq(&css_set_lock);
4037 
4038 	return it->cur_task;
4039 }
4040 
4041 /**
4042  * css_task_iter_end - finish task iteration
4043  * @it: the task iterator to finish
4044  *
4045  * Finish task iteration started by css_task_iter_start().
4046  */
css_task_iter_end(struct css_task_iter * it)4047 void css_task_iter_end(struct css_task_iter *it)
4048 {
4049 	if (it->cur_cset) {
4050 		spin_lock_irq(&css_set_lock);
4051 		list_del(&it->iters_node);
4052 		put_css_set_locked(it->cur_cset);
4053 		spin_unlock_irq(&css_set_lock);
4054 	}
4055 
4056 	if (it->cur_task)
4057 		put_task_struct(it->cur_task);
4058 }
4059 
4060 /**
4061  * cgroup_trasnsfer_tasks - move tasks from one cgroup to another
4062  * @to: cgroup to which the tasks will be moved
4063  * @from: cgroup in which the tasks currently reside
4064  *
4065  * Locking rules between cgroup_post_fork() and the migration path
4066  * guarantee that, if a task is forking while being migrated, the new child
4067  * is guaranteed to be either visible in the source cgroup after the
4068  * parent's migration is complete or put into the target cgroup.  No task
4069  * can slip out of migration through forking.
4070  */
cgroup_transfer_tasks(struct cgroup * to,struct cgroup * from)4071 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
4072 {
4073 	LIST_HEAD(preloaded_csets);
4074 	struct cgrp_cset_link *link;
4075 	struct css_task_iter it;
4076 	struct task_struct *task;
4077 	int ret;
4078 
4079 	mutex_lock(&cgroup_mutex);
4080 
4081 	/* all tasks in @from are being moved, all csets are source */
4082 	spin_lock_irq(&css_set_lock);
4083 	list_for_each_entry(link, &from->cset_links, cset_link)
4084 		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
4085 	spin_unlock_irq(&css_set_lock);
4086 
4087 	ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
4088 	if (ret)
4089 		goto out_err;
4090 
4091 	/*
4092 	 * Migrate tasks one-by-one until @form is empty.  This fails iff
4093 	 * ->can_attach() fails.
4094 	 */
4095 	do {
4096 		css_task_iter_start(&from->self, &it);
4097 
4098 		do {
4099 			task = css_task_iter_next(&it);
4100 		} while (task && (task->flags & PF_EXITING));
4101 
4102 		if (task)
4103 			get_task_struct(task);
4104 		css_task_iter_end(&it);
4105 
4106 		if (task) {
4107 			ret = cgroup_migrate(task, false, to);
4108 			put_task_struct(task);
4109 		}
4110 	} while (task && !ret);
4111 out_err:
4112 	cgroup_migrate_finish(&preloaded_csets);
4113 	mutex_unlock(&cgroup_mutex);
4114 	return ret;
4115 }
4116 
4117 /*
4118  * Stuff for reading the 'tasks'/'procs' files.
4119  *
4120  * Reading this file can return large amounts of data if a cgroup has
4121  * *lots* of attached tasks. So it may need several calls to read(),
4122  * but we cannot guarantee that the information we produce is correct
4123  * unless we produce it entirely atomically.
4124  *
4125  */
4126 
4127 /* which pidlist file are we talking about? */
4128 enum cgroup_filetype {
4129 	CGROUP_FILE_PROCS,
4130 	CGROUP_FILE_TASKS,
4131 };
4132 
4133 /*
4134  * A pidlist is a list of pids that virtually represents the contents of one
4135  * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
4136  * a pair (one each for procs, tasks) for each pid namespace that's relevant
4137  * to the cgroup.
4138  */
4139 struct cgroup_pidlist {
4140 	/*
4141 	 * used to find which pidlist is wanted. doesn't change as long as
4142 	 * this particular list stays in the list.
4143 	*/
4144 	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
4145 	/* array of xids */
4146 	pid_t *list;
4147 	/* how many elements the above list has */
4148 	int length;
4149 	/* each of these stored in a list by its cgroup */
4150 	struct list_head links;
4151 	/* pointer to the cgroup we belong to, for list removal purposes */
4152 	struct cgroup *owner;
4153 	/* for delayed destruction */
4154 	struct delayed_work destroy_dwork;
4155 };
4156 
4157 /*
4158  * The following two functions "fix" the issue where there are more pids
4159  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
4160  * TODO: replace with a kernel-wide solution to this problem
4161  */
4162 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
pidlist_allocate(int count)4163 static void *pidlist_allocate(int count)
4164 {
4165 	if (PIDLIST_TOO_LARGE(count))
4166 		return vmalloc(count * sizeof(pid_t));
4167 	else
4168 		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
4169 }
4170 
pidlist_free(void * p)4171 static void pidlist_free(void *p)
4172 {
4173 	kvfree(p);
4174 }
4175 
4176 /*
4177  * Used to destroy all pidlists lingering waiting for destroy timer.  None
4178  * should be left afterwards.
4179  */
cgroup_pidlist_destroy_all(struct cgroup * cgrp)4180 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
4181 {
4182 	struct cgroup_pidlist *l, *tmp_l;
4183 
4184 	mutex_lock(&cgrp->pidlist_mutex);
4185 	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
4186 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
4187 	mutex_unlock(&cgrp->pidlist_mutex);
4188 
4189 	flush_workqueue(cgroup_pidlist_destroy_wq);
4190 	BUG_ON(!list_empty(&cgrp->pidlists));
4191 }
4192 
cgroup_pidlist_destroy_work_fn(struct work_struct * work)4193 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
4194 {
4195 	struct delayed_work *dwork = to_delayed_work(work);
4196 	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
4197 						destroy_dwork);
4198 	struct cgroup_pidlist *tofree = NULL;
4199 
4200 	mutex_lock(&l->owner->pidlist_mutex);
4201 
4202 	/*
4203 	 * Destroy iff we didn't get queued again.  The state won't change
4204 	 * as destroy_dwork can only be queued while locked.
4205 	 */
4206 	if (!delayed_work_pending(dwork)) {
4207 		list_del(&l->links);
4208 		pidlist_free(l->list);
4209 		put_pid_ns(l->key.ns);
4210 		tofree = l;
4211 	}
4212 
4213 	mutex_unlock(&l->owner->pidlist_mutex);
4214 	kfree(tofree);
4215 }
4216 
4217 /*
4218  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
4219  * Returns the number of unique elements.
4220  */
pidlist_uniq(pid_t * list,int length)4221 static int pidlist_uniq(pid_t *list, int length)
4222 {
4223 	int src, dest = 1;
4224 
4225 	/*
4226 	 * we presume the 0th element is unique, so i starts at 1. trivial
4227 	 * edge cases first; no work needs to be done for either
4228 	 */
4229 	if (length == 0 || length == 1)
4230 		return length;
4231 	/* src and dest walk down the list; dest counts unique elements */
4232 	for (src = 1; src < length; src++) {
4233 		/* find next unique element */
4234 		while (list[src] == list[src-1]) {
4235 			src++;
4236 			if (src == length)
4237 				goto after;
4238 		}
4239 		/* dest always points to where the next unique element goes */
4240 		list[dest] = list[src];
4241 		dest++;
4242 	}
4243 after:
4244 	return dest;
4245 }
4246 
4247 /*
4248  * The two pid files - task and cgroup.procs - guaranteed that the result
4249  * is sorted, which forced this whole pidlist fiasco.  As pid order is
4250  * different per namespace, each namespace needs differently sorted list,
4251  * making it impossible to use, for example, single rbtree of member tasks
4252  * sorted by task pointer.  As pidlists can be fairly large, allocating one
4253  * per open file is dangerous, so cgroup had to implement shared pool of
4254  * pidlists keyed by cgroup and namespace.
4255  *
4256  * All this extra complexity was caused by the original implementation
4257  * committing to an entirely unnecessary property.  In the long term, we
4258  * want to do away with it.  Explicitly scramble sort order if on the
4259  * default hierarchy so that no such expectation exists in the new
4260  * interface.
4261  *
4262  * Scrambling is done by swapping every two consecutive bits, which is
4263  * non-identity one-to-one mapping which disturbs sort order sufficiently.
4264  */
pid_fry(pid_t pid)4265 static pid_t pid_fry(pid_t pid)
4266 {
4267 	unsigned a = pid & 0x55555555;
4268 	unsigned b = pid & 0xAAAAAAAA;
4269 
4270 	return (a << 1) | (b >> 1);
4271 }
4272 
cgroup_pid_fry(struct cgroup * cgrp,pid_t pid)4273 static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
4274 {
4275 	if (cgroup_on_dfl(cgrp))
4276 		return pid_fry(pid);
4277 	else
4278 		return pid;
4279 }
4280 
cmppid(const void * a,const void * b)4281 static int cmppid(const void *a, const void *b)
4282 {
4283 	return *(pid_t *)a - *(pid_t *)b;
4284 }
4285 
fried_cmppid(const void * a,const void * b)4286 static int fried_cmppid(const void *a, const void *b)
4287 {
4288 	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
4289 }
4290 
cgroup_pidlist_find(struct cgroup * cgrp,enum cgroup_filetype type)4291 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
4292 						  enum cgroup_filetype type)
4293 {
4294 	struct cgroup_pidlist *l;
4295 	/* don't need task_nsproxy() if we're looking at ourself */
4296 	struct pid_namespace *ns = task_active_pid_ns(current);
4297 
4298 	lockdep_assert_held(&cgrp->pidlist_mutex);
4299 
4300 	list_for_each_entry(l, &cgrp->pidlists, links)
4301 		if (l->key.type == type && l->key.ns == ns)
4302 			return l;
4303 	return NULL;
4304 }
4305 
4306 /*
4307  * find the appropriate pidlist for our purpose (given procs vs tasks)
4308  * returns with the lock on that pidlist already held, and takes care
4309  * of the use count, or returns NULL with no locks held if we're out of
4310  * memory.
4311  */
cgroup_pidlist_find_create(struct cgroup * cgrp,enum cgroup_filetype type)4312 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
4313 						enum cgroup_filetype type)
4314 {
4315 	struct cgroup_pidlist *l;
4316 
4317 	lockdep_assert_held(&cgrp->pidlist_mutex);
4318 
4319 	l = cgroup_pidlist_find(cgrp, type);
4320 	if (l)
4321 		return l;
4322 
4323 	/* entry not found; create a new one */
4324 	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
4325 	if (!l)
4326 		return l;
4327 
4328 	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
4329 	l->key.type = type;
4330 	/* don't need task_nsproxy() if we're looking at ourself */
4331 	l->key.ns = get_pid_ns(task_active_pid_ns(current));
4332 	l->owner = cgrp;
4333 	list_add(&l->links, &cgrp->pidlists);
4334 	return l;
4335 }
4336 
4337 /*
4338  * Load a cgroup's pidarray with either procs' tgids or tasks' pids
4339  */
pidlist_array_load(struct cgroup * cgrp,enum cgroup_filetype type,struct cgroup_pidlist ** lp)4340 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
4341 			      struct cgroup_pidlist **lp)
4342 {
4343 	pid_t *array;
4344 	int length;
4345 	int pid, n = 0; /* used for populating the array */
4346 	struct css_task_iter it;
4347 	struct task_struct *tsk;
4348 	struct cgroup_pidlist *l;
4349 
4350 	lockdep_assert_held(&cgrp->pidlist_mutex);
4351 
4352 	/*
4353 	 * If cgroup gets more users after we read count, we won't have
4354 	 * enough space - tough.  This race is indistinguishable to the
4355 	 * caller from the case that the additional cgroup users didn't
4356 	 * show up until sometime later on.
4357 	 */
4358 	length = cgroup_task_count(cgrp);
4359 	array = pidlist_allocate(length);
4360 	if (!array)
4361 		return -ENOMEM;
4362 	/* now, populate the array */
4363 	css_task_iter_start(&cgrp->self, &it);
4364 	while ((tsk = css_task_iter_next(&it))) {
4365 		if (unlikely(n == length))
4366 			break;
4367 		/* get tgid or pid for procs or tasks file respectively */
4368 		if (type == CGROUP_FILE_PROCS)
4369 			pid = task_tgid_vnr(tsk);
4370 		else
4371 			pid = task_pid_vnr(tsk);
4372 		if (pid > 0) /* make sure to only use valid results */
4373 			array[n++] = pid;
4374 	}
4375 	css_task_iter_end(&it);
4376 	length = n;
4377 	/* now sort & (if procs) strip out duplicates */
4378 	if (cgroup_on_dfl(cgrp))
4379 		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
4380 	else
4381 		sort(array, length, sizeof(pid_t), cmppid, NULL);
4382 	if (type == CGROUP_FILE_PROCS)
4383 		length = pidlist_uniq(array, length);
4384 
4385 	l = cgroup_pidlist_find_create(cgrp, type);
4386 	if (!l) {
4387 		pidlist_free(array);
4388 		return -ENOMEM;
4389 	}
4390 
4391 	/* store array, freeing old if necessary */
4392 	pidlist_free(l->list);
4393 	l->list = array;
4394 	l->length = length;
4395 	*lp = l;
4396 	return 0;
4397 }
4398 
4399 /**
4400  * cgroupstats_build - build and fill cgroupstats
4401  * @stats: cgroupstats to fill information into
4402  * @dentry: A dentry entry belonging to the cgroup for which stats have
4403  * been requested.
4404  *
4405  * Build and fill cgroupstats so that taskstats can export it to user
4406  * space.
4407  */
cgroupstats_build(struct cgroupstats * stats,struct dentry * dentry)4408 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
4409 {
4410 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
4411 	struct cgroup *cgrp;
4412 	struct css_task_iter it;
4413 	struct task_struct *tsk;
4414 
4415 	/* it should be kernfs_node belonging to cgroupfs and is a directory */
4416 	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
4417 	    kernfs_type(kn) != KERNFS_DIR)
4418 		return -EINVAL;
4419 
4420 	mutex_lock(&cgroup_mutex);
4421 
4422 	/*
4423 	 * We aren't being called from kernfs and there's no guarantee on
4424 	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
4425 	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
4426 	 */
4427 	rcu_read_lock();
4428 	cgrp = rcu_dereference(kn->priv);
4429 	if (!cgrp || cgroup_is_dead(cgrp)) {
4430 		rcu_read_unlock();
4431 		mutex_unlock(&cgroup_mutex);
4432 		return -ENOENT;
4433 	}
4434 	rcu_read_unlock();
4435 
4436 	css_task_iter_start(&cgrp->self, &it);
4437 	while ((tsk = css_task_iter_next(&it))) {
4438 		switch (tsk->state) {
4439 		case TASK_RUNNING:
4440 			stats->nr_running++;
4441 			break;
4442 		case TASK_INTERRUPTIBLE:
4443 			stats->nr_sleeping++;
4444 			break;
4445 		case TASK_UNINTERRUPTIBLE:
4446 			stats->nr_uninterruptible++;
4447 			break;
4448 		case TASK_STOPPED:
4449 			stats->nr_stopped++;
4450 			break;
4451 		default:
4452 			if (delayacct_is_task_waiting_on_io(tsk))
4453 				stats->nr_io_wait++;
4454 			break;
4455 		}
4456 	}
4457 	css_task_iter_end(&it);
4458 
4459 	mutex_unlock(&cgroup_mutex);
4460 	return 0;
4461 }
4462 
4463 
4464 /*
4465  * seq_file methods for the tasks/procs files. The seq_file position is the
4466  * next pid to display; the seq_file iterator is a pointer to the pid
4467  * in the cgroup->l->list array.
4468  */
4469 
cgroup_pidlist_start(struct seq_file * s,loff_t * pos)4470 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
4471 {
4472 	/*
4473 	 * Initially we receive a position value that corresponds to
4474 	 * one more than the last pid shown (or 0 on the first call or
4475 	 * after a seek to the start). Use a binary-search to find the
4476 	 * next pid to display, if any
4477 	 */
4478 	struct kernfs_open_file *of = s->private;
4479 	struct cgroup *cgrp = seq_css(s)->cgroup;
4480 	struct cgroup_pidlist *l;
4481 	enum cgroup_filetype type = seq_cft(s)->private;
4482 	int index = 0, pid = *pos;
4483 	int *iter, ret;
4484 
4485 	mutex_lock(&cgrp->pidlist_mutex);
4486 
4487 	/*
4488 	 * !NULL @of->priv indicates that this isn't the first start()
4489 	 * after open.  If the matching pidlist is around, we can use that.
4490 	 * Look for it.  Note that @of->priv can't be used directly.  It
4491 	 * could already have been destroyed.
4492 	 */
4493 	if (of->priv)
4494 		of->priv = cgroup_pidlist_find(cgrp, type);
4495 
4496 	/*
4497 	 * Either this is the first start() after open or the matching
4498 	 * pidlist has been destroyed inbetween.  Create a new one.
4499 	 */
4500 	if (!of->priv) {
4501 		ret = pidlist_array_load(cgrp, type,
4502 					 (struct cgroup_pidlist **)&of->priv);
4503 		if (ret)
4504 			return ERR_PTR(ret);
4505 	}
4506 	l = of->priv;
4507 
4508 	if (pid) {
4509 		int end = l->length;
4510 
4511 		while (index < end) {
4512 			int mid = (index + end) / 2;
4513 			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
4514 				index = mid;
4515 				break;
4516 			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
4517 				index = mid + 1;
4518 			else
4519 				end = mid;
4520 		}
4521 	}
4522 	/* If we're off the end of the array, we're done */
4523 	if (index >= l->length)
4524 		return NULL;
4525 	/* Update the abstract position to be the actual pid that we found */
4526 	iter = l->list + index;
4527 	*pos = cgroup_pid_fry(cgrp, *iter);
4528 	return iter;
4529 }
4530 
cgroup_pidlist_stop(struct seq_file * s,void * v)4531 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
4532 {
4533 	struct kernfs_open_file *of = s->private;
4534 	struct cgroup_pidlist *l = of->priv;
4535 
4536 	if (l)
4537 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
4538 				 CGROUP_PIDLIST_DESTROY_DELAY);
4539 	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
4540 }
4541 
cgroup_pidlist_next(struct seq_file * s,void * v,loff_t * pos)4542 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
4543 {
4544 	struct kernfs_open_file *of = s->private;
4545 	struct cgroup_pidlist *l = of->priv;
4546 	pid_t *p = v;
4547 	pid_t *end = l->list + l->length;
4548 	/*
4549 	 * Advance to the next pid in the array. If this goes off the
4550 	 * end, we're done
4551 	 */
4552 	p++;
4553 	if (p >= end) {
4554 		return NULL;
4555 	} else {
4556 		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
4557 		return p;
4558 	}
4559 }
4560 
cgroup_pidlist_show(struct seq_file * s,void * v)4561 static int cgroup_pidlist_show(struct seq_file *s, void *v)
4562 {
4563 	seq_printf(s, "%d\n", *(int *)v);
4564 
4565 	return 0;
4566 }
4567 
cgroup_read_notify_on_release(struct cgroup_subsys_state * css,struct cftype * cft)4568 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
4569 					 struct cftype *cft)
4570 {
4571 	return notify_on_release(css->cgroup);
4572 }
4573 
cgroup_write_notify_on_release(struct cgroup_subsys_state * css,struct cftype * cft,u64 val)4574 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
4575 					  struct cftype *cft, u64 val)
4576 {
4577 	if (val)
4578 		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4579 	else
4580 		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4581 	return 0;
4582 }
4583 
cgroup_clone_children_read(struct cgroup_subsys_state * css,struct cftype * cft)4584 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
4585 				      struct cftype *cft)
4586 {
4587 	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4588 }
4589 
cgroup_clone_children_write(struct cgroup_subsys_state * css,struct cftype * cft,u64 val)4590 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
4591 				       struct cftype *cft, u64 val)
4592 {
4593 	if (val)
4594 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4595 	else
4596 		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4597 	return 0;
4598 }
4599 
4600 /* cgroup core interface files for the default hierarchy */
4601 static struct cftype cgroup_dfl_base_files[] = {
4602 	{
4603 		.name = "cgroup.procs",
4604 		.file_offset = offsetof(struct cgroup, procs_file),
4605 		.seq_start = cgroup_pidlist_start,
4606 		.seq_next = cgroup_pidlist_next,
4607 		.seq_stop = cgroup_pidlist_stop,
4608 		.seq_show = cgroup_pidlist_show,
4609 		.private = CGROUP_FILE_PROCS,
4610 		.write = cgroup_procs_write,
4611 	},
4612 	{
4613 		.name = "cgroup.controllers",
4614 		.flags = CFTYPE_ONLY_ON_ROOT,
4615 		.seq_show = cgroup_root_controllers_show,
4616 	},
4617 	{
4618 		.name = "cgroup.controllers",
4619 		.flags = CFTYPE_NOT_ON_ROOT,
4620 		.seq_show = cgroup_controllers_show,
4621 	},
4622 	{
4623 		.name = "cgroup.subtree_control",
4624 		.seq_show = cgroup_subtree_control_show,
4625 		.write = cgroup_subtree_control_write,
4626 	},
4627 	{
4628 		.name = "cgroup.events",
4629 		.flags = CFTYPE_NOT_ON_ROOT,
4630 		.file_offset = offsetof(struct cgroup, events_file),
4631 		.seq_show = cgroup_events_show,
4632 	},
4633 	{ }	/* terminate */
4634 };
4635 
4636 /* cgroup core interface files for the legacy hierarchies */
4637 static struct cftype cgroup_legacy_base_files[] = {
4638 	{
4639 		.name = "cgroup.procs",
4640 		.seq_start = cgroup_pidlist_start,
4641 		.seq_next = cgroup_pidlist_next,
4642 		.seq_stop = cgroup_pidlist_stop,
4643 		.seq_show = cgroup_pidlist_show,
4644 		.private = CGROUP_FILE_PROCS,
4645 		.write = cgroup_procs_write,
4646 	},
4647 	{
4648 		.name = "cgroup.clone_children",
4649 		.read_u64 = cgroup_clone_children_read,
4650 		.write_u64 = cgroup_clone_children_write,
4651 	},
4652 	{
4653 		.name = "cgroup.sane_behavior",
4654 		.flags = CFTYPE_ONLY_ON_ROOT,
4655 		.seq_show = cgroup_sane_behavior_show,
4656 	},
4657 	{
4658 		.name = "tasks",
4659 		.seq_start = cgroup_pidlist_start,
4660 		.seq_next = cgroup_pidlist_next,
4661 		.seq_stop = cgroup_pidlist_stop,
4662 		.seq_show = cgroup_pidlist_show,
4663 		.private = CGROUP_FILE_TASKS,
4664 		.write = cgroup_tasks_write,
4665 	},
4666 	{
4667 		.name = "notify_on_release",
4668 		.read_u64 = cgroup_read_notify_on_release,
4669 		.write_u64 = cgroup_write_notify_on_release,
4670 	},
4671 	{
4672 		.name = "release_agent",
4673 		.flags = CFTYPE_ONLY_ON_ROOT,
4674 		.seq_show = cgroup_release_agent_show,
4675 		.write = cgroup_release_agent_write,
4676 		.max_write_len = PATH_MAX - 1,
4677 	},
4678 	{ }	/* terminate */
4679 };
4680 
4681 /*
4682  * css destruction is four-stage process.
4683  *
4684  * 1. Destruction starts.  Killing of the percpu_ref is initiated.
4685  *    Implemented in kill_css().
4686  *
4687  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
4688  *    and thus css_tryget_online() is guaranteed to fail, the css can be
4689  *    offlined by invoking offline_css().  After offlining, the base ref is
4690  *    put.  Implemented in css_killed_work_fn().
4691  *
4692  * 3. When the percpu_ref reaches zero, the only possible remaining
4693  *    accessors are inside RCU read sections.  css_release() schedules the
4694  *    RCU callback.
4695  *
4696  * 4. After the grace period, the css can be freed.  Implemented in
4697  *    css_free_work_fn().
4698  *
4699  * It is actually hairier because both step 2 and 4 require process context
4700  * and thus involve punting to css->destroy_work adding two additional
4701  * steps to the already complex sequence.
4702  */
css_free_work_fn(struct work_struct * work)4703 static void css_free_work_fn(struct work_struct *work)
4704 {
4705 	struct cgroup_subsys_state *css =
4706 		container_of(work, struct cgroup_subsys_state, destroy_work);
4707 	struct cgroup_subsys *ss = css->ss;
4708 	struct cgroup *cgrp = css->cgroup;
4709 
4710 	percpu_ref_exit(&css->refcnt);
4711 
4712 	if (ss) {
4713 		/* css free path */
4714 		struct cgroup_subsys_state *parent = css->parent;
4715 		int id = css->id;
4716 
4717 		ss->css_free(css);
4718 		cgroup_idr_remove(&ss->css_idr, id);
4719 		cgroup_put(cgrp);
4720 
4721 		if (parent)
4722 			css_put(parent);
4723 	} else {
4724 		/* cgroup free path */
4725 		atomic_dec(&cgrp->root->nr_cgrps);
4726 		cgroup_pidlist_destroy_all(cgrp);
4727 		cancel_work_sync(&cgrp->release_agent_work);
4728 
4729 		if (cgroup_parent(cgrp)) {
4730 			/*
4731 			 * We get a ref to the parent, and put the ref when
4732 			 * this cgroup is being freed, so it's guaranteed
4733 			 * that the parent won't be destroyed before its
4734 			 * children.
4735 			 */
4736 			cgroup_put(cgroup_parent(cgrp));
4737 			kernfs_put(cgrp->kn);
4738 			kfree(cgrp);
4739 		} else {
4740 			/*
4741 			 * This is root cgroup's refcnt reaching zero,
4742 			 * which indicates that the root should be
4743 			 * released.
4744 			 */
4745 			cgroup_destroy_root(cgrp->root);
4746 		}
4747 	}
4748 }
4749 
css_free_rcu_fn(struct rcu_head * rcu_head)4750 static void css_free_rcu_fn(struct rcu_head *rcu_head)
4751 {
4752 	struct cgroup_subsys_state *css =
4753 		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
4754 
4755 	INIT_WORK(&css->destroy_work, css_free_work_fn);
4756 	queue_work(cgroup_destroy_wq, &css->destroy_work);
4757 }
4758 
css_release_work_fn(struct work_struct * work)4759 static void css_release_work_fn(struct work_struct *work)
4760 {
4761 	struct cgroup_subsys_state *css =
4762 		container_of(work, struct cgroup_subsys_state, destroy_work);
4763 	struct cgroup_subsys *ss = css->ss;
4764 	struct cgroup *cgrp = css->cgroup;
4765 
4766 	mutex_lock(&cgroup_mutex);
4767 
4768 	css->flags |= CSS_RELEASED;
4769 	list_del_rcu(&css->sibling);
4770 
4771 	if (ss) {
4772 		/* css release path */
4773 		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
4774 		if (ss->css_released)
4775 			ss->css_released(css);
4776 	} else {
4777 		/* cgroup release path */
4778 		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
4779 		cgrp->id = -1;
4780 
4781 		/*
4782 		 * There are two control paths which try to determine
4783 		 * cgroup from dentry without going through kernfs -
4784 		 * cgroupstats_build() and css_tryget_online_from_dir().
4785 		 * Those are supported by RCU protecting clearing of
4786 		 * cgrp->kn->priv backpointer.
4787 		 */
4788 		RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
4789 	}
4790 
4791 	mutex_unlock(&cgroup_mutex);
4792 
4793 	call_rcu(&css->rcu_head, css_free_rcu_fn);
4794 }
4795 
css_release(struct percpu_ref * ref)4796 static void css_release(struct percpu_ref *ref)
4797 {
4798 	struct cgroup_subsys_state *css =
4799 		container_of(ref, struct cgroup_subsys_state, refcnt);
4800 
4801 	INIT_WORK(&css->destroy_work, css_release_work_fn);
4802 	queue_work(cgroup_destroy_wq, &css->destroy_work);
4803 }
4804 
init_and_link_css(struct cgroup_subsys_state * css,struct cgroup_subsys * ss,struct cgroup * cgrp)4805 static void init_and_link_css(struct cgroup_subsys_state *css,
4806 			      struct cgroup_subsys *ss, struct cgroup *cgrp)
4807 {
4808 	lockdep_assert_held(&cgroup_mutex);
4809 
4810 	cgroup_get(cgrp);
4811 
4812 	memset(css, 0, sizeof(*css));
4813 	css->cgroup = cgrp;
4814 	css->ss = ss;
4815 	css->id = -1;
4816 	INIT_LIST_HEAD(&css->sibling);
4817 	INIT_LIST_HEAD(&css->children);
4818 	css->serial_nr = css_serial_nr_next++;
4819 	atomic_set(&css->online_cnt, 0);
4820 
4821 	if (cgroup_parent(cgrp)) {
4822 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
4823 		css_get(css->parent);
4824 	}
4825 
4826 	BUG_ON(cgroup_css(cgrp, ss));
4827 }
4828 
4829 /* invoke ->css_online() on a new CSS and mark it online if successful */
online_css(struct cgroup_subsys_state * css)4830 static int online_css(struct cgroup_subsys_state *css)
4831 {
4832 	struct cgroup_subsys *ss = css->ss;
4833 	int ret = 0;
4834 
4835 	lockdep_assert_held(&cgroup_mutex);
4836 
4837 	if (ss->css_online)
4838 		ret = ss->css_online(css);
4839 	if (!ret) {
4840 		css->flags |= CSS_ONLINE;
4841 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
4842 
4843 		atomic_inc(&css->online_cnt);
4844 		if (css->parent)
4845 			atomic_inc(&css->parent->online_cnt);
4846 	}
4847 	return ret;
4848 }
4849 
4850 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
offline_css(struct cgroup_subsys_state * css)4851 static void offline_css(struct cgroup_subsys_state *css)
4852 {
4853 	struct cgroup_subsys *ss = css->ss;
4854 
4855 	lockdep_assert_held(&cgroup_mutex);
4856 
4857 	if (!(css->flags & CSS_ONLINE))
4858 		return;
4859 
4860 	if (ss->css_offline)
4861 		ss->css_offline(css);
4862 
4863 	css->flags &= ~CSS_ONLINE;
4864 	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
4865 
4866 	wake_up_all(&css->cgroup->offline_waitq);
4867 }
4868 
4869 /**
4870  * create_css - create a cgroup_subsys_state
4871  * @cgrp: the cgroup new css will be associated with
4872  * @ss: the subsys of new css
4873  * @visible: whether to create control knobs for the new css or not
4874  *
4875  * Create a new css associated with @cgrp - @ss pair.  On success, the new
4876  * css is online and installed in @cgrp with all interface files created if
4877  * @visible.  Returns 0 on success, -errno on failure.
4878  */
create_css(struct cgroup * cgrp,struct cgroup_subsys * ss,bool visible)4879 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
4880 		      bool visible)
4881 {
4882 	struct cgroup *parent = cgroup_parent(cgrp);
4883 	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
4884 	struct cgroup_subsys_state *css;
4885 	int err;
4886 
4887 	lockdep_assert_held(&cgroup_mutex);
4888 
4889 	css = ss->css_alloc(parent_css);
4890 	if (IS_ERR(css))
4891 		return PTR_ERR(css);
4892 
4893 	init_and_link_css(css, ss, cgrp);
4894 
4895 	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
4896 	if (err)
4897 		goto err_free_css;
4898 
4899 	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
4900 	if (err < 0)
4901 		goto err_free_percpu_ref;
4902 	css->id = err;
4903 
4904 	if (visible) {
4905 		err = css_populate_dir(css, NULL);
4906 		if (err)
4907 			goto err_free_id;
4908 	}
4909 
4910 	/* @css is ready to be brought online now, make it visible */
4911 	list_add_tail_rcu(&css->sibling, &parent_css->children);
4912 	cgroup_idr_replace(&ss->css_idr, css, css->id);
4913 
4914 	err = online_css(css);
4915 	if (err)
4916 		goto err_list_del;
4917 
4918 	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4919 	    cgroup_parent(parent)) {
4920 		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
4921 			current->comm, current->pid, ss->name);
4922 		if (!strcmp(ss->name, "memory"))
4923 			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
4924 		ss->warned_broken_hierarchy = true;
4925 	}
4926 
4927 	return 0;
4928 
4929 err_list_del:
4930 	list_del_rcu(&css->sibling);
4931 	css_clear_dir(css, NULL);
4932 err_free_id:
4933 	cgroup_idr_remove(&ss->css_idr, css->id);
4934 err_free_percpu_ref:
4935 	percpu_ref_exit(&css->refcnt);
4936 err_free_css:
4937 	call_rcu(&css->rcu_head, css_free_rcu_fn);
4938 	return err;
4939 }
4940 
cgroup_mkdir(struct kernfs_node * parent_kn,const char * name,umode_t mode)4941 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
4942 			umode_t mode)
4943 {
4944 	struct cgroup *parent, *cgrp;
4945 	struct cgroup_root *root;
4946 	struct cgroup_subsys *ss;
4947 	struct kernfs_node *kn;
4948 	int ssid, ret;
4949 
4950 	/* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
4951 	 */
4952 	if (strchr(name, '\n'))
4953 		return -EINVAL;
4954 
4955 	parent = cgroup_kn_lock_live(parent_kn);
4956 	if (!parent)
4957 		return -ENODEV;
4958 	root = parent->root;
4959 
4960 	/* allocate the cgroup and its ID, 0 is reserved for the root */
4961 	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
4962 	if (!cgrp) {
4963 		ret = -ENOMEM;
4964 		goto out_unlock;
4965 	}
4966 
4967 	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
4968 	if (ret)
4969 		goto out_free_cgrp;
4970 
4971 	/*
4972 	 * Temporarily set the pointer to NULL, so idr_find() won't return
4973 	 * a half-baked cgroup.
4974 	 */
4975 	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
4976 	if (cgrp->id < 0) {
4977 		ret = -ENOMEM;
4978 		goto out_cancel_ref;
4979 	}
4980 
4981 	init_cgroup_housekeeping(cgrp);
4982 
4983 	cgrp->self.parent = &parent->self;
4984 	cgrp->root = root;
4985 
4986 	if (notify_on_release(parent))
4987 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
4988 
4989 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
4990 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
4991 
4992 	/* create the directory */
4993 	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
4994 	if (IS_ERR(kn)) {
4995 		ret = PTR_ERR(kn);
4996 		goto out_free_id;
4997 	}
4998 	cgrp->kn = kn;
4999 
5000 	/*
5001 	 * This extra ref will be put in cgroup_free_fn() and guarantees
5002 	 * that @cgrp->kn is always accessible.
5003 	 */
5004 	kernfs_get(kn);
5005 
5006 	cgrp->self.serial_nr = css_serial_nr_next++;
5007 
5008 	/* allocation complete, commit to creation */
5009 	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
5010 	atomic_inc(&root->nr_cgrps);
5011 	cgroup_get(parent);
5012 
5013 	/*
5014 	 * @cgrp is now fully operational.  If something fails after this
5015 	 * point, it'll be released via the normal destruction path.
5016 	 */
5017 	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
5018 
5019 	ret = cgroup_kn_set_ugid(kn);
5020 	if (ret)
5021 		goto out_destroy;
5022 
5023 	ret = css_populate_dir(&cgrp->self, NULL);
5024 	if (ret)
5025 		goto out_destroy;
5026 
5027 	/* let's create and online css's */
5028 	for_each_subsys(ss, ssid) {
5029 		if (parent->child_subsys_mask & (1 << ssid)) {
5030 			ret = create_css(cgrp, ss,
5031 					 parent->subtree_control & (1 << ssid));
5032 			if (ret)
5033 				goto out_destroy;
5034 		}
5035 	}
5036 
5037 	/*
5038 	 * On the default hierarchy, a child doesn't automatically inherit
5039 	 * subtree_control from the parent.  Each is configured manually.
5040 	 */
5041 	if (!cgroup_on_dfl(cgrp)) {
5042 		cgrp->subtree_control = parent->subtree_control;
5043 		cgroup_refresh_child_subsys_mask(cgrp);
5044 	}
5045 
5046 	kernfs_activate(kn);
5047 
5048 	ret = 0;
5049 	goto out_unlock;
5050 
5051 out_free_id:
5052 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
5053 out_cancel_ref:
5054 	percpu_ref_exit(&cgrp->self.refcnt);
5055 out_free_cgrp:
5056 	kfree(cgrp);
5057 out_unlock:
5058 	cgroup_kn_unlock(parent_kn);
5059 	return ret;
5060 
5061 out_destroy:
5062 	cgroup_destroy_locked(cgrp);
5063 	goto out_unlock;
5064 }
5065 
5066 /*
5067  * This is called when the refcnt of a css is confirmed to be killed.
5068  * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
5069  * initate destruction and put the css ref from kill_css().
5070  */
css_killed_work_fn(struct work_struct * work)5071 static void css_killed_work_fn(struct work_struct *work)
5072 {
5073 	struct cgroup_subsys_state *css =
5074 		container_of(work, struct cgroup_subsys_state, destroy_work);
5075 
5076 	mutex_lock(&cgroup_mutex);
5077 
5078 	do {
5079 		offline_css(css);
5080 		css_put(css);
5081 		/* @css can't go away while we're holding cgroup_mutex */
5082 		css = css->parent;
5083 	} while (css && atomic_dec_and_test(&css->online_cnt));
5084 
5085 	mutex_unlock(&cgroup_mutex);
5086 }
5087 
5088 /* css kill confirmation processing requires process context, bounce */
css_killed_ref_fn(struct percpu_ref * ref)5089 static void css_killed_ref_fn(struct percpu_ref *ref)
5090 {
5091 	struct cgroup_subsys_state *css =
5092 		container_of(ref, struct cgroup_subsys_state, refcnt);
5093 
5094 	if (atomic_dec_and_test(&css->online_cnt)) {
5095 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
5096 		queue_work(cgroup_destroy_wq, &css->destroy_work);
5097 	}
5098 }
5099 
5100 /**
5101  * kill_css - destroy a css
5102  * @css: css to destroy
5103  *
5104  * This function initiates destruction of @css by removing cgroup interface
5105  * files and putting its base reference.  ->css_offline() will be invoked
5106  * asynchronously once css_tryget_online() is guaranteed to fail and when
5107  * the reference count reaches zero, @css will be released.
5108  */
kill_css(struct cgroup_subsys_state * css)5109 static void kill_css(struct cgroup_subsys_state *css)
5110 {
5111 	lockdep_assert_held(&cgroup_mutex);
5112 
5113 	/*
5114 	 * This must happen before css is disassociated with its cgroup.
5115 	 * See seq_css() for details.
5116 	 */
5117 	css_clear_dir(css, NULL);
5118 
5119 	/*
5120 	 * Killing would put the base ref, but we need to keep it alive
5121 	 * until after ->css_offline().
5122 	 */
5123 	css_get(css);
5124 
5125 	/*
5126 	 * cgroup core guarantees that, by the time ->css_offline() is
5127 	 * invoked, no new css reference will be given out via
5128 	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
5129 	 * proceed to offlining css's because percpu_ref_kill() doesn't
5130 	 * guarantee that the ref is seen as killed on all CPUs on return.
5131 	 *
5132 	 * Use percpu_ref_kill_and_confirm() to get notifications as each
5133 	 * css is confirmed to be seen as killed on all CPUs.
5134 	 */
5135 	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
5136 }
5137 
5138 /**
5139  * cgroup_destroy_locked - the first stage of cgroup destruction
5140  * @cgrp: cgroup to be destroyed
5141  *
5142  * css's make use of percpu refcnts whose killing latency shouldn't be
5143  * exposed to userland and are RCU protected.  Also, cgroup core needs to
5144  * guarantee that css_tryget_online() won't succeed by the time
5145  * ->css_offline() is invoked.  To satisfy all the requirements,
5146  * destruction is implemented in the following two steps.
5147  *
5148  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
5149  *     userland visible parts and start killing the percpu refcnts of
5150  *     css's.  Set up so that the next stage will be kicked off once all
5151  *     the percpu refcnts are confirmed to be killed.
5152  *
5153  * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
5154  *     rest of destruction.  Once all cgroup references are gone, the
5155  *     cgroup is RCU-freed.
5156  *
5157  * This function implements s1.  After this step, @cgrp is gone as far as
5158  * the userland is concerned and a new cgroup with the same name may be
5159  * created.  As cgroup doesn't care about the names internally, this
5160  * doesn't cause any problem.
5161  */
cgroup_destroy_locked(struct cgroup * cgrp)5162 static int cgroup_destroy_locked(struct cgroup *cgrp)
5163 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
5164 {
5165 	struct cgroup_subsys_state *css;
5166 	struct cgrp_cset_link *link;
5167 	int ssid;
5168 
5169 	lockdep_assert_held(&cgroup_mutex);
5170 
5171 	/*
5172 	 * Only migration can raise populated from zero and we're already
5173 	 * holding cgroup_mutex.
5174 	 */
5175 	if (cgroup_is_populated(cgrp))
5176 		return -EBUSY;
5177 
5178 	/*
5179 	 * Make sure there's no live children.  We can't test emptiness of
5180 	 * ->self.children as dead children linger on it while being
5181 	 * drained; otherwise, "rmdir parent/child parent" may fail.
5182 	 */
5183 	if (css_has_online_children(&cgrp->self))
5184 		return -EBUSY;
5185 
5186 	/*
5187 	 * Mark @cgrp and the associated csets dead.  The former prevents
5188 	 * further task migration and child creation by disabling
5189 	 * cgroup_lock_live_group().  The latter makes the csets ignored by
5190 	 * the migration path.
5191 	 */
5192 	cgrp->self.flags &= ~CSS_ONLINE;
5193 
5194 	spin_lock_irq(&css_set_lock);
5195 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
5196 		link->cset->dead = true;
5197 	spin_unlock_irq(&css_set_lock);
5198 
5199 	/* initiate massacre of all css's */
5200 	for_each_css(css, ssid, cgrp)
5201 		kill_css(css);
5202 
5203 	/*
5204 	 * Remove @cgrp directory along with the base files.  @cgrp has an
5205 	 * extra ref on its kn.
5206 	 */
5207 	kernfs_remove(cgrp->kn);
5208 
5209 	check_for_release(cgroup_parent(cgrp));
5210 
5211 	/* put the base reference */
5212 	percpu_ref_kill(&cgrp->self.refcnt);
5213 
5214 	return 0;
5215 };
5216 
cgroup_rmdir(struct kernfs_node * kn)5217 static int cgroup_rmdir(struct kernfs_node *kn)
5218 {
5219 	struct cgroup *cgrp;
5220 	int ret = 0;
5221 
5222 	cgrp = cgroup_kn_lock_live(kn);
5223 	if (!cgrp)
5224 		return 0;
5225 
5226 	ret = cgroup_destroy_locked(cgrp);
5227 
5228 	cgroup_kn_unlock(kn);
5229 	return ret;
5230 }
5231 
5232 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
5233 	.remount_fs		= cgroup_remount,
5234 	.show_options		= cgroup_show_options,
5235 	.mkdir			= cgroup_mkdir,
5236 	.rmdir			= cgroup_rmdir,
5237 	.rename			= cgroup_rename,
5238 };
5239 
cgroup_init_subsys(struct cgroup_subsys * ss,bool early)5240 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
5241 {
5242 	struct cgroup_subsys_state *css;
5243 
5244 	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
5245 
5246 	mutex_lock(&cgroup_mutex);
5247 
5248 	idr_init(&ss->css_idr);
5249 	INIT_LIST_HEAD(&ss->cfts);
5250 
5251 	/* Create the root cgroup state for this subsystem */
5252 	ss->root = &cgrp_dfl_root;
5253 	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
5254 	/* We don't handle early failures gracefully */
5255 	BUG_ON(IS_ERR(css));
5256 	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
5257 
5258 	/*
5259 	 * Root csses are never destroyed and we can't initialize
5260 	 * percpu_ref during early init.  Disable refcnting.
5261 	 */
5262 	css->flags |= CSS_NO_REF;
5263 
5264 	if (early) {
5265 		/* allocation can't be done safely during early init */
5266 		css->id = 1;
5267 	} else {
5268 		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
5269 		BUG_ON(css->id < 0);
5270 	}
5271 
5272 	/* Update the init_css_set to contain a subsys
5273 	 * pointer to this state - since the subsystem is
5274 	 * newly registered, all tasks and hence the
5275 	 * init_css_set is in the subsystem's root cgroup. */
5276 	init_css_set.subsys[ss->id] = css;
5277 
5278 	have_fork_callback |= (bool)ss->fork << ss->id;
5279 	have_exit_callback |= (bool)ss->exit << ss->id;
5280 	have_free_callback |= (bool)ss->free << ss->id;
5281 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
5282 
5283 	/* At system boot, before all subsystems have been
5284 	 * registered, no tasks have been forked, so we don't
5285 	 * need to invoke fork callbacks here. */
5286 	BUG_ON(!list_empty(&init_task.tasks));
5287 
5288 	BUG_ON(online_css(css));
5289 
5290 	mutex_unlock(&cgroup_mutex);
5291 }
5292 
5293 /**
5294  * cgroup_init_early - cgroup initialization at system boot
5295  *
5296  * Initialize cgroups at system boot, and initialize any
5297  * subsystems that request early init.
5298  */
cgroup_init_early(void)5299 int __init cgroup_init_early(void)
5300 {
5301 	static struct cgroup_sb_opts __initdata opts;
5302 	struct cgroup_subsys *ss;
5303 	int i;
5304 
5305 	init_cgroup_root(&cgrp_dfl_root, &opts);
5306 	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
5307 
5308 	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
5309 
5310 	for_each_subsys(ss, i) {
5311 		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
5312 		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
5313 		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
5314 		     ss->id, ss->name);
5315 		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
5316 		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
5317 
5318 		ss->id = i;
5319 		ss->name = cgroup_subsys_name[i];
5320 		if (!ss->legacy_name)
5321 			ss->legacy_name = cgroup_subsys_name[i];
5322 
5323 		if (ss->early_init)
5324 			cgroup_init_subsys(ss, true);
5325 	}
5326 	return 0;
5327 }
5328 
5329 static unsigned long cgroup_disable_mask __initdata;
5330 
5331 /**
5332  * cgroup_init - cgroup initialization
5333  *
5334  * Register cgroup filesystem and /proc file, and initialize
5335  * any subsystems that didn't request early init.
5336  */
cgroup_init(void)5337 int __init cgroup_init(void)
5338 {
5339 	struct cgroup_subsys *ss;
5340 	unsigned long key;
5341 	int ssid;
5342 
5343 	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
5344 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5345 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
5346 
5347 	/*
5348 	 * The latency of the synchronize_sched() is too high for cgroups,
5349 	 * avoid it at the cost of forcing all readers into the slow path.
5350 	 */
5351 	rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
5352 
5353 	mutex_lock(&cgroup_mutex);
5354 
5355 	/* Add init_css_set to the hash table */
5356 	key = css_set_hash(init_css_set.subsys);
5357 	hash_add(css_set_table, &init_css_set.hlist, key);
5358 
5359 	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
5360 
5361 	mutex_unlock(&cgroup_mutex);
5362 
5363 	for_each_subsys(ss, ssid) {
5364 		if (ss->early_init) {
5365 			struct cgroup_subsys_state *css =
5366 				init_css_set.subsys[ss->id];
5367 
5368 			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
5369 						   GFP_KERNEL);
5370 			BUG_ON(css->id < 0);
5371 		} else {
5372 			cgroup_init_subsys(ss, false);
5373 		}
5374 
5375 		list_add_tail(&init_css_set.e_cset_node[ssid],
5376 			      &cgrp_dfl_root.cgrp.e_csets[ssid]);
5377 
5378 		/*
5379 		 * Setting dfl_root subsys_mask needs to consider the
5380 		 * disabled flag and cftype registration needs kmalloc,
5381 		 * both of which aren't available during early_init.
5382 		 */
5383 		if (cgroup_disable_mask & (1 << ssid)) {
5384 			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
5385 			printk(KERN_INFO "Disabling %s control group subsystem\n",
5386 			       ss->name);
5387 			continue;
5388 		}
5389 
5390 		cgrp_dfl_root.subsys_mask |= 1 << ss->id;
5391 
5392 		if (!ss->dfl_cftypes)
5393 			cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
5394 
5395 		if (ss->dfl_cftypes == ss->legacy_cftypes) {
5396 			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
5397 		} else {
5398 			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
5399 			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
5400 		}
5401 
5402 		if (ss->bind)
5403 			ss->bind(init_css_set.subsys[ssid]);
5404 	}
5405 
5406 	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
5407 	WARN_ON(register_filesystem(&cgroup_fs_type));
5408 	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
5409 
5410 	return 0;
5411 }
5412 
cgroup_wq_init(void)5413 static int __init cgroup_wq_init(void)
5414 {
5415 	/*
5416 	 * There isn't much point in executing destruction path in
5417 	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
5418 	 * Use 1 for @max_active.
5419 	 *
5420 	 * We would prefer to do this in cgroup_init() above, but that
5421 	 * is called before init_workqueues(): so leave this until after.
5422 	 */
5423 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
5424 	BUG_ON(!cgroup_destroy_wq);
5425 
5426 	/*
5427 	 * Used to destroy pidlists and separate to serve as flush domain.
5428 	 * Cap @max_active to 1 too.
5429 	 */
5430 	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
5431 						    0, 1);
5432 	BUG_ON(!cgroup_pidlist_destroy_wq);
5433 
5434 	return 0;
5435 }
5436 core_initcall(cgroup_wq_init);
5437 
5438 /*
5439  * proc_cgroup_show()
5440  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
5441  *  - Used for /proc/<pid>/cgroup.
5442  */
proc_cgroup_show(struct seq_file * m,struct pid_namespace * ns,struct pid * pid,struct task_struct * tsk)5443 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5444 		     struct pid *pid, struct task_struct *tsk)
5445 {
5446 	char *buf, *path;
5447 	int retval;
5448 	struct cgroup_root *root;
5449 
5450 	retval = -ENOMEM;
5451 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
5452 	if (!buf)
5453 		goto out;
5454 
5455 	mutex_lock(&cgroup_mutex);
5456 	spin_lock_irq(&css_set_lock);
5457 
5458 	for_each_root(root) {
5459 		struct cgroup_subsys *ss;
5460 		struct cgroup *cgrp;
5461 		int ssid, count = 0;
5462 
5463 		if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
5464 			continue;
5465 
5466 		seq_printf(m, "%d:", root->hierarchy_id);
5467 		if (root != &cgrp_dfl_root)
5468 			for_each_subsys(ss, ssid)
5469 				if (root->subsys_mask & (1 << ssid))
5470 					seq_printf(m, "%s%s", count++ ? "," : "",
5471 						   ss->legacy_name);
5472 		if (strlen(root->name))
5473 			seq_printf(m, "%sname=%s", count ? "," : "",
5474 				   root->name);
5475 		seq_putc(m, ':');
5476 
5477 		cgrp = task_cgroup_from_root(tsk, root);
5478 
5479 		/*
5480 		 * On traditional hierarchies, all zombie tasks show up as
5481 		 * belonging to the root cgroup.  On the default hierarchy,
5482 		 * while a zombie doesn't show up in "cgroup.procs" and
5483 		 * thus can't be migrated, its /proc/PID/cgroup keeps
5484 		 * reporting the cgroup it belonged to before exiting.  If
5485 		 * the cgroup is removed before the zombie is reaped,
5486 		 * " (deleted)" is appended to the cgroup path.
5487 		 */
5488 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
5489 			path = cgroup_path(cgrp, buf, PATH_MAX);
5490 			if (!path) {
5491 				retval = -ENAMETOOLONG;
5492 				goto out_unlock;
5493 			}
5494 		} else {
5495 			path = "/";
5496 		}
5497 
5498 		seq_puts(m, path);
5499 
5500 		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
5501 			seq_puts(m, " (deleted)\n");
5502 		else
5503 			seq_putc(m, '\n');
5504 	}
5505 
5506 	retval = 0;
5507 out_unlock:
5508 	spin_unlock_irq(&css_set_lock);
5509 	mutex_unlock(&cgroup_mutex);
5510 	kfree(buf);
5511 out:
5512 	return retval;
5513 }
5514 
5515 /* Display information about each subsystem and each hierarchy */
proc_cgroupstats_show(struct seq_file * m,void * v)5516 static int proc_cgroupstats_show(struct seq_file *m, void *v)
5517 {
5518 	struct cgroup_subsys *ss;
5519 	int i;
5520 
5521 	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
5522 	/*
5523 	 * ideally we don't want subsystems moving around while we do this.
5524 	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
5525 	 * subsys/hierarchy state.
5526 	 */
5527 	mutex_lock(&cgroup_mutex);
5528 
5529 	for_each_subsys(ss, i)
5530 		seq_printf(m, "%s\t%d\t%d\t%d\n",
5531 			   ss->legacy_name, ss->root->hierarchy_id,
5532 			   atomic_read(&ss->root->nr_cgrps),
5533 			   cgroup_ssid_enabled(i));
5534 
5535 	mutex_unlock(&cgroup_mutex);
5536 	return 0;
5537 }
5538 
cgroupstats_open(struct inode * inode,struct file * file)5539 static int cgroupstats_open(struct inode *inode, struct file *file)
5540 {
5541 	return single_open(file, proc_cgroupstats_show, NULL);
5542 }
5543 
5544 static const struct file_operations proc_cgroupstats_operations = {
5545 	.open = cgroupstats_open,
5546 	.read = seq_read,
5547 	.llseek = seq_lseek,
5548 	.release = single_release,
5549 };
5550 
subsys_canfork_priv_p(void * ss_priv[CGROUP_CANFORK_COUNT],int i)5551 static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5552 {
5553 	if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
5554 		return &ss_priv[i - CGROUP_CANFORK_START];
5555 	return NULL;
5556 }
5557 
subsys_canfork_priv(void * ss_priv[CGROUP_CANFORK_COUNT],int i)5558 static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5559 {
5560 	void **private = subsys_canfork_priv_p(ss_priv, i);
5561 	return private ? *private : NULL;
5562 }
5563 
5564 /**
5565  * cgroup_fork - initialize cgroup related fields during copy_process()
5566  * @child: pointer to task_struct of forking parent process.
5567  *
5568  * A task is associated with the init_css_set until cgroup_post_fork()
5569  * attaches it to the parent's css_set.  Empty cg_list indicates that
5570  * @child isn't holding reference to its css_set.
5571  */
cgroup_fork(struct task_struct * child)5572 void cgroup_fork(struct task_struct *child)
5573 {
5574 	RCU_INIT_POINTER(child->cgroups, &init_css_set);
5575 	INIT_LIST_HEAD(&child->cg_list);
5576 }
5577 
5578 /**
5579  * cgroup_can_fork - called on a new task before the process is exposed
5580  * @child: the task in question.
5581  *
5582  * This calls the subsystem can_fork() callbacks. If the can_fork() callback
5583  * returns an error, the fork aborts with that error code. This allows for
5584  * a cgroup subsystem to conditionally allow or deny new forks.
5585  */
cgroup_can_fork(struct task_struct * child,void * ss_priv[CGROUP_CANFORK_COUNT])5586 int cgroup_can_fork(struct task_struct *child,
5587 		    void *ss_priv[CGROUP_CANFORK_COUNT])
5588 {
5589 	struct cgroup_subsys *ss;
5590 	int i, j, ret;
5591 
5592 	for_each_subsys_which(ss, i, &have_canfork_callback) {
5593 		ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i));
5594 		if (ret)
5595 			goto out_revert;
5596 	}
5597 
5598 	return 0;
5599 
5600 out_revert:
5601 	for_each_subsys(ss, j) {
5602 		if (j >= i)
5603 			break;
5604 		if (ss->cancel_fork)
5605 			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j));
5606 	}
5607 
5608 	return ret;
5609 }
5610 
5611 /**
5612  * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
5613  * @child: the task in question
5614  *
5615  * This calls the cancel_fork() callbacks if a fork failed *after*
5616  * cgroup_can_fork() succeded.
5617  */
cgroup_cancel_fork(struct task_struct * child,void * ss_priv[CGROUP_CANFORK_COUNT])5618 void cgroup_cancel_fork(struct task_struct *child,
5619 			void *ss_priv[CGROUP_CANFORK_COUNT])
5620 {
5621 	struct cgroup_subsys *ss;
5622 	int i;
5623 
5624 	for_each_subsys(ss, i)
5625 		if (ss->cancel_fork)
5626 			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i));
5627 }
5628 
5629 /**
5630  * cgroup_post_fork - called on a new task after adding it to the task list
5631  * @child: the task in question
5632  *
5633  * Adds the task to the list running through its css_set if necessary and
5634  * call the subsystem fork() callbacks.  Has to be after the task is
5635  * visible on the task list in case we race with the first call to
5636  * cgroup_task_iter_start() - to guarantee that the new task ends up on its
5637  * list.
5638  */
cgroup_post_fork(struct task_struct * child,void * old_ss_priv[CGROUP_CANFORK_COUNT])5639 void cgroup_post_fork(struct task_struct *child,
5640 		      void *old_ss_priv[CGROUP_CANFORK_COUNT])
5641 {
5642 	struct cgroup_subsys *ss;
5643 	int i;
5644 
5645 	/*
5646 	 * This may race against cgroup_enable_task_cg_lists().  As that
5647 	 * function sets use_task_css_set_links before grabbing
5648 	 * tasklist_lock and we just went through tasklist_lock to add
5649 	 * @child, it's guaranteed that either we see the set
5650 	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
5651 	 * @child during its iteration.
5652 	 *
5653 	 * If we won the race, @child is associated with %current's
5654 	 * css_set.  Grabbing css_set_lock guarantees both that the
5655 	 * association is stable, and, on completion of the parent's
5656 	 * migration, @child is visible in the source of migration or
5657 	 * already in the destination cgroup.  This guarantee is necessary
5658 	 * when implementing operations which need to migrate all tasks of
5659 	 * a cgroup to another.
5660 	 *
5661 	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
5662 	 * will remain in init_css_set.  This is safe because all tasks are
5663 	 * in the init_css_set before cg_links is enabled and there's no
5664 	 * operation which transfers all tasks out of init_css_set.
5665 	 */
5666 	if (use_task_css_set_links) {
5667 		struct css_set *cset;
5668 
5669 		spin_lock_irq(&css_set_lock);
5670 		cset = task_css_set(current);
5671 		if (list_empty(&child->cg_list)) {
5672 			get_css_set(cset);
5673 			css_set_move_task(child, NULL, cset, false);
5674 		}
5675 		spin_unlock_irq(&css_set_lock);
5676 	}
5677 
5678 	/*
5679 	 * Call ss->fork().  This must happen after @child is linked on
5680 	 * css_set; otherwise, @child might change state between ->fork()
5681 	 * and addition to css_set.
5682 	 */
5683 	for_each_subsys_which(ss, i, &have_fork_callback)
5684 		ss->fork(child, subsys_canfork_priv(old_ss_priv, i));
5685 }
5686 
5687 /**
5688  * cgroup_exit - detach cgroup from exiting task
5689  * @tsk: pointer to task_struct of exiting process
5690  *
5691  * Description: Detach cgroup from @tsk and release it.
5692  *
5693  * Note that cgroups marked notify_on_release force every task in
5694  * them to take the global cgroup_mutex mutex when exiting.
5695  * This could impact scaling on very large systems.  Be reluctant to
5696  * use notify_on_release cgroups where very high task exit scaling
5697  * is required on large systems.
5698  *
5699  * We set the exiting tasks cgroup to the root cgroup (top_cgroup).  We
5700  * call cgroup_exit() while the task is still competent to handle
5701  * notify_on_release(), then leave the task attached to the root cgroup in
5702  * each hierarchy for the remainder of its exit.  No need to bother with
5703  * init_css_set refcnting.  init_css_set never goes away and we can't race
5704  * with migration path - PF_EXITING is visible to migration path.
5705  */
cgroup_exit(struct task_struct * tsk)5706 void cgroup_exit(struct task_struct *tsk)
5707 {
5708 	struct cgroup_subsys *ss;
5709 	struct css_set *cset;
5710 	int i;
5711 
5712 	/*
5713 	 * Unlink from @tsk from its css_set.  As migration path can't race
5714 	 * with us, we can check css_set and cg_list without synchronization.
5715 	 */
5716 	cset = task_css_set(tsk);
5717 
5718 	if (!list_empty(&tsk->cg_list)) {
5719 		spin_lock_irq(&css_set_lock);
5720 		css_set_move_task(tsk, cset, NULL, false);
5721 		spin_unlock_irq(&css_set_lock);
5722 	} else {
5723 		get_css_set(cset);
5724 	}
5725 
5726 	/* see cgroup_post_fork() for details */
5727 	for_each_subsys_which(ss, i, &have_exit_callback)
5728 		ss->exit(tsk);
5729 }
5730 
cgroup_free(struct task_struct * task)5731 void cgroup_free(struct task_struct *task)
5732 {
5733 	struct css_set *cset = task_css_set(task);
5734 	struct cgroup_subsys *ss;
5735 	int ssid;
5736 
5737 	for_each_subsys_which(ss, ssid, &have_free_callback)
5738 		ss->free(task);
5739 
5740 	put_css_set(cset);
5741 }
5742 
check_for_release(struct cgroup * cgrp)5743 static void check_for_release(struct cgroup *cgrp)
5744 {
5745 	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
5746 	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
5747 		schedule_work(&cgrp->release_agent_work);
5748 }
5749 
5750 /*
5751  * Notify userspace when a cgroup is released, by running the
5752  * configured release agent with the name of the cgroup (path
5753  * relative to the root of cgroup file system) as the argument.
5754  *
5755  * Most likely, this user command will try to rmdir this cgroup.
5756  *
5757  * This races with the possibility that some other task will be
5758  * attached to this cgroup before it is removed, or that some other
5759  * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
5760  * The presumed 'rmdir' will fail quietly if this cgroup is no longer
5761  * unused, and this cgroup will be reprieved from its death sentence,
5762  * to continue to serve a useful existence.  Next time it's released,
5763  * we will get notified again, if it still has 'notify_on_release' set.
5764  *
5765  * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
5766  * means only wait until the task is successfully execve()'d.  The
5767  * separate release agent task is forked by call_usermodehelper(),
5768  * then control in this thread returns here, without waiting for the
5769  * release agent task.  We don't bother to wait because the caller of
5770  * this routine has no use for the exit status of the release agent
5771  * task, so no sense holding our caller up for that.
5772  */
cgroup_release_agent(struct work_struct * work)5773 static void cgroup_release_agent(struct work_struct *work)
5774 {
5775 	struct cgroup *cgrp =
5776 		container_of(work, struct cgroup, release_agent_work);
5777 	char *pathbuf = NULL, *agentbuf = NULL, *path;
5778 	char *argv[3], *envp[3];
5779 
5780 	mutex_lock(&cgroup_mutex);
5781 
5782 	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
5783 	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
5784 	if (!pathbuf || !agentbuf)
5785 		goto out;
5786 
5787 	path = cgroup_path(cgrp, pathbuf, PATH_MAX);
5788 	if (!path)
5789 		goto out;
5790 
5791 	argv[0] = agentbuf;
5792 	argv[1] = path;
5793 	argv[2] = NULL;
5794 
5795 	/* minimal command environment */
5796 	envp[0] = "HOME=/";
5797 	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
5798 	envp[2] = NULL;
5799 
5800 	mutex_unlock(&cgroup_mutex);
5801 	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
5802 	goto out_free;
5803 out:
5804 	mutex_unlock(&cgroup_mutex);
5805 out_free:
5806 	kfree(agentbuf);
5807 	kfree(pathbuf);
5808 }
5809 
cgroup_disable(char * str)5810 static int __init cgroup_disable(char *str)
5811 {
5812 	struct cgroup_subsys *ss;
5813 	char *token;
5814 	int i;
5815 
5816 	while ((token = strsep(&str, ",")) != NULL) {
5817 		if (!*token)
5818 			continue;
5819 
5820 		for_each_subsys(ss, i) {
5821 			if (strcmp(token, ss->name) &&
5822 			    strcmp(token, ss->legacy_name))
5823 				continue;
5824 			cgroup_disable_mask |= 1 << i;
5825 		}
5826 	}
5827 	return 1;
5828 }
5829 __setup("cgroup_disable=", cgroup_disable);
5830 
5831 /**
5832  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
5833  * @dentry: directory dentry of interest
5834  * @ss: subsystem of interest
5835  *
5836  * If @dentry is a directory for a cgroup which has @ss enabled on it, try
5837  * to get the corresponding css and return it.  If such css doesn't exist
5838  * or can't be pinned, an ERR_PTR value is returned.
5839  */
css_tryget_online_from_dir(struct dentry * dentry,struct cgroup_subsys * ss)5840 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
5841 						       struct cgroup_subsys *ss)
5842 {
5843 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
5844 	struct cgroup_subsys_state *css = NULL;
5845 	struct cgroup *cgrp;
5846 
5847 	/* is @dentry a cgroup dir? */
5848 	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
5849 	    kernfs_type(kn) != KERNFS_DIR)
5850 		return ERR_PTR(-EBADF);
5851 
5852 	rcu_read_lock();
5853 
5854 	/*
5855 	 * This path doesn't originate from kernfs and @kn could already
5856 	 * have been or be removed at any point.  @kn->priv is RCU
5857 	 * protected for this access.  See css_release_work_fn() for details.
5858 	 */
5859 	cgrp = rcu_dereference(kn->priv);
5860 	if (cgrp)
5861 		css = cgroup_css(cgrp, ss);
5862 
5863 	if (!css || !css_tryget_online(css))
5864 		css = ERR_PTR(-ENOENT);
5865 
5866 	rcu_read_unlock();
5867 	return css;
5868 }
5869 
5870 /**
5871  * css_from_id - lookup css by id
5872  * @id: the cgroup id
5873  * @ss: cgroup subsys to be looked into
5874  *
5875  * Returns the css if there's valid one with @id, otherwise returns NULL.
5876  * Should be called under rcu_read_lock().
5877  */
css_from_id(int id,struct cgroup_subsys * ss)5878 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
5879 {
5880 	WARN_ON_ONCE(!rcu_read_lock_held());
5881 	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
5882 }
5883 
5884 #ifdef CONFIG_CGROUP_DEBUG
5885 static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state * parent_css)5886 debug_css_alloc(struct cgroup_subsys_state *parent_css)
5887 {
5888 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
5889 
5890 	if (!css)
5891 		return ERR_PTR(-ENOMEM);
5892 
5893 	return css;
5894 }
5895 
debug_css_free(struct cgroup_subsys_state * css)5896 static void debug_css_free(struct cgroup_subsys_state *css)
5897 {
5898 	kfree(css);
5899 }
5900 
debug_taskcount_read(struct cgroup_subsys_state * css,struct cftype * cft)5901 static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
5902 				struct cftype *cft)
5903 {
5904 	return cgroup_task_count(css->cgroup);
5905 }
5906 
current_css_set_read(struct cgroup_subsys_state * css,struct cftype * cft)5907 static u64 current_css_set_read(struct cgroup_subsys_state *css,
5908 				struct cftype *cft)
5909 {
5910 	return (u64)(unsigned long)current->cgroups;
5911 }
5912 
current_css_set_refcount_read(struct cgroup_subsys_state * css,struct cftype * cft)5913 static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
5914 					 struct cftype *cft)
5915 {
5916 	u64 count;
5917 
5918 	rcu_read_lock();
5919 	count = atomic_read(&task_css_set(current)->refcount);
5920 	rcu_read_unlock();
5921 	return count;
5922 }
5923 
current_css_set_cg_links_read(struct seq_file * seq,void * v)5924 static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
5925 {
5926 	struct cgrp_cset_link *link;
5927 	struct css_set *cset;
5928 	char *name_buf;
5929 
5930 	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
5931 	if (!name_buf)
5932 		return -ENOMEM;
5933 
5934 	spin_lock_irq(&css_set_lock);
5935 	rcu_read_lock();
5936 	cset = rcu_dereference(current->cgroups);
5937 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
5938 		struct cgroup *c = link->cgrp;
5939 
5940 		cgroup_name(c, name_buf, NAME_MAX + 1);
5941 		seq_printf(seq, "Root %d group %s\n",
5942 			   c->root->hierarchy_id, name_buf);
5943 	}
5944 	rcu_read_unlock();
5945 	spin_unlock_irq(&css_set_lock);
5946 	kfree(name_buf);
5947 	return 0;
5948 }
5949 
5950 #define MAX_TASKS_SHOWN_PER_CSS 25
cgroup_css_links_read(struct seq_file * seq,void * v)5951 static int cgroup_css_links_read(struct seq_file *seq, void *v)
5952 {
5953 	struct cgroup_subsys_state *css = seq_css(seq);
5954 	struct cgrp_cset_link *link;
5955 
5956 	spin_lock_irq(&css_set_lock);
5957 	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
5958 		struct css_set *cset = link->cset;
5959 		struct task_struct *task;
5960 		int count = 0;
5961 
5962 		seq_printf(seq, "css_set %pK\n", cset);
5963 
5964 		list_for_each_entry(task, &cset->tasks, cg_list) {
5965 			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
5966 				goto overflow;
5967 			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
5968 		}
5969 
5970 		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
5971 			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
5972 				goto overflow;
5973 			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
5974 		}
5975 		continue;
5976 	overflow:
5977 		seq_puts(seq, "  ...\n");
5978 	}
5979 	spin_unlock_irq(&css_set_lock);
5980 	return 0;
5981 }
5982 
releasable_read(struct cgroup_subsys_state * css,struct cftype * cft)5983 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
5984 {
5985 	return (!cgroup_is_populated(css->cgroup) &&
5986 		!css_has_online_children(&css->cgroup->self));
5987 }
5988 
5989 static struct cftype debug_files[] =  {
5990 	{
5991 		.name = "taskcount",
5992 		.read_u64 = debug_taskcount_read,
5993 	},
5994 
5995 	{
5996 		.name = "current_css_set",
5997 		.read_u64 = current_css_set_read,
5998 	},
5999 
6000 	{
6001 		.name = "current_css_set_refcount",
6002 		.read_u64 = current_css_set_refcount_read,
6003 	},
6004 
6005 	{
6006 		.name = "current_css_set_cg_links",
6007 		.seq_show = current_css_set_cg_links_read,
6008 	},
6009 
6010 	{
6011 		.name = "cgroup_css_links",
6012 		.seq_show = cgroup_css_links_read,
6013 	},
6014 
6015 	{
6016 		.name = "releasable",
6017 		.read_u64 = releasable_read,
6018 	},
6019 
6020 	{ }	/* terminate */
6021 };
6022 
6023 struct cgroup_subsys debug_cgrp_subsys = {
6024 	.css_alloc = debug_css_alloc,
6025 	.css_free = debug_css_free,
6026 	.legacy_cftypes = debug_files,
6027 };
6028 #endif /* CONFIG_CGROUP_DEBUG */
6029