/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
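
/*
 * Note that CGROUP_WEIGHT_DFL / 100 == CGROUP_WEIGHT_MIN and
 * CGROUP_WEIGHT_DFL * 100 == CGROUP_WEIGHT_MAX, i.e. a weight can be
 * scaled down to 1/100th of the default or up to 100 times it.
 */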

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys	*ss;
	unsigned int		flags;

	struct list_head	*cset_pos;
	struct list_head	*cset_head;

	struct list_head	*tcset_pos;
	struct list_head	*tcset_head;

	struct list_head	*task_pos;
	struct list_head	*tasks_head;
	struct list_head	*mg_tasks_head;
	struct list_head	*dying_tasks_head;

	struct list_head	*cur_tasks_head;
	struct css_set		*cur_cset;
	struct css_set		*cur_dcset;
	struct task_struct	*cur_task;
	struct list_head	iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
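
/*
 * Illustrative sketch, not part of the API proper: printing the pid of
 * every thread-group leader attached to a css.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		pr_info("leader pid %d\n", task_pid_nr(task));
 *	css_task_iter_end(&it);
 */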

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
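
/*
 * Illustrative sketch: counting @parent's current children under the RCU
 * read lock. The count is only a snapshot.
 *
 *	struct cgroup_subsys_state *child;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		nr++;
 *	rcu_read_unlock();
 */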

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants. @css is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
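
/*
 * A minimal C rendering of the my_update_state() pseudocode above, for a
 * hypothetical subsystem state with a per-css spinlock and a "limit"
 * value inherited from the parent. Illustration only.
 *
 *	struct my_state {
 *		struct cgroup_subsys_state css;
 *		spinlock_t lock;
 *		unsigned long limit;
 *	};
 *
 *	static void my_update_state(struct my_state *ms, unsigned long new_limit)
 *	{
 *		struct cgroup_subsys_state *pos;
 *
 *		rcu_read_lock();
 *		css_for_each_descendant_pre(pos, &ms->css) {
 *			struct my_state *pms =
 *				container_of(pos, struct my_state, css);
 *
 *			spin_lock(&pms->lock);
 *			if (pos == &ms->css)
 *				pms->limit = new_limit;
 *			else if (pos->flags & CSS_ONLINE)
 *				pms->limit = container_of(pos->parent,
 *						struct my_state, css)->limit;
 *			spin_unlock(&pms->lock);
 *		}
 *		rcu_read_unlock();
 *	}
 */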

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
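
/*
 * Illustrative sketch: post-order is the natural order for bottom-up
 * aggregation since every child is visited before its parent. Extending
 * the hypothetical struct my_state above with a per-css "usage" counter
 * (locking omitted for brevity):
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, &ms->css) {
 *		struct cgroup_subsys_state *parent = pos->parent;
 *
 *		if (parent && pos != &ms->css)
 *			container_of(parent, struct my_state, css)->usage +=
 *				container_of(pos, struct my_state, css)->usage;
 *	}
 *	rcu_read_unlock();
 */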

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
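
/*
 * Illustrative sketch: the typical consumer is a subsystem's ->attach()
 * callback (the my_* names below are hypothetical).
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			my_apply_settings(task, css);
 *	}
 */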

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
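
/*
 * Illustrative sketch: the usual pattern is to look a css up under RCU
 * and pin it before use ("my_cgrp_id" is a hypothetical subsystem id):
 *
 *	rcu_read_lock();
 *	css = task_css(task, my_cgrp_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		do_something(css);
 *		css_put(css);
 *	}
 */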

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
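
/*
 * Illustrative sketch: a subsystem which serializes migrations with its
 * own lock (a hypothetical "my_lock") can tell lockdep about it via @__c:
 *
 *	struct css_set *cset;
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 */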

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	struct cgroup *ptr;

	if (cgrp->level < ancestor_level)
		return NULL;

	for (ptr = cgrp;
	     ptr && ptr->level > ancestor_level;
	     ptr = cgroup_parent(ptr))
		;

	if (ptr && ptr->level == ancestor_level)
		return ptr;

	return NULL;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant
 * of @ancestor. It follows all the same rules as cgroup_is_descendant()
 * and only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called in any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
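
/*
 * Illustrative sketch: per the kernfs conventions, cgroup_path() returns
 * the length of the full path (truncating the output if it doesn't fit
 * in @buflen) or a negative errno:
 *
 *	char buf[PATH_MAX];
 *	int len;
 *
 *	len = cgroup_path(cgrp, buf, sizeof(buf));
 *	if (len >= 0 && len < sizeof(buf))
 *		pr_info("cgroup: %s\n", buf);
 */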

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
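
/*
 * Illustrative sketch of the rstat usage pattern: the writer marks a
 * cgroup as having pending per-cpu updates from the hot path, and the
 * reader flushes them up the tree before reporting:
 *
 *	update (hot path, with @cpu being the updating CPU):
 *		cgroup_rstat_updated(cgrp, cpu);
 *
 *	read:
 *		cgroup_rstat_flush(cgrp);
 */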

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 3)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */