/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
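
/*
 * For example, a weight knob's write handler might validate input against
 * this range before applying it (an illustrative sketch; the simplified
 * handler signature and my_apply_weight() are hypothetical):
 *
 *	static int my_weight_write(struct cgroup_subsys_state *css, u64 weight)
 *	{
 *		if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
 *			return -ERANGE;
 *		return my_apply_weight(css, weight);
 *	}
 */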

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct ext_css_set init_ext_css_set;
#define init_css_set init_ext_css_set.cset

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
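
/*
 * Both tests compile down to static branches and are cheap enough for hot
 * paths. A typical use, assuming the memory controller is built in
 * (illustrative only):
 *
 *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 *		// cgroup2 (default hierarchy) behavior
 *	else
 *		// legacy hierarchy behavior
 */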

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
int cgroup_can_fork(struct task_struct *p);
void cgroup_cancel_fork(struct task_struct *p);
void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void cgroup_enable_task_cg_lists(void);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
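
/*
 * A minimal iteration sketch (the loop body is illustrative): walk the
 * tasks associated with @css. Pass CSS_TASK_ITER_PROCS in @flags to visit
 * only threadgroup leaders.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		do_something(task);
 *	css_task_iter_end(&it);
 */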

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
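
/*
 * For example (illustrative), counting @parent_css's online children:
 *
 *	struct cgroup_subsys_state *child;
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		if (child->flags & CSS_ONLINE)
 *			n++;
 *	rcu_read_unlock();
 */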

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and is the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking the parent's state requires locking the parent, each
 * inheriting iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and is the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk doesn't apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
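
/*
 * An ->attach() method would typically use it as below (a sketch; the
 * per-task helper my_move_task() is hypothetical):
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			my_move_task(task, css);
 *	}
 */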

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any leaders. The trailing degenerate if/else filters out
 * non-leader tasks while keeping the macro usable as a normal loop
 * statement.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is online or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
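
/*
 * The usual pattern is an RCU-protected lookup followed by a tryget; e.g.
 * (illustrative, my_find_css() is a hypothetical lookup):
 *
 *	rcu_read_lock();
 *	css = my_find_css();
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 * On success the caller holds a full reference and must eventually drop it
 * with css_put().
 */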

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() or css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() or css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
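
/*
 * For example (illustrative), a path which is serialized by its own lock
 * can name that lock as the extra condition (my_lock is hypothetical):
 *
 *	lockdep_assert_held(&my_lock);
 *	cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 */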

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such a task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
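
/*
 * The caller owns the returned reference and must drop it with css_put()
 * when done; e.g. (illustrative, using the cpu controller):
 *
 *	css = task_get_css(task, cpu_cgrp_id);
 *	// ... use css ...
 *	css_put(css);
 */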

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find the ancestor of @cgrp at @ancestor_level, counting levels from the
 * root, and return a pointer to it. Return NULL if @cgrp doesn't have an
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}
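
/*
 * For example (illustrative): if @cgrp sits at level 3,
 *
 *	cgroup_ancestor(cgrp, 1);	// walks up two levels
 *	cgroup_ancestor(cgrp, 4);	// NULL, level 4 is deeper than @cgrp
 */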

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant of
 * @ancestor. It follows all the same rules as cgroup_is_descendant, and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called from any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
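
/*
 * A typical (illustrative) diagnostic use:
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *		pr_info("cgroup: %s\n", buf);
 */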

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 3)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */