/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
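/*
 * Illustration (editorial note, not part of the original interface): the
 * default is the geometric mean of the bounds, sqrt(1 * 10000) == 100, so
 * a weight can be raised or lowered by up to 100x from the default.
 */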

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
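/*
 * Example (illustrative sketch, not part of the original header;
 * my_count_tasks() is a hypothetical helper): counting the processes
 * attached to a css with the iterator above.
 *
 *	static int my_count_tasks(struct cgroup_subsys_state *css)
 *	{
 *		struct css_task_iter it;
 *		struct task_struct *task;
 *		int n = 0;
 *
 *		css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *		while ((task = css_task_iter_next(&it)))
 *			n++;
 *		css_task_iter_end(&it);
 *		return n;
 *	}
 */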

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.  A css
 * which hasn't finished ->css_online() or already finished ->css_offline()
 * may show up during traversal.  It's each subsystem's responsibility to
 * synchronize against on/offlining.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration.
 * The caller is responsible for ensuring that @pos remains accessible
 * until the start of the next iteration by, for example, bumping the css
 * refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
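/*
 * Example (illustrative): visiting each child of @parent_css under RCU;
 * do_something() stands in for subsystem-specific work.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent_css)
 *		do_something(pos);
 *	rcu_read_unlock();
 */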

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and is the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.  A css
 * which hasn't finished ->css_online() or already finished ->css_offline()
 * may show up during traversal.  It's each subsystem's responsibility to
 * synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking the parent's state requires locking the parent, each
 * inheriting iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration.
 * The caller is responsible for ensuring that @pos remains accessible
 * until the start of the next iteration by, for example, bumping the css
 * refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and is the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in future
 * iterations and will stay visible until the last reference is put.  A css
 * which hasn't finished ->css_online() or already finished ->css_offline()
 * may show up during traversal.  It's each subsystem's responsibility to
 * synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk doesn't apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
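/*
 * Sketch (illustrative, not part of the original header): a subsystem's
 * ->attach() callback walking the taskset; my_attach() and
 * update_task_state() are hypothetical.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			update_task_state(task, css);
 *	}
 */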

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
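/*
 * Example (illustrative, not part of the original header): a reference
 * obtained with task_get_css() must be balanced by css_put().  The
 * subsystem id below assumes the cpu controller is built in.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(task, cpu_cgrp_id);
 *	... use css ...
 *	css_put(css);
 */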

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of the ancestor to find, counting from the root
 *
 * Find the ancestor of @cgrp at the specified level, counting from the
 * root, and return a pointer to it.  Return NULL if @cgrp doesn't have an
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}
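/*
 * Example (illustrative): with the root at level 0,
 * cgroup_ancestor(cgrp, 0) returns the root cgroup and
 * cgroup_ancestor(cgrp, cgrp->level) returns @cgrp itself.
 */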

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
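/*
 * Sketch (illustrative, not part of the original header): a cftype
 * ->seq_show() handler using the accessor above; my_seq_show() and
 * my_read_counter() are hypothetical.
 *
 *	static int my_seq_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup_subsys_state *css = seq_css(seq);
 *
 *		seq_printf(seq, "%llu\n", my_read_counter(css));
 *		return 0;
 *	}
 */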

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called in any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
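/*
 * Typical usage pattern (illustrative note, not part of the original
 * header): writers call cgroup_rstat_updated() from hot paths after
 * changing per-cpu state; readers call cgroup_rstat_flush() (or the
 * _hold/_release pair) to aggregate the per-cpu deltas before reading.
 */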

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */