/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_parser.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern bool cgroup_debug;
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spin lock. It is good practice not to take
 * spin locks within trace point handlers, as they are mostly hidden
 * from normal view. As cgroup_path() can take the kernfs_rename_lock
 * spin lock, it is best to not call that function from the trace event
 * handler.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that will only
 * be set when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)				\
	do {								\
		if (trace_cgroup_##type##_enabled()) {			\
			unsigned long flags;				\
			spin_lock_irqsave(&trace_cgroup_path_lock,	\
					  flags);			\
			cgroup_path(cgrp, trace_cgroup_path,		\
				    TRACE_CGROUP_PATH_LEN);		\
			trace_cgroup_##type(cgrp, trace_cgroup_path,	\
					    ##__VA_ARGS__);		\
			spin_unlock_irqrestore(&trace_cgroup_path_lock,	\
					       flags);			\
		}							\
	} while (0)
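
/*
 * Illustrative usage (a sketch, not part of this header's API surface):
 * callers in cgroup.c pass the trace event type and the cgroup, e.g.
 *
 *	TRACE_CGROUP_PATH(mkdir, cgrp);
 *
 * assuming a matching trace_cgroup_mkdir() event is defined in
 * include/trace/events/cgroup.h. The path is rendered into the shared
 * trace_cgroup_path buffer under trace_cgroup_path_lock, so concurrent
 * users cannot clobber each other's buffer contents.
 */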

/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
	struct kernfs_fs_context kfc;
	struct cgroup_root	*root;
	struct cgroup_namespace	*ns;
	unsigned int	flags;			/* CGRP_ROOT_* flags */

	/* cgroup1 bits */
	bool		cpuset_clone_children;
	bool		none;			/* User explicitly requested empty subsystem */
	bool		all_ss;			/* Seen 'all' option */
	u16		subsys_mask;		/* Selected subsystems */
	char		*name;			/* Hierarchy name */
	char		*release_agent;		/* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct cgroup_fs_context, kfc);
}
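
/*
 * Illustrative only (a sketch; example_parse_param() is hypothetical).
 * The cgroup fs_context ops store a kernfs_fs_context in fc->fs_private,
 * so helpers such as cgroup1_parse_param() recover the outer context:
 *
 *	static int example_parse_param(struct fs_context *fc,
 *				       struct fs_parameter *param)
 *	{
 *		struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 *
 *		ctx->all_ss = true;
 *		return 0;
 *	}
 */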

struct cgroup_pidlist;

struct cgroup_file_ctx {
	struct cgroup_namespace	*ns;

	struct {
		void			*trigger;
	} psi;

	struct {
		bool			started;
		struct css_task_iter	iter;
	} procs;

	struct {
		struct cgroup_pidlist	*pidlist;
	} procs1;
};

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies. In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
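
/*
 * Illustrative only (a sketch; do_something_with() is hypothetical).
 * With css_set_lock held, one side of the M:N mapping is walked via the
 * link structures, e.g. all csets associated with a cgroup:
 *
 *	struct cgrp_cset_link *link;
 *
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		do_something_with(link->cset);
 *
 * The reverse direction walks cset->cgrp_links through the cgrp_link
 * member to reach each link's ->cgrp.
 */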

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the number of tasks in the set */
	int			nr_tasks;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
	 * the csets on ->dst_csets. ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
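
/*
 * Illustrative only (a sketch; per_task_work() is hypothetical).
 * Controllers typically consume a taskset in their ->attach() callbacks
 * using the iterator from linux/cgroup.h:
 *
 *	struct task_struct *task;
 *	struct cgroup_subsys_state *css;
 *
 *	cgroup_taskset_for_each(task, css, tset)
 *		per_task_work(task, css);
 */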

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets. Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)					\
{									\
	.src_csets = LIST_HEAD_INIT(tset.src_csets),			\
	.dst_csets = LIST_HEAD_INIT(tset.dst_csets),			\
	.csets = &tset.src_csets,					\
}

#define CGROUP_MGCTX_INIT(name)						\
{									\
	LIST_HEAD_INIT(name.preloaded_src_csets),			\
	LIST_HEAD_INIT(name.preloaded_dst_csets),			\
	CGROUP_TASKSET_INIT(name.tset),					\
}

#define DEFINE_CGROUP_MGCTX(name)					\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
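
/*
 * Illustrative only (a sketch): migration callers stack-allocate the
 * context with DEFINE_CGROUP_MGCTX(mgctx) and then drive the
 * cgroup_migrate_*() helpers declared below; see the sequence sketched
 * after cgroup_migrate().
 */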

extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
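
/*
 * Illustrative only (a sketch):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		pr_info("subsys %d: %s\n", ssid, ss->name);
 */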

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for the
	 * css_set_lock spinlock, taken with interrupts disabled.
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
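
/*
 * Illustrative only (a sketch): the usual pairing takes a reference
 * before publishing a css_set pointer and drops it when done,
 *
 *	get_css_set(cset);
 *	...
 *	put_css_set(cset);
 *
 * where put_css_set() takes css_set_lock only when dropping the final
 * reference, mirroring atomic_dec_and_lock().
 */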

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);
bool cgroup_is_thread_root(struct cgroup *cgrp);
bool cgroup_is_threaded(struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
int cgroup_do_get_tree(struct fs_context *fc);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);
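
/*
 * Illustrative only (a sketch with error handling elided): the migration
 * helpers above are driven in this order, as in cgroup_attach_task():
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */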

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
					     bool *locked,
					     struct cgroup *dst_cgrp)
	__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
	__releases(&cgroup_threadgroup_rwsem);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

int __cgroup_task_count(const struct cgroup *cgrp);
int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
extern const struct fs_parameter_spec cgroup1_fs_parameters[];

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
int cgroup1_get_tree(struct fs_context *fc);
int cgroup1_reconfigure(struct fs_context *ctx);

#endif /* __CGROUP_INTERNAL_H */