/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>
#include "blk.h"
#include <linux/android_vendor.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq		       *blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
#endif
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;

	ANDROID_OEM_DATA(1);
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;
	/* number of blkgs on this cgroup with active block congestion delay */
	atomic_t			congestion_count;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char                            fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif

	ANDROID_OEM_DATA(1);
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				online;
};
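
/*
 * For illustration, a minimal sketch of the embedding described above.  The
 * "foo" policy and its fields are hypothetical, not an in-tree policy:
 *
 *	struct foo_blkg_pd {
 *		struct blkg_policy_data	pd;	// placed first, per above
 *		u64			nr_ios;	// policy-private state
 *	};
 *
 *	static struct foo_blkg_pd *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_pd, pd) : NULL;
 *	}
 *
 * The policy's pd_alloc_fn() allocates a foo_blkg_pd and hands &foo->pd back
 * to the core, which only ever sees the embedded blkg_policy_data.
 */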

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);
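
/*
 * A minimal registration sketch tying the callbacks above to the hypothetical
 * foo_blkg_pd example earlier; error handling is elided:
 *
 *	static struct blkg_policy_data *foo_pd_alloc(struct gendisk *disk,
 *					struct blkcg *blkcg, gfp_t gfp)
 *	{
 *		struct foo_blkg_pd *fpd = kzalloc(sizeof(*fpd), gfp);
 *
 *		return fpd ? &fpd->pd : NULL;
 *	}
 *
 *	static void foo_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_foo(pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 * A user would call blkcg_policy_register(&blkcg_policy_foo) at init time
 * (which assigns .plid), blkcg_activate_policy() per disk, and
 * blkcg_policy_unregister() on exit.
 */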

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char				*input;
	char				*body;
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
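
/*
 * Typical blkg_conf_ctx lifecycle when parsing a "MAJ:MIN ..." policy knob,
 * sketched from the declarations above (error paths abbreviated):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, input);
 *	ret = blkg_conf_prep(blkcg, pol, &ctx);	// resolves ctx.bdev, ctx.blkg
 *	if (!ret) {
 *		... parse ctx.body and update ctx.blkg's policy data ...
 *	}
 *	blkg_conf_exit(&ctx);	// always pairs with blkg_conf_init()
 */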

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
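
/*
 * Sketch of the pattern described above; submit_as_root_blkg() stands in for
 * whatever mechanism the caller uses to run with the root blkg and backcharge
 * the owner, and is hypothetical:
 *
 *	bio_associate_blkg(bio);		// attach the owning blkg
 *	if (bio_issue_as_root_blkg(bio))
 *		submit_as_root_blkg(bio);	// hypothetical helper
 *	else
 *		submit_bio(bio);
 */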

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference_check(blkcg->blkg_hint,
			lockdep_is_held(&q->queue_lock));
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}
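
/*
 * Example (sketch): the result is only stable for the duration of the RCU
 * section unless a reference is taken (see blkg_tryget() below):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... use @blkg within the RCU section ...
 *	rcu_read_unlock();
 */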

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
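
/*
 * The tryget/put pairing in a nutshell (sketch): a blkg found by an RCU
 * lookup may be mid-free, so pin it before leaving the RCU section and drop
 * the reference when done:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;		// lost the race with a concurrent free
 *	rcu_read_unlock();
 *	if (blkg) {
 *		... use blkg ...
 *		blkg_put(blkg);
 *	}
 */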

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))
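
/*
 * Iteration sketch: visit @p_blkg and every online descendant under RCU.
 * The per-node work shown is illustrative:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) {
 *		... e.g. reset d_blkg's per-policy statistics ...
 *	}
 *	rcu_read_unlock();
 */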

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->congestion_count);
}
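
/*
 * Pairing sketch for the two helpers above; the duration is illustrative:
 *
 *	blkcg_set_delay(blkg, 10 * NSEC_PER_MSEC);	// throttle by 10ms
 *	...
 *	blkcg_clear_delay(blkg);	// not decayed, must be cleared explicitly
 */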

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */