1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BLK_CGROUP_H
3 #define _BLK_CGROUP_H
4 /*
5  * Common Block IO controller cgroup interface
6  *
7  * Based on ideas and code from CFQ, CFS and BFQ:
8  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
9  *
10  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
11  *		      Paolo Valente <paolo.valente@unimore.it>
12  *
13  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
14  * 	              Nauman Rafique <nauman@google.com>
15  */
16 
17 #include <linux/cgroup.h>
18 #include <linux/percpu.h>
19 #include <linux/percpu_counter.h>
20 #include <linux/u64_stats_sync.h>
21 #include <linux/seq_file.h>
22 #include <linux/radix-tree.h>
23 #include <linux/blkdev.h>
24 #include <linux/atomic.h>
25 #include <linux/kthread.h>
26 #include <linux/fs.h>
27 #include <linux/blk-mq.h>
28 
29 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
30 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
31 
32 /* Max limits for throttle policy */
33 #define THROTL_IOPS_MAX		UINT_MAX
34 #define FC_APPID_LEN              129
35 
36 
37 #ifdef CONFIG_BLK_CGROUP
38 
39 enum blkg_iostat_type {
40 	BLKG_IOSTAT_READ,
41 	BLKG_IOSTAT_WRITE,
42 	BLKG_IOSTAT_DISCARD,
43 
44 	BLKG_IOSTAT_NR,
45 };
46 
47 struct blkcg_gq;
48 
49 struct blkcg {
50 	struct cgroup_subsys_state	css;
51 	spinlock_t			lock;
52 	refcount_t			online_pin;
53 
54 	struct radix_tree_root		blkg_tree;
55 	struct blkcg_gq	__rcu		*blkg_hint;
56 	struct hlist_head		blkg_list;
57 
58 	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];
59 
60 	struct list_head		all_blkcgs_node;
61 #ifdef CONFIG_BLK_CGROUP_FC_APPID
62 	char                            fc_app_id[FC_APPID_LEN];
63 #endif
64 #ifdef CONFIG_CGROUP_WRITEBACK
65 	struct list_head		cgwb_list;
66 #endif
67 };
68 
69 struct blkg_iostat {
70 	u64				bytes[BLKG_IOSTAT_NR];
71 	u64				ios[BLKG_IOSTAT_NR];
72 };
73 
74 struct blkg_iostat_set {
75 	struct u64_stats_sync		sync;
76 	struct blkg_iostat		cur;
77 	struct blkg_iostat		last;
78 };
79 
80 /*
81  * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
82  * request_queue (q).  This is used by blkcg policies which need to track
83  * information per blkcg - q pair.
84  *
85  * There can be multiple active blkcg policies and each blkg:policy pair is
86  * represented by a blkg_policy_data which is allocated and freed by each
87  * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
88  * area by allocating larger data structure which embeds blkg_policy_data
89  * at the beginning.
90  */
91 struct blkg_policy_data {
92 	/* the blkg and policy id this per-policy data belongs to */
93 	struct blkcg_gq			*blkg;
94 	int				plid;
95 };
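
/*
 * Illustrative sketch (not part of the kernel API, all foo_* names are made
 * up): a hypothetical "foo" policy embeds blkg_policy_data at the start of
 * its own per-blkg structure, as described above, and converts back with
 * container_of().
 */
struct foo_blkg_data {
	struct blkg_policy_data	pd;		/* must be the first member */
	u64			nr_throttled;	/* policy-private state */
};

static inline struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
}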
96 
97 /*
98  * Policies that need to keep per-blkcg data which is independent of any
99  * request_queue associated with it should implement cpd_alloc/free_fn()
100  * methods.  A policy can allocate a private data area by allocating a larger
101  * data structure which embeds blkcg_policy_data at the beginning.
102  * cpd_init() is invoked to let each policy handle per-blkcg data.
103  */
104 struct blkcg_policy_data {
105 	/* the blkcg and policy id this per-policy data belongs to */
106 	struct blkcg			*blkcg;
107 	int				plid;
108 };
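
/*
 * Illustrative sketch (hypothetical names): per-blkcg private data follows
 * the same embedding convention, with blkcg_policy_data as the first member.
 */
struct foo_blkcg_data {
	struct blkcg_policy_data	cpd;	/* must be the first member */
	unsigned int			dfl_weight;
};

static inline struct foo_blkcg_data *cpd_to_foo(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct foo_blkcg_data, cpd) : NULL;
}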
109 
110 /* association between a blk cgroup and a request queue */
111 struct blkcg_gq {
112 	/* Pointer to the associated request_queue */
113 	struct request_queue		*q;
114 	struct list_head		q_node;
115 	struct hlist_node		blkcg_node;
116 	struct blkcg			*blkcg;
117 
118 	/* all non-root blkcg_gq's are guaranteed to have access to parent */
119 	struct blkcg_gq			*parent;
120 
121 	/* reference count */
122 	struct percpu_ref		refcnt;
123 
124 	/* is this blkg online? protected by both blkcg and q locks */
125 	bool				online;
126 
127 	struct blkg_iostat_set __percpu	*iostat_cpu;
128 	struct blkg_iostat_set		iostat;
129 
130 	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
131 
132 	spinlock_t			async_bio_lock;
133 	struct bio_list			async_bios;
134 	struct work_struct		async_bio_work;
135 
136 	atomic_t			use_delay;
137 	atomic64_t			delay_nsec;
138 	atomic64_t			delay_start;
139 	u64				last_delay;
140 	int				last_use;
141 
142 	struct rcu_head			rcu_head;
143 };
144 
145 typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
146 typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
147 typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
148 typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
149 typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
150 				struct request_queue *q, struct blkcg *blkcg);
151 typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
152 typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
153 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
154 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
155 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
156 typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
157 				struct seq_file *s);
158 
159 struct blkcg_policy {
160 	int				plid;
161 	/* cgroup files for the policy */
162 	struct cftype			*dfl_cftypes;
163 	struct cftype			*legacy_cftypes;
164 
165 	/* operations */
166 	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
167 	blkcg_pol_init_cpd_fn		*cpd_init_fn;
168 	blkcg_pol_free_cpd_fn		*cpd_free_fn;
169 	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;
170 
171 	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
172 	blkcg_pol_init_pd_fn		*pd_init_fn;
173 	blkcg_pol_online_pd_fn		*pd_online_fn;
174 	blkcg_pol_offline_pd_fn		*pd_offline_fn;
175 	blkcg_pol_free_pd_fn		*pd_free_fn;
176 	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
177 	blkcg_pol_stat_pd_fn		*pd_stat_fn;
178 };
179 
180 extern struct blkcg blkcg_root;
181 extern struct cgroup_subsys_state * const blkcg_root_css;
182 extern bool blkcg_debug_stats;
183 
184 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
185 				      struct request_queue *q, bool update_hint);
186 int blkcg_init_queue(struct request_queue *q);
187 void blkcg_exit_queue(struct request_queue *q);
188 
189 /* Blkio controller policy registration */
190 int blkcg_policy_register(struct blkcg_policy *pol);
191 void blkcg_policy_unregister(struct blkcg_policy *pol);
192 int blkcg_activate_policy(struct request_queue *q,
193 			  const struct blkcg_policy *pol);
194 void blkcg_deactivate_policy(struct request_queue *q,
195 			     const struct blkcg_policy *pol);
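
/*
 * Illustrative sketch (hypothetical "foo" policy): registration is done once,
 * typically from an init call, and the policy is then activated on each
 * request_queue that should use it.  The foo_pd_* callbacks are assumed to
 * exist and to allocate/initialize/free a structure embedding
 * blkg_policy_data as shown earlier.
 */
static struct blkcg_policy blkcg_policy_foo = {
	.pd_alloc_fn	= foo_pd_alloc,
	.pd_init_fn	= foo_pd_init,
	.pd_free_fn	= foo_pd_free,
};

static int __init foo_policy_init(void)
{
	return blkcg_policy_register(&blkcg_policy_foo);
}

static int foo_enable_on_queue(struct request_queue *q)
{
	/* allocates pd for every existing blkg of @q and initializes it */
	return blkcg_activate_policy(q, &blkcg_policy_foo);
}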
196 
197 const char *blkg_dev_name(struct blkcg_gq *blkg);
198 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
199 		       u64 (*prfill)(struct seq_file *,
200 				     struct blkg_policy_data *, int),
201 		       const struct blkcg_policy *pol, int data,
202 		       bool show_total);
203 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
204 
205 struct blkg_conf_ctx {
206 	struct block_device		*bdev;
207 	struct blkcg_gq			*blkg;
208 	char				*body;
209 };
210 
211 struct block_device *blkcg_conf_open_bdev(char **inputp);
212 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
213 		   char *input, struct blkg_conf_ctx *ctx);
214 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
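
/*
 * Illustrative sketch: a cftype write handler typically parses a per-device
 * configuration line ("MAJ:MIN <args>") with blkg_conf_prep(), updates its
 * policy data through ctx.blkg, and releases everything with
 * blkg_conf_finish().  blkcg_policy_foo and foo_apply_config() are
 * hypothetical.
 */
static ssize_t foo_config_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg and ctx.body stay valid until blkg_conf_finish() */
	ret = foo_apply_config(ctx.blkg, ctx.body);

	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}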
215 
216 /**
217  * blkcg_css - find the current css
218  *
219  * Find the css associated with either the kthread or the current task.
220  * This may return a dying css, so it is up to the caller to use tryget logic
221  * to confirm it is alive and well.
222  */
223 static inline struct cgroup_subsys_state *blkcg_css(void)
224 {
225 	struct cgroup_subsys_state *css;
226 
227 	css = kthread_blkcg();
228 	if (css)
229 		return css;
230 	return task_css(current, io_cgrp_id);
231 }
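
/*
 * Illustrative sketch (blkcg_get_current_css() is a hypothetical helper):
 * since the returned css may already be dying, a caller that wants to hold
 * on to it confirms liveness with tryget before taking a reference.
 */
static inline struct cgroup_subsys_state *blkcg_get_current_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = blkcg_css();
	if (!css_tryget_online(css))
		css = NULL;	/* raced with the cgroup going away */
	rcu_read_unlock();

	return css;		/* caller pairs a non-NULL return with css_put() */
}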
232 
233 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
234 {
235 	return css ? container_of(css, struct blkcg, css) : NULL;
236 }
237 
238 /**
239  * __bio_blkcg - internal, inconsistent version to get blkcg
240  *
241  * DO NOT USE.
242  * This function is inconsistent and consequently is dangerous to use.  The
243  * first part of the function returns a blkcg where a reference is owned by the
244  * bio.  This means it does not need to be rcu protected as it cannot go away
245  * with the bio owning a reference to it.  However, the latter potentially gets
246  * it from task_css().  This can race against task migration and the cgroup
247  * dying.  It is also semantically different as it must be called rcu protected
248  * and is susceptible to failure when trying to get a reference to it.
249  * Therefore, it is not ok to assume that *_get() will always succeed on the
250  * blkcg returned here.
251  */
252 static inline struct blkcg *__bio_blkcg(struct bio *bio)
253 {
254 	if (bio && bio->bi_blkg)
255 		return bio->bi_blkg->blkcg;
256 	return css_to_blkcg(blkcg_css());
257 }
258 
259 /**
260  * bio_blkcg - grab the blkcg associated with a bio
261  * @bio: target bio
262  *
263  * This returns the blkcg associated with a bio, %NULL if not associated.
264  * Callers are expected to either handle %NULL or know association has been
265  * done prior to calling this.
266  */
267 static inline struct blkcg *bio_blkcg(struct bio *bio)
268 {
269 	if (bio && bio->bi_blkg)
270 		return bio->bi_blkg->blkcg;
271 	return NULL;
272 }
273 
274 static inline bool blk_cgroup_congested(void)
275 {
276 	struct cgroup_subsys_state *css;
277 	bool ret = false;
278 
279 	rcu_read_lock();
280 	css = kthread_blkcg();
281 	if (!css)
282 		css = task_css(current, io_cgrp_id);
283 	while (css) {
284 		if (atomic_read(&css->cgroup->congestion_count)) {
285 			ret = true;
286 			break;
287 		}
288 		css = css->parent;
289 	}
290 	rcu_read_unlock();
291 	return ret;
292 }
293 
294 /**
295  * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
296  * @return: true if this bio needs to be submitted with the root blkg context.
297  *
298  * In order to avoid priority inversions we sometimes need to issue a bio as if
299  * it were attached to the root blkg, and then backcharge to the actual owning
300  * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
301  * bio and attach the appropriate blkg to the bio.  Then we call this helper and
302  * if it is true run with the root blkg for that queue and then do any
303  * backcharging to the originating cgroup once the io is complete.
304  */
305 static inline bool bio_issue_as_root_blkg(struct bio *bio)
306 {
307 	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
308 }
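
/*
 * Illustrative sketch (foo_over_limit() is hypothetical): a throttling
 * policy's issue path lets such bios through unthrottled and charges the
 * owning blkg once the IO completes.
 */
static inline bool foo_should_throttle(struct bio *bio)
{
	if (bio_issue_as_root_blkg(bio))
		return false;	/* issue now, backcharge at completion time */

	return foo_over_limit(bio->bi_blkg);
}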
309 
310 /**
311  * blkcg_parent - get the parent of a blkcg
312  * @blkcg: blkcg of interest
313  *
314  * Return the parent blkcg of @blkcg.  Can be called anytime.
315  */
316 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
317 {
318 	return css_to_blkcg(blkcg->css.parent);
319 }
320 
321 /**
322  * __blkg_lookup - internal version of blkg_lookup()
323  * @blkcg: blkcg of interest
324  * @q: request_queue of interest
325  * @update_hint: whether to update lookup hint with the result or not
326  *
327  * This is internal version and shouldn't be used by policy
328  * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
329  * @q's bypass state.  If @update_hint is %true, the caller should be
330  * holding @q->queue_lock and lookup hint is updated on success.
331  */
332 static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
333 					     struct request_queue *q,
334 					     bool update_hint)
335 {
336 	struct blkcg_gq *blkg;
337 
338 	if (blkcg == &blkcg_root)
339 		return q->root_blkg;
340 
341 	blkg = rcu_dereference(blkcg->blkg_hint);
342 	if (blkg && blkg->q == q)
343 		return blkg;
344 
345 	return blkg_lookup_slowpath(blkcg, q, update_hint);
346 }
347 
348 /**
349  * blkg_lookup - lookup blkg for the specified blkcg - q pair
350  * @blkcg: blkcg of interest
351  * @q: request_queue of interest
352  *
353  * Lookup blkg for the @blkcg - @q pair.  This function should be called
354  * under RCU read lock.
355  */
356 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
357 					   struct request_queue *q)
358 {
359 	WARN_ON_ONCE(!rcu_read_lock_held());
360 	return __blkg_lookup(blkcg, q, false);
361 }
362 
363 /**
364  * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
365  * @q: request_queue of interest
366  *
367  * Lookup blkg for @q at the root level. See also blkg_lookup().
368  */
369 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
370 {
371 	return q->root_blkg;
372 }
373 
374 /**
375  * blkg_to_pdata - get policy private data
376  * @blkg: blkg of interest
377  * @pol: policy of interest
378  *
379  * Return pointer to private data associated with the @blkg-@pol pair.
380  */
381 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
382 						  struct blkcg_policy *pol)
383 {
384 	return blkg ? blkg->pd[pol->plid] : NULL;
385 }
386 
387 static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
388 						     struct blkcg_policy *pol)
389 {
390 	return blkcg ? blkcg->cpd[pol->plid] : NULL;
391 }
392 
393 /**
394  * pdata_to_blkg - get blkg associated with policy private data
395  * @pd: policy private data of interest
396  *
397  * @pd is policy private data.  Determine the blkg it's associated with.
398  */
399 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
400 {
401 	return pd ? pd->blkg : NULL;
402 }
403 
404 static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
405 {
406 	return cpd ? cpd->blkcg : NULL;
407 }
408 
409 extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
410 
411 /**
412  * blkcg_pin_online - pin online state
413  * @blkcg: blkcg of interest
414  *
415  * While pinned, a blkcg is kept online.  This is primarily used to
416  * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
417  * while an associated cgwb is still active.
418  */
419 static inline void blkcg_pin_online(struct blkcg *blkcg)
420 {
421 	refcount_inc(&blkcg->online_pin);
422 }
423 
424 /**
425  * blkcg_unpin_online - unpin online state
426  * @blkcg: blkcg of interest
427  *
428  * This is primarily used to impedance-match blkg and cgwb lifetimes so
429  * that blkg doesn't go offline while an associated cgwb is still active.
430  * When this count goes to zero, all active cgwbs have finished so the
431  * blkcg can continue destruction by calling blkcg_destroy_blkgs().
432  */
433 static inline void blkcg_unpin_online(struct blkcg *blkcg)
434 {
435 	do {
436 		if (!refcount_dec_and_test(&blkcg->online_pin))
437 			break;
438 		blkcg_destroy_blkgs(blkcg);
439 		blkcg = blkcg_parent(blkcg);
440 	} while (blkcg);
441 }
442 
443 /**
444  * blkg_path - format cgroup path of blkg
445  * @blkg: blkg of interest
446  * @buf: target buffer
447  * @buflen: target buffer length
448  *
449  * Format the path of the cgroup of @blkg into @buf.
450  */
451 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
452 {
453 	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
454 }
455 
456 /**
457  * blkg_get - get a blkg reference
458  * @blkg: blkg to get
459  *
460  * The caller should be holding an existing reference.
461  */
462 static inline void blkg_get(struct blkcg_gq *blkg)
463 {
464 	percpu_ref_get(&blkg->refcnt);
465 }
466 
467 /**
468  * blkg_tryget - try and get a blkg reference
469  * @blkg: blkg to get
470  *
471  * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
472  * of freeing this blkg, so we can only use it if the refcnt is not zero.
473  */
474 static inline bool blkg_tryget(struct blkcg_gq *blkg)
475 {
476 	return blkg && percpu_ref_tryget(&blkg->refcnt);
477 }
478 
479 /**
480  * blkg_put - put a blkg reference
481  * @blkg: blkg to put
482  */
483 static inline void blkg_put(struct blkcg_gq *blkg)
484 {
485 	percpu_ref_put(&blkg->refcnt);
486 }
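
/*
 * Illustrative sketch (foo_get_blkg() is hypothetical): a lockless lookup
 * resolves the blkg under RCU and pins it with blkg_tryget() before the RCU
 * read lock is dropped; the caller later releases it with blkg_put().
 */
static inline struct blkcg_gq *foo_get_blkg(struct blkcg *blkcg,
					    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg && !blkg_tryget(blkg))
		blkg = NULL;	/* refcnt already hit zero, blkg is going away */
	rcu_read_unlock();

	return blkg;
}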
487 
488 /**
489  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
490  * @d_blkg: loop cursor pointing to the current descendant
491  * @pos_css: used for iteration
492  * @p_blkg: target blkg to walk descendants of
493  *
494  * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
495  * read locked.  If called under either blkcg or queue lock, the iteration
496  * is guaranteed to include all and only online blkgs.  The caller may
497  * update @pos_css by calling css_rightmost_descendant() to skip subtree.
498  * @p_blkg is included in the iteration and the first node to be visited.
499  */
500 #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
501 	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
502 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
503 					      (p_blkg)->q, false)))
504 
505 /**
506  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
507  * @d_blkg: loop cursor pointing to the current descendant
508  * @pos_css: used for iteration
509  * @p_blkg: target blkg to walk descendants of
510  *
511  * Similar to blkg_for_each_descendant_pre() but performs post-order
512  * traversal instead.  Synchronization rules are the same.  @p_blkg is
513  * included in the iteration and the last node to be visited.
514  */
515 #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
516 	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
517 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
518 					      (p_blkg)->q, false)))
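
/*
 * Illustrative sketch (blkcg_policy_foo and foo_apply_limit() are
 * hypothetical): propagating a configuration change from a parent blkg to
 * all of its online descendants; the queue lock is held so the iteration
 * sees exactly the online blkgs.
 */
static inline void foo_propagate_limit(struct blkcg_gq *parent_blkg, u64 limit)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	lockdep_assert_held(&parent_blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg) {
		struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_foo);

		if (pd)
			foo_apply_limit(pd, limit);
	}
	rcu_read_unlock();
}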
519 
520 bool __blkcg_punt_bio_submit(struct bio *bio);
521 
522 static inline bool blkcg_punt_bio_submit(struct bio *bio)
523 {
524 	if (bio->bi_opf & REQ_CGROUP_PUNT)
525 		return __blkcg_punt_bio_submit(bio);
526 	else
527 		return false;
528 }
529 
530 static inline void blkcg_bio_issue_init(struct bio *bio)
531 {
532 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
533 }
534 
535 static inline void blkcg_use_delay(struct blkcg_gq *blkg)
536 {
537 	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
538 		return;
539 	if (atomic_add_return(1, &blkg->use_delay) == 1)
540 		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
541 }
542 
543 static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
544 {
545 	int old = atomic_read(&blkg->use_delay);
546 
547 	if (WARN_ON_ONCE(old < 0))
548 		return 0;
549 	if (old == 0)
550 		return 0;
551 
552 	/*
553 	 * We do this song and dance because we can race with somebody else
554 	 * adding or removing delay.  If we just did an atomic_dec we'd end up
555 	 * negative and we'd already be in trouble.  We need to subtract 1 and
556 	 * then check to see if we were the last delay so we can drop the
557 	 * congestion count on the cgroup.
558 	 */
559 	while (old) {
560 		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
561 		if (cur == old)
562 			break;
563 		old = cur;
564 	}
565 
566 	if (old == 0)
567 		return 0;
568 	if (old == 1)
569 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
570 	return 1;
571 }
572 
573 /**
574  * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
575  * @blkg: target blkg
576  * @delay: delay duration in nsecs
577  *
578  * When enabled with this function, the delay is not decayed and must be
579  * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
580  * blkcg_[un]use_delay() and blkcg_add_delay() usages.
581  */
582 static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
583 {
584 	int old = atomic_read(&blkg->use_delay);
585 
586 	/* We only want 1 person setting the congestion count for this blkg. */
587 	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
588 		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
589 
590 	atomic64_set(&blkg->delay_nsec, delay);
591 }
592 
593 /**
594  * blkcg_clear_delay - Disable allocator delay mechanism
595  * @blkg: target blkg
596  *
597  * Disable use_delay mechanism. See blkcg_set_delay().
598  */
599 static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
600 {
601 	int old = atomic_read(&blkg->use_delay);
602 
603 	/* We only want 1 person clearing the congestion count for this blkg. */
604 	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
605 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
606 }
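
/*
 * Illustrative sketch (foo_throttle_issuer() is hypothetical): one possible
 * pairing is to arm a fixed delay on the blkg and ask the core to apply it
 * to the issuing task on its way back to userspace; the policy must later
 * call blkcg_clear_delay() itself.
 */
static inline void foo_throttle_issuer(struct blkcg_gq *blkg, u64 delay_nsec)
{
	blkcg_set_delay(blkg, delay_nsec);		/* fixed, non-decaying */
	blkcg_schedule_throttle(blkg->q, false);	/* delay current task on unwind */
}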
607 
608 /**
609  * blk_cgroup_mergeable - Determine whether to allow or disallow merges
610  * @rq: request to merge into
611  * @bio: bio to merge
612  *
613  * @bio and @rq should belong to the same cgroup and their issue_as_root should
614  * match. The latter is necessary as we don't want to throttle e.g. a metadata
615  * update because it happens to be next to a regular IO.
616  */
617 static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
618 {
619 	return rq->bio->bi_blkg == bio->bi_blkg &&
620 		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
621 }
622 
623 void blk_cgroup_bio_start(struct bio *bio);
624 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
625 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
626 void blkcg_maybe_throttle_current(void);
627 #else	/* CONFIG_BLK_CGROUP */
628 
629 struct blkcg {
630 };
631 
632 struct blkg_policy_data {
633 };
634 
635 struct blkcg_policy_data {
636 };
637 
638 struct blkcg_gq {
639 };
640 
641 struct blkcg_policy {
642 };
643 
644 #define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
645 
646 static inline void blkcg_maybe_throttle_current(void) { }
647 static inline bool blk_cgroup_congested(void) { return false; }
648 
649 #ifdef CONFIG_BLOCK
650 
651 static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
652 
653 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
654 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
655 { return NULL; }
656 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
657 static inline void blkcg_exit_queue(struct request_queue *q) { }
658 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
659 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
660 static inline int blkcg_activate_policy(struct request_queue *q,
661 					const struct blkcg_policy *pol) { return 0; }
662 static inline void blkcg_deactivate_policy(struct request_queue *q,
663 					   const struct blkcg_policy *pol) { }
664 
665 static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
666 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
667 
668 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
669 						  struct blkcg_policy *pol) { return NULL; }
670 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
671 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
672 static inline void blkg_get(struct blkcg_gq *blkg) { }
673 static inline void blkg_put(struct blkcg_gq *blkg) { }
674 
675 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
676 static inline void blkcg_bio_issue_init(struct bio *bio) { }
677 static inline void blk_cgroup_bio_start(struct bio *bio) { }
678 static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
679 
680 #define blk_queue_for_each_rl(rl, q)	\
681 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
682 
683 #endif	/* CONFIG_BLOCK */
684 #endif	/* CONFIG_BLK_CGROUP */
685 
686 #ifdef CONFIG_BLK_CGROUP_FC_APPID
687 /*
688  * Sets the fc_app_id field associated with the blkcg
689  * @app_id: application identifier
690  * @cgrp_id: cgroup id
691  * @app_id_len: size of application identifier
692  */
693 static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
694 {
695 	struct cgroup *cgrp;
696 	struct cgroup_subsys_state *css;
697 	struct blkcg *blkcg;
698 	int ret  = 0;
699 
700 	if (app_id_len > FC_APPID_LEN)
701 		return -EINVAL;
702 
703 	cgrp = cgroup_get_from_id(cgrp_id);
704 	if (!cgrp)
705 		return -ENOENT;
706 	css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
707 	if (!css) {
708 		ret = -ENOENT;
709 		goto out_cgrp_put;
710 	}
711 	blkcg = css_to_blkcg(css);
712 	/*
713 	 * There is a slight race condition on setting the appid.
714 	 * Worst case an I/O may not find the right id.
715 	 * This is no different from the I/O we let pass while obtaining
716 	 * the vmid from the fabric.
717 	 * Adding the overhead of a lock is not necessary.
718 	 */
719 	strlcpy(blkcg->fc_app_id, app_id, app_id_len);
720 	css_put(css);
721 out_cgrp_put:
722 	cgroup_put(cgrp);
723 	return ret;
724 }
725 
726 /**
727  * blkcg_get_fc_appid - get the fc app identifier associated with a bio
728  * @bio: target bio
729  *
730  * On success return the fc_app_id, on failure return NULL
731  */
732 static inline char *blkcg_get_fc_appid(struct bio *bio)
733 {
734 	if (bio && bio->bi_blkg &&
735 		(bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
736 		return bio->bi_blkg->blkcg->fc_app_id;
737 	return NULL;
738 }
739 #else
740 static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
741 static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
742 #endif /*CONFIG_BLK_CGROUP_FC_APPID*/
743 #endif	/* _BLK_CGROUP_H */
744