#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

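/*
 * For example (an illustrative sketch only; the "foo" names are
 * hypothetical and not part of this interface), a policy's private data
 * embeds pd as its first member:
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;	(must come first)
 *		u64			budget;
 *	};
 *
 * with blkcg_policy->pd_size set to sizeof(struct foo_group).
 */
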
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

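/*
 * A policy describes itself to blkcg core with an instance of the above,
 * typically along these lines (hedged sketch; the "foo" names are
 * hypothetical, and plid is assigned by blkcg_policy_register()):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 */
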
extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

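/*
 * Typical policy lifecycle (sketch): register once at module init, then
 * activate on each queue the policy should manage, and undo both in
 * reverse order on teardown:
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	blkcg_policy_unregister(&blkcg_policy_foo);
 */
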
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

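/*
 * blkg_conf_prep() parses a per-device configuration string, resolves it
 * to a blkg and leaves locks held; blkg_conf_finish() drops them.  A
 * cgroup file write handler would use them roughly as follows (sketch,
 * error handling elided):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (!ret) {
 *		(apply ctx.v to ctx.blkg here)
 *		blkg_conf_finish(&ctx);
 *	}
 */
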
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

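/*
 * Policies conventionally wrap the two helpers above in typed converters
 * of their own (sketch; the "foo" names are hypothetical):
 *
 *	static struct foo_group *blkg_to_foo(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd =
 *			blkg_to_pd(blkg, &blkcg_policy_foo);
 *
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 *
 *	static struct blkcg_gq *foo_to_blkg(struct foo_group *fg)
 *	{
 *		return pd_to_blkg(&fg->pd);
 *	}
 */
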
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

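/*
 * The pair above follows the usual refcounting pattern (sketch): a user
 * which caches a blkg pointer beyond the RCU read section it was looked
 * up under takes a reference first and drops it when done:
 *
 *	blkg_get(blkg);
 *	(use blkg outside the RCU section)
 *	blkg_put(blkg);
 *
 * The final put frees the blkg after an RCU grace period via
 * __blkg_release_rcu().
 */
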
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

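/*
 * e.g. (sketch) propagating a change down the hierarchy with a pre-order
 * walk, under rcu_read_lock():
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		(update blkg here);
 *	rcu_read_unlock();
 */
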
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

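/*
 * Together the four helpers above pair up across a request's lifetime
 * roughly as follows (sketch of the usage, with queue_lock held):
 *
 *	rl = blk_get_rl(q, bio);	(at allocation time)
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));	(when the request is freed)
 */
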
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

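/*
 * e.g. (sketch) visiting every request_list of @q under queue_lock:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		(inspect or drain rl);
 */
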
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

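/*
 * A policy typically embeds blkg_stat in its private data, bumps it from
 * its serialized fast path and reads it locklessly when reporting
 * (sketch; the "serviced" field is hypothetical):
 *
 *	blkg_stat_init(&fg->serviced);
 *	blkg_stat_add(&fg->serviced, 1);	(caller serializes adds)
 *	v = blkg_stat_read(&fg->serviced);	(no locking required)
 */
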
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

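/*
 * The @rw mask usually comes straight from the request or bio being
 * accounted, e.g. (sketch; the "serviced" field is hypothetical):
 *
 *	blkg_rwstat_add(&fg->serviced, rq->cmd_flags, 1);
 */
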
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */