#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
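
/*
 * Illustrative sketch (compiled out; not part of this header): per the
 * comment above, a policy's private data embeds struct blkg_policy_data
 * as its first member and sets pd_size accordingly.  "example_pd" and its
 * fields are hypothetical.
 */
#if 0
struct example_pd {
	struct blkg_policy_data	pd;		/* must come first */
	u64			limit;		/* hypothetical per-blkg setting */
	struct blkg_stat	time_spent;	/* policy-private counters */
	struct blkg_rwstat	serviced;
};
#endif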

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
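
/*
 * Illustrative sketch (compiled out; hypothetical, not an in-tree policy):
 * a minimal blkcg_policy tying the example_pd above to blkcg core.  plid
 * is assigned by blkcg_policy_register(); example_cftypes and
 * example_pd_init() are assumed to be provided by the policy.
 */
#if 0
static struct blkcg_policy example_policy = {
	.plid		= -1,	/* set by blkcg_policy_register() */
	.pd_size	= sizeof(struct example_pd),
	.cftypes	= example_cftypes,
	.pd_init_fn	= example_pd_init,
};
#endif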

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
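
/*
 * Illustrative sketch (compiled out): the usual lifecycle.  A policy
 * registers once at init time and is then activated per request_queue;
 * both steps can fail.  example_init()/example_attach_queue() are
 * hypothetical.
 */
#if 0
static int __init example_init(void)
{
	/* allocates example_policy.plid and adds its cftypes */
	return blkcg_policy_register(&example_policy);
}

static int example_attach_queue(struct request_queue *q)
{
	/* allocates pd_size bytes of private data on each of @q's blkgs */
	return blkcg_activate_policy(q, &example_policy);
}
#endif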

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
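
/*
 * Illustrative sketch (compiled out): the blkg_conf_prep()/blkg_conf_finish()
 * bracket for a "MAJ:MIN VAL" cgroup file write.  On success, ctx.blkg and
 * ctx.v stay valid until blkg_conf_finish().  example_set_limit() and the
 * limit field are hypothetical.
 */
#if 0
static int example_set_limit(struct blkcg *blkcg, const char *input)
{
	struct blkg_conf_ctx ctx;
	struct example_pd *epd;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, input, &ctx);
	if (ret)
		return ret;

	epd = container_of(blkg_to_pd(ctx.blkg, &example_policy),
			   struct example_pd, pd);
	epd->limit = ctx.v;

	blkg_conf_finish(&ctx);
	return 0;
}
#endif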

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	struct cgroup *pcg = blkcg->css.cgroup->parent;

	return pcg ? cgroup_to_blkcg(pcg) : NULL;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
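
/*
 * Illustrative sketch (compiled out): policies conventionally wrap
 * blkg_to_pd()/pd_to_blkg() in typed converters, as CFQ does for its
 * cfq_group.  blkg_to_epd()/epd_to_blkg() are hypothetical.
 */
#if 0
static inline struct example_pd *blkg_to_epd(struct blkcg_gq *blkg)
{
	struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);

	return pd ? container_of(pd, struct example_pd, pd) : NULL;
}

static inline struct blkcg_gq *epd_to_blkg(struct example_pd *epd)
{
	return pd_to_blkg(&epd->pd);
}
#endif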

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}
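
/*
 * Illustrative sketch (compiled out): blkg_path() returns 0 on success and
 * a negative errno otherwise, but @buf is printable either way, so callers
 * may log unconditionally.  example_log_blkg() is hypothetical.
 */
#if 0
static void example_log_blkg(struct blkcg_gq *blkg)
{
	char path[128];		/* arbitrary buffer size */

	blkg_path(blkg, path, sizeof(path));
	pr_debug("blkg %s\n", path);	/* "<unavailable>" on failure */
}
#endif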

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
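
/*
 * Illustrative sketch (compiled out): pinning a blkg beyond a locked
 * region.  Both helpers are hypothetical; note that blkg_put(), like
 * blkg_get(), must run under queue_lock.
 */
#if 0
static struct blkcg_gq *example_pin_blkg(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	blkg_get(blkg);			/* caller already holds a ref */
	return blkg;
}

static void example_unpin_blkg(struct blkcg_gq *blkg)
{
	struct request_queue *q = blkg->q;

	spin_lock_irq(q->queue_lock);
	blkg_put(blkg);			/* may end in __blkg_release() */
	spin_unlock_irq(q->queue_lock);
}
#endif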

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
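
/*
 * Illustrative sketch (compiled out): visiting every request_list of a
 * queue with the iterator above.  example_drain_rl() is a hypothetical
 * per-rl operation.
 */
#if 0
static void example_drain_all_rls(struct request_queue *q)
{
	struct request_list *rl;

	lockdep_assert_held(q->queue_lock);
	blk_queue_for_each_rl(rl, q)
		example_drain_rl(rl);
}
#endif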

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}
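
/*
 * Illustrative sketch (compiled out): a serialized hot path updates a
 * blkg_stat while readers sample it locklessly.  blkg_to_epd() and the
 * time_spent field are the hypothetical helpers sketched above.
 */
#if 0
static void example_charge_time(struct blkcg_gq *blkg, u64 ns)
{
	/* callers are serialized per blkg, as blkg_stat_add() requires */
	blkg_stat_add(&blkg_to_epd(blkg)->time_spent, ns);
}

static u64 example_read_time(struct blkcg_gq *blkg)
{
	/* lockless; the syncp seqcount papers over 32-bit tearing */
	return blkg_stat_read(&blkg_to_epd(blkg)->time_spent);
}
#endif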

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
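
/*
 * Illustrative sketch (compiled out): accounting one bio.  On kernels of
 * this vintage bio->bi_rw carries the REQ_WRITE/REQ_SYNC bits that
 * blkg_rwstat_add() inspects; example_account_bio() is hypothetical.
 */
#if 0
static void example_account_bio(struct blkcg_gq *blkg, struct bio *bio)
{
	blkg_rwstat_add(&blkg_to_epd(blkg)->serviced, bio->bi_rw, 1);
}
#endif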

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */