/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
#ifndef __GENKSYMS__
#include <linux/blkdev.h>
#endif

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq __rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones. It is used to carry over the stats of dead children.
 */
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

struct blkg_rwstat_sample {
	u64				cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods. A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning; see the illustrative sketch below.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
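
/*
 * Illustrative sketch only -- the "example_" names below are hypothetical and
 * not part of this interface. A policy's pd_alloc_fn() would typically
 * allocate a larger structure embedding blkg_policy_data (conventionally as
 * its first member) and convert back with container_of():
 */
struct example_blkg_data {
	struct blkg_policy_data		pd;	/* embedded, conventionally first */
	u64				example_private_stat;	/* policy-private state */
};

static inline struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
}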

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods. A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning; see the
 * sketch below. cpd_init() is invoked to let each policy handle per-blkcg
 * data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
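
/*
 * Likewise, a hypothetical sketch for per-blkcg data (names are illustrative,
 * not part of this interface): cpd_alloc_fn() would embed blkcg_policy_data
 * in a larger, policy-owned structure.
 */
struct example_blkcg_data {
	struct blkcg_policy_data	cpd;	/* embedded, conventionally first */
	unsigned int			example_default_weight;	/* policy-private state */
};

static inline struct example_blkcg_data *cpd_to_example(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct example_blkcg_data, cpd) : NULL;
}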

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
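
/*
 * Purely illustrative sketch of how a policy ties the pieces together (every
 * "example_" identifier is hypothetical): the policy fills a file-scope
 * struct blkcg_policy with its callbacks, registers it once at init time and
 * then activates it per request_queue:
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_offline_fn	= example_pd_offline,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_example);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_example);
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_example);
 *	blkcg_policy_unregister(&blkcg_policy_example);
 */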

static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
					   unsigned int idx)
{
	return atomic64_read(&rwstat->aux_cnt[idx]) +
		percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
			       int off, struct blkg_rwstat_sample *sum);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
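
/*
 * Hypothetical usage sketch (only the blkg_conf_*() helpers above are real;
 * the function and variable names here are illustrative): a policy's cgroup
 * file write handler typically resolves the "MAJ:MIN body" input with
 * blkg_conf_prep(), updates its per-blkg data through ctx.blkg / ctx.body,
 * and then drops the queue lock and disk reference with blkg_conf_finish().
 */
static inline int example_blkg_conf_write(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg and ctx.body are valid here, with the queue lock held */

	blkg_conf_finish(&ctx);
	return 0;
}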

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use. The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio. This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it. However, the latter potentially gets
 * it from task_css(). This can race against task migration and the cgroup
 * dying. It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg. The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio. Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
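
/*
 * An illustrative helper (hypothetical, not part of this interface) showing
 * the selection described above: run against the queue's root blkg when
 * bio_issue_as_root_blkg() says so, otherwise stay on the bio's own blkg.
 * Any backcharging to the originating blkg would happen at completion time.
 */
static inline struct blkcg_gq *example_bio_issue_blkg(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg && bio_issue_as_root_blkg(bio))
		return blkg->q->root_blkg;
	return blkg;
}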

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. It looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg. We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected. As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid. This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
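
/*
 * A minimal sketch (hypothetical name) tying the lookup and reference helpers
 * together: look the blkg up under RCU, pin it with blkg_tryget() so it stays
 * valid after the RCU section ends, and leave it to the caller to drop the
 * reference with blkg_put().
 */
static inline struct blkcg_gq *example_blkg_lookup_and_get(struct blkcg *blkcg,
							   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg && !blkg_tryget(blkg))
		blkg = NULL;
	rcu_read_unlock();

	return blkg;	/* caller must blkg_put() the result when non-NULL */
}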

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @op. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where to store the current snapshot
 *
 * Read the current snapshot of @rwstat into @result.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
				    struct blkg_rwstat_sample *result)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		result->cnt[i] =
			percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat_sample tmp = { };

	blkg_rwstat_read(rwstat, &tmp);
	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay. If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble. We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */