1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 
16 /* Max dispatch from a group in 1 round */
17 static int throtl_grp_quantum = 8;
18 
19 /* Total max dispatch from all groups in one round */
20 static int throtl_quantum = 32;
21 
22 /* Throttling is performed over a slice and after that slice is renewed */
23 #define DFL_THROTL_SLICE_HD (HZ / 10)
24 #define DFL_THROTL_SLICE_SSD (HZ / 50)
25 #define MAX_THROTL_SLICE (HZ)
26 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
27 #define MIN_THROTL_BPS (320 * 1024)
28 #define MIN_THROTL_IOPS (10)
29 #define DFL_LATENCY_TARGET (-1L)
30 #define DFL_IDLE_THRESHOLD (0)
31 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
32 #define LATENCY_FILTERED_SSD (0)
33 /*
34  * For HD, very small latencies come from sequential IO. Such IO doesn't
35  * help determine whether the cgroup's IO is impacted by others, hence we ignore it
36  */
37 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
38 
39 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
40 
41 static struct blkcg_policy blkcg_policy_throtl;
42 
43 /* A workqueue to queue throttle related work */
44 static struct workqueue_struct *kthrotld_workqueue;
45 
46 /*
47  * To implement hierarchical throttling, throtl_grps form a tree and bios
48  * are dispatched upwards level by level until they reach the top and get
49  * issued.  When dispatching bios from the children and local group at each
50  * level, if the bios are dispatched into a single bio_list, there's a risk
51  * that a local or child group which can queue many bios at once fills up
52  * the list and starves the others.
53  *
54  * To avoid such starvation, dispatched bios are queued separately
55  * according to where they came from.  When they are again dispatched to
56  * the parent, they're popped in round-robin order so that no single source
57  * hogs the dispatch window.
58  *
59  * throtl_qnode is used to keep the queued bios separated by their sources.
60  * Bios are queued to throtl_qnode which in turn is queued to
61  * throtl_service_queue and then dispatched in round-robin order.
62  *
63  * It's also used to track the reference counts on blkg's.  A qnode always
64  * belongs to a throtl_grp and gets queued on itself or the parent, so
65  * incrementing the reference of the associated throtl_grp when a qnode is
66  * queued and decrementing when dequeued is enough to keep the whole blkg
67  * tree pinned while bios are in flight.
68  */
69 struct throtl_qnode {
70 	struct list_head	node;		/* service_queue->queued[] */
71 	struct bio_list		bios;		/* queued bios */
72 	struct throtl_grp	*tg;		/* tg this qnode belongs to */
73 };
74 
75 struct throtl_service_queue {
76 	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
77 
78 	/*
79 	 * Bios queued directly to this service_queue or dispatched from
80 	 * children throtl_grp's.
81 	 */
82 	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
83 	unsigned int		nr_queued[2];	/* number of queued bios */
84 
85 	/*
86 	 * RB tree of active children throtl_grp's, which are sorted by
87 	 * their ->disptime.
88 	 */
89 	struct rb_root		pending_tree;	/* RB tree of active tgs */
90 	struct rb_node		*first_pending;	/* first node in the tree */
91 	unsigned int		nr_pending;	/* # queued in the tree */
92 	unsigned long		first_pending_disptime;	/* disptime of the first tg */
93 	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
94 };
95 
96 enum tg_state_flags {
97 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
98 	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
99 };
100 
101 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
102 
103 enum {
104 	LIMIT_LOW,
105 	LIMIT_MAX,
106 	LIMIT_CNT,
107 };
108 
109 struct throtl_grp {
110 	/* must be the first member */
111 	struct blkg_policy_data pd;
112 
113 	/* active throtl group service_queue member */
114 	struct rb_node rb_node;
115 
116 	/* throtl_data this group belongs to */
117 	struct throtl_data *td;
118 
119 	/* this group's service queue */
120 	struct throtl_service_queue service_queue;
121 
122 	/*
123 	 * qnode_on_self is used when bios are directly queued to this
124 	 * throtl_grp so that local bios compete fairly with bios
125 	 * dispatched from children.  qnode_on_parent is used when bios are
126 	 * dispatched from this throtl_grp into its parent and will compete
127 	 * with the sibling qnode_on_parents and the parent's
128 	 * qnode_on_self.
129 	 */
130 	struct throtl_qnode qnode_on_self[2];
131 	struct throtl_qnode qnode_on_parent[2];
132 
133 	/*
134 	 * Dispatch time in jiffies. This is the estimated time when the group
135 	 * will unthrottle and be ready to dispatch more bios. It is used as the
136 	 * key to sort active groups in the service tree.
137 	 */
138 	unsigned long disptime;
139 
140 	unsigned int flags;
141 
142 	/* are there any throtl rules between this group and td? */
143 	bool has_rules[2];
144 
145 	/* internally used bytes per second rate limits */
146 	uint64_t bps[2][LIMIT_CNT];
147 	/* user configured bps limits */
148 	uint64_t bps_conf[2][LIMIT_CNT];
149 
150 	/* internally used IOPS limits */
151 	unsigned int iops[2][LIMIT_CNT];
152 	/* user configured IOPS limits */
153 	unsigned int iops_conf[2][LIMIT_CNT];
154 
155 	/* Number of bytes dispatched in current slice */
156 	uint64_t bytes_disp[2];
157 	/* Number of bio's dispatched in current slice */
158 	unsigned int io_disp[2];
159 
160 	unsigned long last_low_overflow_time[2];
161 
162 	uint64_t last_bytes_disp[2];
163 	unsigned int last_io_disp[2];
164 
165 	unsigned long last_check_time;
166 
167 	unsigned long latency_target; /* us */
168 	unsigned long latency_target_conf; /* us */
169 	/* When did we start a new slice */
170 	unsigned long slice_start[2];
171 	unsigned long slice_end[2];
172 
173 	unsigned long last_finish_time; /* ns / 1024 */
174 	unsigned long checked_last_finish_time; /* ns / 1024 */
175 	unsigned long avg_idletime; /* ns / 1024 */
176 	unsigned long idletime_threshold; /* us */
177 	unsigned long idletime_threshold_conf; /* us */
178 
179 	unsigned int bio_cnt; /* total bios */
180 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
181 	unsigned long bio_cnt_reset_time;
182 };
183 
184 /* We measure latency for request size from <= 4k to >= 1M */
185 #define LATENCY_BUCKET_SIZE 9
186 
187 struct latency_bucket {
188 	unsigned long total_latency; /* ns / 1024 */
189 	int samples;
190 };
191 
192 struct avg_latency_bucket {
193 	unsigned long latency; /* ns / 1024 */
194 	bool valid;
195 };
196 
197 struct throtl_data
198 {
199 	/* service tree for active throtl groups */
200 	struct throtl_service_queue service_queue;
201 
202 	struct request_queue *queue;
203 
204 	/* Total Number of queued bios on READ and WRITE lists */
205 	unsigned int nr_queued[2];
206 
207 	unsigned int throtl_slice;
208 
209 	/* Work for dispatching throttled bios */
210 	struct work_struct dispatch_work;
211 	unsigned int limit_index;
212 	bool limit_valid[LIMIT_CNT];
213 
214 	unsigned long low_upgrade_time;
215 	unsigned long low_downgrade_time;
216 
217 	unsigned int scale;
218 
219 	struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
220 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
221 	struct latency_bucket __percpu *latency_buckets;
222 	unsigned long last_calculate_time;
223 	unsigned long filtered_latency;
224 
225 	bool track_bio_latency;
226 };
227 
228 static void throtl_pending_timer_fn(unsigned long arg);
229 
230 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
231 {
232 	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
233 }
234 
235 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
236 {
237 	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
238 }
239 
240 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
241 {
242 	return pd_to_blkg(&tg->pd);
243 }
244 
245 /**
246  * sq_to_tg - return the throtl_grp the specified service queue belongs to
247  * @sq: the throtl_service_queue of interest
248  *
249  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
250  * embedded in throtl_data, %NULL is returned.
251  */
252 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
253 {
254 	if (sq && sq->parent_sq)
255 		return container_of(sq, struct throtl_grp, service_queue);
256 	else
257 		return NULL;
258 }
259 
260 /**
261  * sq_to_td - return throtl_data the specified service queue belongs to
262  * @sq: the throtl_service_queue of interest
263  *
264  * A service_queue can be embedded in either a throtl_grp or throtl_data.
265  * Determine the associated throtl_data accordingly and return it.
266  */
267 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
268 {
269 	struct throtl_grp *tg = sq_to_tg(sq);
270 
271 	if (tg)
272 		return tg->td;
273 	else
274 		return container_of(sq, struct throtl_data, service_queue);
275 }
276 
277 /*
278  * A cgroup's LIMIT_MAX limit is scaled if a low limit is set. This scaling
279  * makes IO dispatch smoother.
280  * Scale up: scale up linearly with the time elapsed since the upgrade. For
281  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
282  *           until it hits the .max limit.
283  * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit.
284  */
285 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
286 {
287 	/* arbitrary value to avoid too big scale */
288 	if (td->scale < 4096 && time_after_eq(jiffies,
289 	    td->low_upgrade_time + td->scale * td->throtl_slice))
290 		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
291 
292 	return low + (low >> 1) * td->scale;
293 }
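/*
 * Illustrative example (not part of the original source): with a .low limit
 * of 10 MB/s and throtl_slice = HZ / 10 (100ms), three slices after the
 * upgrade td->scale becomes 3, so the adjusted limit is
 * 10 MB/s + (10 MB/s / 2) * 3 = 25 MB/s.  The callers (tg_bps_limit() and
 * tg_iops_limit()) still cap the result at the configured .max limit.
 */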
294 
295 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
296 {
297 	struct blkcg_gq *blkg = tg_to_blkg(tg);
298 	struct throtl_data *td;
299 	uint64_t ret;
300 
301 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
302 		return U64_MAX;
303 
304 	td = tg->td;
305 	ret = tg->bps[rw][td->limit_index];
306 	if (ret == 0 && td->limit_index == LIMIT_LOW) {
307 		/* intermediate node or iops isn't 0 */
308 		if (!list_empty(&blkg->blkcg->css.children) ||
309 		    tg->iops[rw][td->limit_index])
310 			return U64_MAX;
311 		else
312 			return MIN_THROTL_BPS;
313 	}
314 
315 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
316 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
317 		uint64_t adjusted;
318 
319 		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
320 		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
321 	}
322 	return ret;
323 }
324 
325 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
326 {
327 	struct blkcg_gq *blkg = tg_to_blkg(tg);
328 	struct throtl_data *td;
329 	unsigned int ret;
330 
331 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
332 		return UINT_MAX;
333 
334 	td = tg->td;
335 	ret = tg->iops[rw][td->limit_index];
336 	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
337 		/* intermediate node or bps isn't 0 */
338 		if (!list_empty(&blkg->blkcg->css.children) ||
339 		    tg->bps[rw][td->limit_index])
340 			return UINT_MAX;
341 		else
342 			return MIN_THROTL_IOPS;
343 	}
344 
345 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
346 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
347 		uint64_t adjusted;
348 
349 		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
350 		if (adjusted > UINT_MAX)
351 			adjusted = UINT_MAX;
352 		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
353 	}
354 	return ret;
355 }
356 
357 #define request_bucket_index(sectors) \
358 	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
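/*
 * Worked example (illustrative only): a 4KB request is 8 sectors, so
 * order_base_2(8) - 3 = 0 and it lands in bucket 0; a 1MB request is 2048
 * sectors, so order_base_2(2048) - 3 = 8, the last bucket.  Anything larger
 * is clamped to LATENCY_BUCKET_SIZE - 1, matching the "<= 4k to >= 1M"
 * range noted above.
 */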
359 
360 /**
361  * throtl_log - log debug message via blktrace
362  * @sq: the service_queue being reported
363  * @fmt: printf format string
364  * @args: printf args
365  *
366  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
367  * throtl_grp; otherwise, just "throtl".
368  */
369 #define throtl_log(sq, fmt, args...)	do {				\
370 	struct throtl_grp *__tg = sq_to_tg((sq));			\
371 	struct throtl_data *__td = sq_to_td((sq));			\
372 									\
373 	(void)__td;							\
374 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
375 		break;							\
376 	if ((__tg)) {							\
377 		blk_add_cgroup_trace_msg(__td->queue,			\
378 			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
379 	} else {							\
380 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
381 	}								\
382 } while (0)
383 
384 static inline unsigned int throtl_bio_data_size(struct bio *bio)
385 {
386 	/* assume it's one sector */
387 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
388 		return 512;
389 	return bio->bi_iter.bi_size;
390 }
391 
392 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
393 {
394 	INIT_LIST_HEAD(&qn->node);
395 	bio_list_init(&qn->bios);
396 	qn->tg = tg;
397 }
398 
399 /**
400  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
401  * @bio: bio being added
402  * @qn: qnode to add bio to
403  * @queued: the service_queue->queued[] list @qn belongs to
404  *
405  * Add @bio to @qn and put @qn on @queued if it's not already on.
406  * @qn->tg's reference count is bumped when @qn is activated.  See the
407  * comment on top of throtl_qnode definition for details.
408  */
409 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
410 				 struct list_head *queued)
411 {
412 	bio_list_add(&qn->bios, bio);
413 	if (list_empty(&qn->node)) {
414 		list_add_tail(&qn->node, queued);
415 		blkg_get(tg_to_blkg(qn->tg));
416 	}
417 }
418 
419 /**
420  * throtl_peek_queued - peek the first bio on a qnode list
421  * @queued: the qnode list to peek
422  */
423 static struct bio *throtl_peek_queued(struct list_head *queued)
424 {
425 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
426 	struct bio *bio;
427 
428 	if (list_empty(queued))
429 		return NULL;
430 
431 	bio = bio_list_peek(&qn->bios);
432 	WARN_ON_ONCE(!bio);
433 	return bio;
434 }
435 
436 /**
437  * throtl_pop_queued - pop the first bio from a qnode list
438  * @queued: the qnode list to pop a bio from
439  * @tg_to_put: optional out argument for throtl_grp to put
440  *
441  * Pop the first bio from the qnode list @queued.  After popping, the first
442  * qnode is removed from @queued if empty or moved to the end of @queued so
443  * that the popping order is round-robin.
444  *
445  * When the first qnode is removed, its associated throtl_grp should be put
446  * too.  If @tg_to_put is NULL, this function automatically puts it;
447  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
448  * responsible for putting it.
449  */
450 static struct bio *throtl_pop_queued(struct list_head *queued,
451 				     struct throtl_grp **tg_to_put)
452 {
453 	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
454 	struct bio *bio;
455 
456 	if (list_empty(queued))
457 		return NULL;
458 
459 	bio = bio_list_pop(&qn->bios);
460 	WARN_ON_ONCE(!bio);
461 
462 	if (bio_list_empty(&qn->bios)) {
463 		list_del_init(&qn->node);
464 		if (tg_to_put)
465 			*tg_to_put = qn->tg;
466 		else
467 			blkg_put(tg_to_blkg(qn->tg));
468 	} else {
469 		list_move_tail(&qn->node, queued);
470 	}
471 
472 	return bio;
473 }
474 
475 /* init a service_queue, assumes the caller zeroed it */
476 static void throtl_service_queue_init(struct throtl_service_queue *sq)
477 {
478 	INIT_LIST_HEAD(&sq->queued[0]);
479 	INIT_LIST_HEAD(&sq->queued[1]);
480 	sq->pending_tree = RB_ROOT;
481 	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
482 		    (unsigned long)sq);
483 }
484 
485 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
486 {
487 	struct throtl_grp *tg;
488 	int rw;
489 
490 	tg = kzalloc_node(sizeof(*tg), gfp, node);
491 	if (!tg)
492 		return NULL;
493 
494 	throtl_service_queue_init(&tg->service_queue);
495 
496 	for (rw = READ; rw <= WRITE; rw++) {
497 		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
498 		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
499 	}
500 
501 	RB_CLEAR_NODE(&tg->rb_node);
502 	tg->bps[READ][LIMIT_MAX] = U64_MAX;
503 	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
504 	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
505 	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
506 	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
507 	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
508 	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
509 	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
510 	/* LIMIT_LOW will have default value 0 */
511 
512 	tg->latency_target = DFL_LATENCY_TARGET;
513 	tg->latency_target_conf = DFL_LATENCY_TARGET;
514 	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
515 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
516 
517 	return &tg->pd;
518 }
519 
520 static void throtl_pd_init(struct blkg_policy_data *pd)
521 {
522 	struct throtl_grp *tg = pd_to_tg(pd);
523 	struct blkcg_gq *blkg = tg_to_blkg(tg);
524 	struct throtl_data *td = blkg->q->td;
525 	struct throtl_service_queue *sq = &tg->service_queue;
526 
527 	/*
528 	 * If on the default hierarchy, we switch to properly hierarchical
529 	 * behavior where limits on a given throtl_grp are applied to the
530 	 * whole subtree rather than just the group itself.  e.g. If 16M
531 	 * read_bps limit is set on the root group, the whole system can't
532 	 * exceed 16M for the device.
533 	 *
534 	 * If not on the default hierarchy, the broken flat hierarchy
535 	 * behavior is retained where all throtl_grps are treated as if
536 	 * they're all separate root groups right below throtl_data.
537 	 * Limits of a group don't interact with limits of other groups
538 	 * regardless of the position of the group in the hierarchy.
539 	 */
540 	sq->parent_sq = &td->service_queue;
541 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
542 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
543 	tg->td = td;
544 }
545 
546 /*
547  * Set has_rules[] if @tg or any of its parents have limits configured.
548  * This doesn't require walking up to the top of the hierarchy as the
549  * parent's has_rules[] is guaranteed to be correct.
550  */
551 static void tg_update_has_rules(struct throtl_grp *tg)
552 {
553 	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
554 	struct throtl_data *td = tg->td;
555 	int rw;
556 
557 	for (rw = READ; rw <= WRITE; rw++)
558 		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
559 			(td->limit_valid[td->limit_index] &&
560 			 (tg_bps_limit(tg, rw) != U64_MAX ||
561 			  tg_iops_limit(tg, rw) != UINT_MAX));
562 }
563 
564 static void throtl_pd_online(struct blkg_policy_data *pd)
565 {
566 	struct throtl_grp *tg = pd_to_tg(pd);
567 	/*
568 	 * We don't want new groups to escape the limits of their ancestors.
569 	 * Update has_rules[] after a new group is brought online.
570 	 */
571 	tg_update_has_rules(tg);
572 }
573 
574 static void blk_throtl_update_limit_valid(struct throtl_data *td)
575 {
576 	struct cgroup_subsys_state *pos_css;
577 	struct blkcg_gq *blkg;
578 	bool low_valid = false;
579 
580 	rcu_read_lock();
581 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
582 		struct throtl_grp *tg = blkg_to_tg(blkg);
583 
584 		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
585 		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
586 			low_valid = true;
587 	}
588 	rcu_read_unlock();
589 
590 	td->limit_valid[LIMIT_LOW] = low_valid;
591 }
592 
593 static void throtl_upgrade_state(struct throtl_data *td);
594 static void throtl_pd_offline(struct blkg_policy_data *pd)
595 {
596 	struct throtl_grp *tg = pd_to_tg(pd);
597 
598 	tg->bps[READ][LIMIT_LOW] = 0;
599 	tg->bps[WRITE][LIMIT_LOW] = 0;
600 	tg->iops[READ][LIMIT_LOW] = 0;
601 	tg->iops[WRITE][LIMIT_LOW] = 0;
602 
603 	blk_throtl_update_limit_valid(tg->td);
604 
605 	if (!tg->td->limit_valid[tg->td->limit_index])
606 		throtl_upgrade_state(tg->td);
607 }
608 
609 static void throtl_pd_free(struct blkg_policy_data *pd)
610 {
611 	struct throtl_grp *tg = pd_to_tg(pd);
612 
613 	del_timer_sync(&tg->service_queue.pending_timer);
614 	kfree(tg);
615 }
616 
617 static struct throtl_grp *
618 throtl_rb_first(struct throtl_service_queue *parent_sq)
619 {
620 	/* Service tree is empty */
621 	if (!parent_sq->nr_pending)
622 		return NULL;
623 
624 	if (!parent_sq->first_pending)
625 		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
626 
627 	if (parent_sq->first_pending)
628 		return rb_entry_tg(parent_sq->first_pending);
629 
630 	return NULL;
631 }
632 
633 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
634 {
635 	rb_erase(n, root);
636 	RB_CLEAR_NODE(n);
637 }
638 
639 static void throtl_rb_erase(struct rb_node *n,
640 			    struct throtl_service_queue *parent_sq)
641 {
642 	if (parent_sq->first_pending == n)
643 		parent_sq->first_pending = NULL;
644 	rb_erase_init(n, &parent_sq->pending_tree);
645 	--parent_sq->nr_pending;
646 }
647 
648 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
649 {
650 	struct throtl_grp *tg;
651 
652 	tg = throtl_rb_first(parent_sq);
653 	if (!tg)
654 		return;
655 
656 	parent_sq->first_pending_disptime = tg->disptime;
657 }
658 
659 static void tg_service_queue_add(struct throtl_grp *tg)
660 {
661 	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
662 	struct rb_node **node = &parent_sq->pending_tree.rb_node;
663 	struct rb_node *parent = NULL;
664 	struct throtl_grp *__tg;
665 	unsigned long key = tg->disptime;
666 	int left = 1;
667 
668 	while (*node != NULL) {
669 		parent = *node;
670 		__tg = rb_entry_tg(parent);
671 
672 		if (time_before(key, __tg->disptime))
673 			node = &parent->rb_left;
674 		else {
675 			node = &parent->rb_right;
676 			left = 0;
677 		}
678 	}
679 
680 	if (left)
681 		parent_sq->first_pending = &tg->rb_node;
682 
683 	rb_link_node(&tg->rb_node, parent, node);
684 	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
685 }
686 
687 static void __throtl_enqueue_tg(struct throtl_grp *tg)
688 {
689 	tg_service_queue_add(tg);
690 	tg->flags |= THROTL_TG_PENDING;
691 	tg->service_queue.parent_sq->nr_pending++;
692 }
693 
694 static void throtl_enqueue_tg(struct throtl_grp *tg)
695 {
696 	if (!(tg->flags & THROTL_TG_PENDING))
697 		__throtl_enqueue_tg(tg);
698 }
699 
700 static void __throtl_dequeue_tg(struct throtl_grp *tg)
701 {
702 	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
703 	tg->flags &= ~THROTL_TG_PENDING;
704 }
705 
706 static void throtl_dequeue_tg(struct throtl_grp *tg)
707 {
708 	if (tg->flags & THROTL_TG_PENDING)
709 		__throtl_dequeue_tg(tg);
710 }
711 
712 /* Call with queue lock held */
713 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
714 					  unsigned long expires)
715 {
716 	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
717 
718 	/*
719 	 * Since we are adjusting the throttle limit dynamically, the sleep
720 	 * time calculated according to the previous limit might be invalid. It's
721 	 * possible the cgroup's sleep time is very long while no other cgroups
722 	 * have IO running, so nothing would notify it of the limit change. Make
723 	 * sure the cgroup doesn't sleep too long and miss that notification.
724 	 */
725 	if (time_after(expires, max_expire))
726 		expires = max_expire;
727 	mod_timer(&sq->pending_timer, expires);
728 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
729 		   expires - jiffies, jiffies);
730 }
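/*
 * Example (illustrative only): with throtl_slice = HZ / 10, max_expire is
 * roughly 800ms in the future, so even if the computed dispatch time is
 * several seconds away the group re-evaluates its (possibly changed) limits
 * within about eight slices.
 */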
731 
732 /**
733  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
734  * @sq: the service_queue to schedule dispatch for
735  * @force: force scheduling
736  *
737  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
738  * dispatch time of the first pending child.  Returns %true if either timer
739  * is armed or there's no pending child left.  %false if the current
740  * dispatch window is still open and the caller should continue
741  * dispatching.
742  *
743  * If @force is %true, the dispatch timer is always scheduled and this
744  * function is guaranteed to return %true.  This is to be used when the
745  * caller can't dispatch itself and needs to invoke pending_timer
746  * unconditionally.  Note that forced scheduling is likely to induce short
747  * delay before dispatch starts even if @sq->first_pending_disptime is not
748  * in the future and thus shouldn't be used in hot paths.
749  */
750 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
751 					  bool force)
752 {
753 	/* any pending children left? */
754 	if (!sq->nr_pending)
755 		return true;
756 
757 	update_min_dispatch_time(sq);
758 
759 	/* is the next dispatch time in the future? */
760 	if (force || time_after(sq->first_pending_disptime, jiffies)) {
761 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
762 		return true;
763 	}
764 
765 	/* tell the caller to continue dispatching */
766 	return false;
767 }
768 
769 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
770 		bool rw, unsigned long start)
771 {
772 	tg->bytes_disp[rw] = 0;
773 	tg->io_disp[rw] = 0;
774 
775 	/*
776 	 * Previous slice has expired. We must have trimmed it after last
777 	 * bio dispatch. That means since start of last slice, we never used
778 	 * that bandwidth. Do try to make use of that bandwidth while giving
779 	 * credit.
780 	 */
781 	if (time_after_eq(start, tg->slice_start[rw]))
782 		tg->slice_start[rw] = start;
783 
784 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
785 	throtl_log(&tg->service_queue,
786 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
787 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
788 		   tg->slice_end[rw], jiffies);
789 }
790 
791 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
792 {
793 	tg->bytes_disp[rw] = 0;
794 	tg->io_disp[rw] = 0;
795 	tg->slice_start[rw] = jiffies;
796 	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
797 	throtl_log(&tg->service_queue,
798 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
799 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
800 		   tg->slice_end[rw], jiffies);
801 }
802 
803 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
804 					unsigned long jiffy_end)
805 {
806 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
807 }
808 
809 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
810 				       unsigned long jiffy_end)
811 {
812 	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
813 	throtl_log(&tg->service_queue,
814 		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
815 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
816 		   tg->slice_end[rw], jiffies);
817 }
818 
819 /* Determine if previously allocated or extended slice is complete or not */
820 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
821 {
822 	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
823 		return false;
824 
825 	return true;
826 }
827 
828 /* Trim the used slices and adjust slice start accordingly */
829 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
830 {
831 	unsigned long nr_slices, time_elapsed, io_trim;
832 	u64 bytes_trim, tmp;
833 
834 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
835 
836 	/*
837 	 * If bps is unlimited (-1), then the time slice doesn't get
838 	 * renewed. Don't try to trim the slice if the slice has expired. A new
839 	 * slice will start when appropriate.
840 	 */
841 	if (throtl_slice_used(tg, rw))
842 		return;
843 
844 	/*
845 	 * A bio has been dispatched. Also adjust slice_end. It might happen
846 	 * that initially the cgroup limit was very low, resulting in a high
847 	 * slice_end, but later the limit was bumped up and the bio was dispatched
848 	 * sooner; then we need to reduce slice_end. A high bogus slice_end
849 	 * is bad because it does not allow a new slice to start.
850 	 */
851 
852 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
853 
854 	time_elapsed = jiffies - tg->slice_start[rw];
855 
856 	nr_slices = time_elapsed / tg->td->throtl_slice;
857 
858 	if (!nr_slices)
859 		return;
860 	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
861 	do_div(tmp, HZ);
862 	bytes_trim = tmp;
863 
864 	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
865 		HZ;
866 
867 	if (!bytes_trim && !io_trim)
868 		return;
869 
870 	if (tg->bytes_disp[rw] >= bytes_trim)
871 		tg->bytes_disp[rw] -= bytes_trim;
872 	else
873 		tg->bytes_disp[rw] = 0;
874 
875 	if (tg->io_disp[rw] >= io_trim)
876 		tg->io_disp[rw] -= io_trim;
877 	else
878 		tg->io_disp[rw] = 0;
879 
880 	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
881 
882 	throtl_log(&tg->service_queue,
883 		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
884 		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
885 		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
886 }
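/*
 * Worked example (illustrative only): with a bps limit of 1 MiB/s and
 * throtl_slice = HZ / 10 (100ms), if 250ms have elapsed since slice_start
 * then nr_slices = 2 and bytes_trim = 1048576 * (HZ / 10) * 2 / HZ, i.e.
 * about 200 KiB of already-accounted budget is forgiven and slice_start
 * advances by two slices.
 */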
887 
888 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
889 				  unsigned long *wait)
890 {
891 	bool rw = bio_data_dir(bio);
892 	unsigned int io_allowed;
893 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
894 	u64 tmp;
895 
896 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
897 
898 	/* Slice has just started. Consider one slice interval */
899 	if (!jiffy_elapsed)
900 		jiffy_elapsed_rnd = tg->td->throtl_slice;
901 
902 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
903 
904 	/*
905 	 * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
906 	 * 1, so at most the elapsed jiffies should be the equivalent of 1 second,
907 	 * as we will allow a dispatch after 1 second and after that the slice
908 	 * should have been trimmed.
909 	 */
910 
911 	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
912 	do_div(tmp, HZ);
913 
914 	if (tmp > UINT_MAX)
915 		io_allowed = UINT_MAX;
916 	else
917 		io_allowed = tmp;
918 
919 	if (tg->io_disp[rw] + 1 <= io_allowed) {
920 		if (wait)
921 			*wait = 0;
922 		return true;
923 	}
924 
925 	/* Calc approx time to dispatch */
926 	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
927 
928 	if (jiffy_wait > jiffy_elapsed)
929 		jiffy_wait = jiffy_wait - jiffy_elapsed;
930 	else
931 		jiffy_wait = 1;
932 
933 	if (wait)
934 		*wait = jiffy_wait;
935 	return false;
936 }
937 
938 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
939 				 unsigned long *wait)
940 {
941 	bool rw = bio_data_dir(bio);
942 	u64 bytes_allowed, extra_bytes, tmp;
943 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
944 	unsigned int bio_size = throtl_bio_data_size(bio);
945 
946 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
947 
948 	/* Slice has just started. Consider one slice interval */
949 	if (!jiffy_elapsed)
950 		jiffy_elapsed_rnd = tg->td->throtl_slice;
951 
952 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
953 
954 	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
955 	do_div(tmp, HZ);
956 	bytes_allowed = tmp;
957 
958 	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
959 		if (wait)
960 			*wait = 0;
961 		return true;
962 	}
963 
964 	/* Calc approx time to dispatch */
965 	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
966 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
967 
968 	if (!jiffy_wait)
969 		jiffy_wait = 1;
970 
971 	/*
972 	 * This wait time is without taking into consideration the rounding
973 	 * up we did. Add that time also.
974 	 */
975 	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
976 	if (wait)
977 		*wait = jiffy_wait;
978 	return false;
979 }
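/*
 * Worked example (illustrative only): with a 1 MiB/s limit and a slice that
 * has just started (jiffy_elapsed_rnd = throtl_slice = 100ms), bytes_allowed
 * is about 102 KiB.  A 256 KiB bio therefore has extra_bytes of roughly
 * 154 KiB, giving a wait of ~150ms plus the 100ms rounding term, so the bio
 * is delayed by about 250ms.
 */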
980 
981 /*
982  * Returns whether one can dispatch a bio or not. Also returns the approximate
983  * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
984  */
985 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
986 			    unsigned long *wait)
987 {
988 	bool rw = bio_data_dir(bio);
989 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
990 
991 	/*
992 	 * Currently the whole state machine of the group depends on the first
993 	 * bio queued in the group's bio list. So one should not call this
994 	 * function with a different bio if there are other bios
995 	 * queued.
996 	 */
997 	BUG_ON(tg->service_queue.nr_queued[rw] &&
998 	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
999 
1000 	/* If tg->bps = -1, then BW is unlimited */
1001 	if (tg_bps_limit(tg, rw) == U64_MAX &&
1002 	    tg_iops_limit(tg, rw) == UINT_MAX) {
1003 		if (wait)
1004 			*wait = 0;
1005 		return true;
1006 	}
1007 
1008 	/*
1009 	 * If previous slice expired, start a new one otherwise renew/extend
1010 	 * existing slice to make sure it is at least throtl_slice interval
1011 	 * long since now. New slice is started only for empty throttle group.
1012 	 * If there is queued bio, that means there should be an active
1013 	 * slice and it should be extended instead.
1014 	 */
1015 	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1016 		throtl_start_new_slice(tg, rw);
1017 	else {
1018 		if (time_before(tg->slice_end[rw],
1019 		    jiffies + tg->td->throtl_slice))
1020 			throtl_extend_slice(tg, rw,
1021 				jiffies + tg->td->throtl_slice);
1022 	}
1023 
1024 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1025 	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1026 		if (wait)
1027 			*wait = 0;
1028 		return true;
1029 	}
1030 
1031 	max_wait = max(bps_wait, iops_wait);
1032 
1033 	if (wait)
1034 		*wait = max_wait;
1035 
1036 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
1037 		throtl_extend_slice(tg, rw, jiffies + max_wait);
1038 
1039 	return false;
1040 }
1041 
1042 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1043 {
1044 	bool rw = bio_data_dir(bio);
1045 	unsigned int bio_size = throtl_bio_data_size(bio);
1046 
1047 	/* Charge the bio to the group */
1048 	tg->bytes_disp[rw] += bio_size;
1049 	tg->io_disp[rw]++;
1050 	tg->last_bytes_disp[rw] += bio_size;
1051 	tg->last_io_disp[rw]++;
1052 
1053 	/*
1054 	 * BIO_THROTTLED is used to prevent the same bio from being throttled
1055 	 * more than once as a throttled bio will go through blk-throtl the
1056 	 * second time when it eventually gets issued.  Set it when a bio
1057 	 * is being charged to a tg.
1058 	 */
1059 	if (!bio_flagged(bio, BIO_THROTTLED))
1060 		bio_set_flag(bio, BIO_THROTTLED);
1061 }
1062 
1063 /**
1064  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1065  * @bio: bio to add
1066  * @qn: qnode to use
1067  * @tg: the target throtl_grp
1068  *
1069  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1070  * tg->qnode_on_self[] is used.
1071  */
1072 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1073 			      struct throtl_grp *tg)
1074 {
1075 	struct throtl_service_queue *sq = &tg->service_queue;
1076 	bool rw = bio_data_dir(bio);
1077 
1078 	if (!qn)
1079 		qn = &tg->qnode_on_self[rw];
1080 
1081 	/*
1082 	 * If @tg doesn't currently have any bios queued in the same
1083 	 * direction, queueing @bio can change when @tg should be
1084 	 * dispatched.  Mark that @tg was empty.  This is automatically
1085 	 * cleared on the next tg_update_disptime().
1086 	 */
1087 	if (!sq->nr_queued[rw])
1088 		tg->flags |= THROTL_TG_WAS_EMPTY;
1089 
1090 	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1091 
1092 	sq->nr_queued[rw]++;
1093 	throtl_enqueue_tg(tg);
1094 }
1095 
1096 static void tg_update_disptime(struct throtl_grp *tg)
1097 {
1098 	struct throtl_service_queue *sq = &tg->service_queue;
1099 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1100 	struct bio *bio;
1101 
1102 	bio = throtl_peek_queued(&sq->queued[READ]);
1103 	if (bio)
1104 		tg_may_dispatch(tg, bio, &read_wait);
1105 
1106 	bio = throtl_peek_queued(&sq->queued[WRITE]);
1107 	if (bio)
1108 		tg_may_dispatch(tg, bio, &write_wait);
1109 
1110 	min_wait = min(read_wait, write_wait);
1111 	disptime = jiffies + min_wait;
1112 
1113 	/* Update dispatch time */
1114 	throtl_dequeue_tg(tg);
1115 	tg->disptime = disptime;
1116 	throtl_enqueue_tg(tg);
1117 
1118 	/* see throtl_add_bio_tg() */
1119 	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1120 }
1121 
1122 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1123 					struct throtl_grp *parent_tg, bool rw)
1124 {
1125 	if (throtl_slice_used(parent_tg, rw)) {
1126 		throtl_start_new_slice_with_credit(parent_tg, rw,
1127 				child_tg->slice_start[rw]);
1128 	}
1129 
1130 }
1131 
1132 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1133 {
1134 	struct throtl_service_queue *sq = &tg->service_queue;
1135 	struct throtl_service_queue *parent_sq = sq->parent_sq;
1136 	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1137 	struct throtl_grp *tg_to_put = NULL;
1138 	struct bio *bio;
1139 
1140 	/*
1141 	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1142 	 * from @tg may put its reference and @parent_sq might end up
1143 	 * getting released prematurely.  Remember the tg to put and put it
1144 	 * after @bio is transferred to @parent_sq.
1145 	 */
1146 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1147 	sq->nr_queued[rw]--;
1148 
1149 	throtl_charge_bio(tg, bio);
1150 
1151 	/*
1152 	 * If our parent is another tg, we just need to transfer @bio to
1153 	 * the parent using throtl_add_bio_tg().  If our parent is
1154 	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1155 	 * bio_lists[] and decrease total number queued.  The caller is
1156 	 * responsible for issuing these bios.
1157 	 */
1158 	if (parent_tg) {
1159 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1160 		start_parent_slice_with_credit(tg, parent_tg, rw);
1161 	} else {
1162 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1163 				     &parent_sq->queued[rw]);
1164 		BUG_ON(tg->td->nr_queued[rw] <= 0);
1165 		tg->td->nr_queued[rw]--;
1166 	}
1167 
1168 	throtl_trim_slice(tg, rw);
1169 
1170 	if (tg_to_put)
1171 		blkg_put(tg_to_blkg(tg_to_put));
1172 }
1173 
1174 static int throtl_dispatch_tg(struct throtl_grp *tg)
1175 {
1176 	struct throtl_service_queue *sq = &tg->service_queue;
1177 	unsigned int nr_reads = 0, nr_writes = 0;
1178 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
1179 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1180 	struct bio *bio;
1181 
1182 	/* Try to dispatch 75% READS and 25% WRITES */
1183 
1184 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1185 	       tg_may_dispatch(tg, bio, NULL)) {
1186 
1187 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1188 		nr_reads++;
1189 
1190 		if (nr_reads >= max_nr_reads)
1191 			break;
1192 	}
1193 
1194 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1195 	       tg_may_dispatch(tg, bio, NULL)) {
1196 
1197 		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1198 		nr_writes++;
1199 
1200 		if (nr_writes >= max_nr_writes)
1201 			break;
1202 	}
1203 
1204 	return nr_reads + nr_writes;
1205 }
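/*
 * With the default throtl_grp_quantum of 8, max_nr_reads is 6 and
 * max_nr_writes is 2, which is where the 75% / 25% read/write split in the
 * comment above comes from.
 */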
1206 
1207 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1208 {
1209 	unsigned int nr_disp = 0;
1210 
1211 	while (1) {
1212 		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1213 		struct throtl_service_queue *sq = &tg->service_queue;
1214 
1215 		if (!tg)
1216 			break;
1217 
1218 		if (time_before(jiffies, tg->disptime))
1219 			break;
1220 
1221 		throtl_dequeue_tg(tg);
1222 
1223 		nr_disp += throtl_dispatch_tg(tg);
1224 
1225 		if (sq->nr_queued[0] || sq->nr_queued[1])
1226 			tg_update_disptime(tg);
1227 
1228 		if (nr_disp >= throtl_quantum)
1229 			break;
1230 	}
1231 
1232 	return nr_disp;
1233 }
1234 
1235 static bool throtl_can_upgrade(struct throtl_data *td,
1236 	struct throtl_grp *this_tg);
1237 /**
1238  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1239  * @arg: the throtl_service_queue being serviced
1240  *
1241  * This timer is armed when a child throtl_grp with active bios becomes
1242  * pending and queued on the service_queue's pending_tree and expires when
1243  * the first child throtl_grp should be dispatched.  This function
1244  * dispatches bios from the children throtl_grps to the parent
1245  * service_queue.
1246  *
1247  * If the parent's parent is another throtl_grp, dispatching is propagated
1248  * by either arming its pending_timer or repeating dispatch directly.  If
1249  * the top-level service_tree is reached, throtl_data->dispatch_work is
1250  * kicked so that the ready bio's are issued.
1251  */
1252 static void throtl_pending_timer_fn(unsigned long arg)
1253 {
1254 	struct throtl_service_queue *sq = (void *)arg;
1255 	struct throtl_grp *tg = sq_to_tg(sq);
1256 	struct throtl_data *td = sq_to_td(sq);
1257 	struct request_queue *q = td->queue;
1258 	struct throtl_service_queue *parent_sq;
1259 	bool dispatched;
1260 	int ret;
1261 
1262 	spin_lock_irq(q->queue_lock);
1263 	if (throtl_can_upgrade(td, NULL))
1264 		throtl_upgrade_state(td);
1265 
1266 again:
1267 	parent_sq = sq->parent_sq;
1268 	dispatched = false;
1269 
1270 	while (true) {
1271 		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1272 			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1273 			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1274 
1275 		ret = throtl_select_dispatch(sq);
1276 		if (ret) {
1277 			throtl_log(sq, "bios disp=%u", ret);
1278 			dispatched = true;
1279 		}
1280 
1281 		if (throtl_schedule_next_dispatch(sq, false))
1282 			break;
1283 
1284 		/* this dispatch window is still open, relax and repeat */
1285 		spin_unlock_irq(q->queue_lock);
1286 		cpu_relax();
1287 		spin_lock_irq(q->queue_lock);
1288 	}
1289 
1290 	if (!dispatched)
1291 		goto out_unlock;
1292 
1293 	if (parent_sq) {
1294 		/* @parent_sq is another throtl_grp, propagate dispatch */
1295 		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1296 			tg_update_disptime(tg);
1297 			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1298 				/* window is already open, repeat dispatching */
1299 				sq = parent_sq;
1300 				tg = sq_to_tg(sq);
1301 				goto again;
1302 			}
1303 		}
1304 	} else {
1305 		/* reached the top level, queue issuing */
1306 		queue_work(kthrotld_workqueue, &td->dispatch_work);
1307 	}
1308 out_unlock:
1309 	spin_unlock_irq(q->queue_lock);
1310 }
1311 
1312 /**
1313  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1314  * @work: work item being executed
1315  *
1316  * This function is queued for execution when bio's reach the bio_lists[]
1317  * of throtl_data->service_queue.  Those bio's are ready and issued by this
1318  * function.
1319  */
1320 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1321 {
1322 	struct throtl_data *td = container_of(work, struct throtl_data,
1323 					      dispatch_work);
1324 	struct throtl_service_queue *td_sq = &td->service_queue;
1325 	struct request_queue *q = td->queue;
1326 	struct bio_list bio_list_on_stack;
1327 	struct bio *bio;
1328 	struct blk_plug plug;
1329 	int rw;
1330 
1331 	bio_list_init(&bio_list_on_stack);
1332 
1333 	spin_lock_irq(q->queue_lock);
1334 	for (rw = READ; rw <= WRITE; rw++)
1335 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1336 			bio_list_add(&bio_list_on_stack, bio);
1337 	spin_unlock_irq(q->queue_lock);
1338 
1339 	if (!bio_list_empty(&bio_list_on_stack)) {
1340 		blk_start_plug(&plug);
1341 		while((bio = bio_list_pop(&bio_list_on_stack)))
1342 			generic_make_request(bio);
1343 		blk_finish_plug(&plug);
1344 	}
1345 }
1346 
1347 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1348 			      int off)
1349 {
1350 	struct throtl_grp *tg = pd_to_tg(pd);
1351 	u64 v = *(u64 *)((void *)tg + off);
1352 
1353 	if (v == U64_MAX)
1354 		return 0;
1355 	return __blkg_prfill_u64(sf, pd, v);
1356 }
1357 
1358 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1359 			       int off)
1360 {
1361 	struct throtl_grp *tg = pd_to_tg(pd);
1362 	unsigned int v = *(unsigned int *)((void *)tg + off);
1363 
1364 	if (v == UINT_MAX)
1365 		return 0;
1366 	return __blkg_prfill_u64(sf, pd, v);
1367 }
1368 
1369 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1370 {
1371 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1372 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1373 	return 0;
1374 }
1375 
1376 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1377 {
1378 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1379 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1380 	return 0;
1381 }
1382 
1383 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1384 {
1385 	struct throtl_service_queue *sq = &tg->service_queue;
1386 	struct cgroup_subsys_state *pos_css;
1387 	struct blkcg_gq *blkg;
1388 
1389 	throtl_log(&tg->service_queue,
1390 		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1391 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1392 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1393 
1394 	/*
1395 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1396 	 * considered to have rules if either the tg itself or any of its
1397 	 * ancestors has rules.  This identifies groups without any
1398 	 * restrictions in the whole hierarchy and allows them to bypass
1399 	 * blk-throttle.
1400 	 */
1401 	blkg_for_each_descendant_pre(blkg, pos_css,
1402 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1403 		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1404 		struct throtl_grp *parent_tg;
1405 
1406 		tg_update_has_rules(this_tg);
1407 		/* ignore root/second level */
1408 		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1409 		    !blkg->parent->parent)
1410 			continue;
1411 		parent_tg = blkg_to_tg(blkg->parent);
1412 		/*
1413 		 * make sure all children have a lower idle time threshold and
1414 		 * a higher latency target
1415 		 */
1416 		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1417 				parent_tg->idletime_threshold);
1418 		this_tg->latency_target = max(this_tg->latency_target,
1419 				parent_tg->latency_target);
1420 	}
1421 
1422 	/*
1423 	 * We're already holding queue_lock and know @tg is valid.  Let's
1424 	 * apply the new config directly.
1425 	 *
1426 	 * Restart the slices for both READs and WRITEs. It might happen
1427 	 * that a group's limits are dropped suddenly and we don't want to
1428 	 * account recently dispatched IO against the new, lower rate.
1429 	 */
1430 	throtl_start_new_slice(tg, 0);
1431 	throtl_start_new_slice(tg, 1);
1432 
1433 	if (tg->flags & THROTL_TG_PENDING) {
1434 		tg_update_disptime(tg);
1435 		throtl_schedule_next_dispatch(sq->parent_sq, true);
1436 	}
1437 }
1438 
1439 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1440 			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1441 {
1442 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1443 	struct blkg_conf_ctx ctx;
1444 	struct throtl_grp *tg;
1445 	int ret;
1446 	u64 v;
1447 
1448 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1449 	if (ret)
1450 		return ret;
1451 
1452 	ret = -EINVAL;
1453 	if (sscanf(ctx.body, "%llu", &v) != 1)
1454 		goto out_finish;
1455 	if (!v)
1456 		v = U64_MAX;
1457 
1458 	tg = blkg_to_tg(ctx.blkg);
1459 
1460 	if (is_u64)
1461 		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1462 	else
1463 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1464 
1465 	tg_conf_updated(tg, false);
1466 	ret = 0;
1467 out_finish:
1468 	blkg_conf_finish(&ctx);
1469 	return ret ?: nbytes;
1470 }
1471 
1472 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1473 			       char *buf, size_t nbytes, loff_t off)
1474 {
1475 	return tg_set_conf(of, buf, nbytes, off, true);
1476 }
1477 
1478 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1479 				char *buf, size_t nbytes, loff_t off)
1480 {
1481 	return tg_set_conf(of, buf, nbytes, off, false);
1482 }
1483 
1484 static struct cftype throtl_legacy_files[] = {
1485 	{
1486 		.name = "throttle.read_bps_device",
1487 		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1488 		.seq_show = tg_print_conf_u64,
1489 		.write = tg_set_conf_u64,
1490 	},
1491 	{
1492 		.name = "throttle.write_bps_device",
1493 		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1494 		.seq_show = tg_print_conf_u64,
1495 		.write = tg_set_conf_u64,
1496 	},
1497 	{
1498 		.name = "throttle.read_iops_device",
1499 		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1500 		.seq_show = tg_print_conf_uint,
1501 		.write = tg_set_conf_uint,
1502 	},
1503 	{
1504 		.name = "throttle.write_iops_device",
1505 		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1506 		.seq_show = tg_print_conf_uint,
1507 		.write = tg_set_conf_uint,
1508 	},
1509 	{
1510 		.name = "throttle.io_service_bytes",
1511 		.private = (unsigned long)&blkcg_policy_throtl,
1512 		.seq_show = blkg_print_stat_bytes,
1513 	},
1514 	{
1515 		.name = "throttle.io_serviced",
1516 		.private = (unsigned long)&blkcg_policy_throtl,
1517 		.seq_show = blkg_print_stat_ios,
1518 	},
1519 	{ }	/* terminate */
1520 };
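/*
 * Usage example (illustrative, cgroup v1): the legacy files above accept a
 * "major:minor value" pair per line, e.g. writing "8:16 1048576" to
 * blkio.throttle.read_bps_device limits reads on device 8:16 to 1 MiB/s,
 * and writing "8:16 0" removes the limit (tg_set_conf() maps 0 to
 * "unlimited").
 */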
1521 
1522 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1523 			 int off)
1524 {
1525 	struct throtl_grp *tg = pd_to_tg(pd);
1526 	const char *dname = blkg_dev_name(pd->blkg);
1527 	char bufs[4][21] = { "max", "max", "max", "max" };
1528 	u64 bps_dft;
1529 	unsigned int iops_dft;
1530 	char idle_time[26] = "";
1531 	char latency_time[26] = "";
1532 
1533 	if (!dname)
1534 		return 0;
1535 
1536 	if (off == LIMIT_LOW) {
1537 		bps_dft = 0;
1538 		iops_dft = 0;
1539 	} else {
1540 		bps_dft = U64_MAX;
1541 		iops_dft = UINT_MAX;
1542 	}
1543 
1544 	if (tg->bps_conf[READ][off] == bps_dft &&
1545 	    tg->bps_conf[WRITE][off] == bps_dft &&
1546 	    tg->iops_conf[READ][off] == iops_dft &&
1547 	    tg->iops_conf[WRITE][off] == iops_dft &&
1548 	    (off != LIMIT_LOW ||
1549 	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1550 	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
1551 		return 0;
1552 
1553 	if (tg->bps_conf[READ][off] != U64_MAX)
1554 		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1555 			tg->bps_conf[READ][off]);
1556 	if (tg->bps_conf[WRITE][off] != U64_MAX)
1557 		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1558 			tg->bps_conf[WRITE][off]);
1559 	if (tg->iops_conf[READ][off] != UINT_MAX)
1560 		snprintf(bufs[2], sizeof(bufs[2]), "%u",
1561 			tg->iops_conf[READ][off]);
1562 	if (tg->iops_conf[WRITE][off] != UINT_MAX)
1563 		snprintf(bufs[3], sizeof(bufs[3]), "%u",
1564 			tg->iops_conf[WRITE][off]);
1565 	if (off == LIMIT_LOW) {
1566 		if (tg->idletime_threshold_conf == ULONG_MAX)
1567 			strcpy(idle_time, " idle=max");
1568 		else
1569 			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1570 				tg->idletime_threshold_conf);
1571 
1572 		if (tg->latency_target_conf == ULONG_MAX)
1573 			strcpy(latency_time, " latency=max");
1574 		else
1575 			snprintf(latency_time, sizeof(latency_time),
1576 				" latency=%lu", tg->latency_target_conf);
1577 	}
1578 
1579 	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1580 		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1581 		   latency_time);
1582 	return 0;
1583 }
1584 
1585 static int tg_print_limit(struct seq_file *sf, void *v)
1586 {
1587 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1588 			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1589 	return 0;
1590 }
1591 
1592 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1593 			  char *buf, size_t nbytes, loff_t off)
1594 {
1595 	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1596 	struct blkg_conf_ctx ctx;
1597 	struct throtl_grp *tg;
1598 	u64 v[4];
1599 	unsigned long idle_time;
1600 	unsigned long latency_time;
1601 	int ret;
1602 	int index = of_cft(of)->private;
1603 
1604 	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1605 	if (ret)
1606 		return ret;
1607 
1608 	tg = blkg_to_tg(ctx.blkg);
1609 
1610 	v[0] = tg->bps_conf[READ][index];
1611 	v[1] = tg->bps_conf[WRITE][index];
1612 	v[2] = tg->iops_conf[READ][index];
1613 	v[3] = tg->iops_conf[WRITE][index];
1614 
1615 	idle_time = tg->idletime_threshold_conf;
1616 	latency_time = tg->latency_target_conf;
1617 	while (true) {
1618 		char tok[27];	/* wiops=18446744073709551616 */
1619 		char *p;
1620 		u64 val = U64_MAX;
1621 		int len;
1622 
1623 		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1624 			break;
1625 		if (tok[0] == '\0')
1626 			break;
1627 		ctx.body += len;
1628 
1629 		ret = -EINVAL;
1630 		p = tok;
1631 		strsep(&p, "=");
1632 		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1633 			goto out_finish;
1634 
1635 		ret = -ERANGE;
1636 		if (!val)
1637 			goto out_finish;
1638 
1639 		ret = -EINVAL;
1640 		if (!strcmp(tok, "rbps"))
1641 			v[0] = val;
1642 		else if (!strcmp(tok, "wbps"))
1643 			v[1] = val;
1644 		else if (!strcmp(tok, "riops"))
1645 			v[2] = min_t(u64, val, UINT_MAX);
1646 		else if (!strcmp(tok, "wiops"))
1647 			v[3] = min_t(u64, val, UINT_MAX);
1648 		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1649 			idle_time = val;
1650 		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1651 			latency_time = val;
1652 		else
1653 			goto out_finish;
1654 	}
1655 
1656 	tg->bps_conf[READ][index] = v[0];
1657 	tg->bps_conf[WRITE][index] = v[1];
1658 	tg->iops_conf[READ][index] = v[2];
1659 	tg->iops_conf[WRITE][index] = v[3];
1660 
1661 	if (index == LIMIT_MAX) {
1662 		tg->bps[READ][index] = v[0];
1663 		tg->bps[WRITE][index] = v[1];
1664 		tg->iops[READ][index] = v[2];
1665 		tg->iops[WRITE][index] = v[3];
1666 	}
1667 	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1668 		tg->bps_conf[READ][LIMIT_MAX]);
1669 	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1670 		tg->bps_conf[WRITE][LIMIT_MAX]);
1671 	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1672 		tg->iops_conf[READ][LIMIT_MAX]);
1673 	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1674 		tg->iops_conf[WRITE][LIMIT_MAX]);
1675 	tg->idletime_threshold_conf = idle_time;
1676 	tg->latency_target_conf = latency_time;
1677 
1678 	/* force the user to configure all settings for the low limit */
1679 	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1680 	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1681 	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1682 	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
1683 		tg->bps[READ][LIMIT_LOW] = 0;
1684 		tg->bps[WRITE][LIMIT_LOW] = 0;
1685 		tg->iops[READ][LIMIT_LOW] = 0;
1686 		tg->iops[WRITE][LIMIT_LOW] = 0;
1687 		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1688 		tg->latency_target = DFL_LATENCY_TARGET;
1689 	} else if (index == LIMIT_LOW) {
1690 		tg->idletime_threshold = tg->idletime_threshold_conf;
1691 		tg->latency_target = tg->latency_target_conf;
1692 	}
1693 
1694 	blk_throtl_update_limit_valid(tg->td);
1695 	if (tg->td->limit_valid[LIMIT_LOW]) {
1696 		if (index == LIMIT_LOW)
1697 			tg->td->limit_index = LIMIT_LOW;
1698 	} else
1699 		tg->td->limit_index = LIMIT_MAX;
1700 	tg_conf_updated(tg, index == LIMIT_LOW &&
1701 		tg->td->limit_valid[LIMIT_LOW]);
1702 	ret = 0;
1703 out_finish:
1704 	blkg_conf_finish(&ctx);
1705 	return ret ?: nbytes;
1706 }
1707 
1708 static struct cftype throtl_files[] = {
1709 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1710 	{
1711 		.name = "low",
1712 		.flags = CFTYPE_NOT_ON_ROOT,
1713 		.seq_show = tg_print_limit,
1714 		.write = tg_set_limit,
1715 		.private = LIMIT_LOW,
1716 	},
1717 #endif
1718 	{
1719 		.name = "max",
1720 		.flags = CFTYPE_NOT_ON_ROOT,
1721 		.seq_show = tg_print_limit,
1722 		.write = tg_set_limit,
1723 		.private = LIMIT_MAX,
1724 	},
1725 	{ }	/* terminate */
1726 };
1727 
1728 static void throtl_shutdown_wq(struct request_queue *q)
1729 {
1730 	struct throtl_data *td = q->td;
1731 
1732 	cancel_work_sync(&td->dispatch_work);
1733 }
1734 
1735 static struct blkcg_policy blkcg_policy_throtl = {
1736 	.dfl_cftypes		= throtl_files,
1737 	.legacy_cftypes		= throtl_legacy_files,
1738 
1739 	.pd_alloc_fn		= throtl_pd_alloc,
1740 	.pd_init_fn		= throtl_pd_init,
1741 	.pd_online_fn		= throtl_pd_online,
1742 	.pd_offline_fn		= throtl_pd_offline,
1743 	.pd_free_fn		= throtl_pd_free,
1744 };
1745 
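/*
 * Return the earlier of the group's read/write low-limit overflow times,
 * considering only the directions that actually have a low limit
 * configured; an unconfigured direction contributes "now".
 */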
1746 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1747 {
1748 	unsigned long rtime = jiffies, wtime = jiffies;
1749 
1750 	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1751 		rtime = tg->last_low_overflow_time[READ];
1752 	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1753 		wtime = tg->last_low_overflow_time[WRITE];
1754 	return min(rtime, wtime);
1755 }
1756 
1757 /* tg should not be an intermediate node */
1758 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1759 {
1760 	struct throtl_service_queue *parent_sq;
1761 	struct throtl_grp *parent = tg;
1762 	unsigned long ret = __tg_last_low_overflow_time(tg);
1763 
1764 	while (true) {
1765 		parent_sq = parent->service_queue.parent_sq;
1766 		parent = sq_to_tg(parent_sq);
1767 		if (!parent)
1768 			break;
1769 
1770 		/*
1771 		 * A parent without a low limit always reaches its low limit,
1772 		 * so its overflow time is useless for its children.
1773 		 */
1774 		if (!parent->bps[READ][LIMIT_LOW] &&
1775 		    !parent->iops[READ][LIMIT_LOW] &&
1776 		    !parent->bps[WRITE][LIMIT_LOW] &&
1777 		    !parent->iops[WRITE][LIMIT_LOW])
1778 			continue;
1779 		if (time_after(__tg_last_low_overflow_time(parent), ret))
1780 			ret = __tg_last_low_overflow_time(parent);
1781 	}
1782 	return ret;
1783 }
1784 
1785 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1786 {
1787 	/*
1788 	 * cgroup is idle if:
1789 	 * - a single idle period is too long: longer than a fixed cap (in case
1790 	 *   the user configured an overly large threshold) or 4 times the idletime threshold
1791 	 * - the average think time exceeds the threshold
1792 	 * - IO latency is largely below the latency target
1793 	 */
1794 	unsigned long time;
1795 	bool ret;
1796 
1797 	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1798 	ret = tg->latency_target == DFL_LATENCY_TARGET ||
1799 	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1800 	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1801 	      tg->avg_idletime > tg->idletime_threshold ||
1802 	      (tg->latency_target && tg->bio_cnt &&
1803 		tg->bad_bio_cnt * 5 < tg->bio_cnt);
1804 	throtl_log(&tg->service_queue,
1805 		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1806 		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1807 		tg->bio_cnt, ret, tg->td->scale);
1808 	return ret;
1809 }
1810 
1811 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1812 {
1813 	struct throtl_service_queue *sq = &tg->service_queue;
1814 	bool read_limit, write_limit;
1815 
1816 	/*
1817 	 * If the cgroup reaches its low limit (a low limit of 0 is always
1818 	 * considered reached), it's ok to upgrade to the next limit.
1819 	 */
1820 	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1821 	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1822 	if (!read_limit && !write_limit)
1823 		return true;
1824 	if (read_limit && sq->nr_queued[READ] &&
1825 	    (!write_limit || sq->nr_queued[WRITE]))
1826 		return true;
1827 	if (write_limit && sq->nr_queued[WRITE] &&
1828 	    (!read_limit || sq->nr_queued[READ]))
1829 		return true;
1830 
1831 	if (time_after_eq(jiffies,
1832 		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1833 	    throtl_tg_is_idle(tg))
1834 		return true;
1835 	return false;
1836 }
1837 
1838 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1839 {
1840 	while (true) {
1841 		if (throtl_tg_can_upgrade(tg))
1842 			return true;
1843 		tg = sq_to_tg(tg->service_queue.parent_sq);
1844 		if (!tg || !tg_to_blkg(tg)->parent)
1845 			return false;
1846 	}
1847 	return false;
1848 }
1849 
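/*
 * Upgrade the whole device to LIMIT_MAX only if every leaf cgroup (one
 * whose blkcg has no children) agrees it can upgrade, and at least one
 * throtl_slice has passed since the last downgrade.
 */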
1850 static bool throtl_can_upgrade(struct throtl_data *td,
1851 	struct throtl_grp *this_tg)
1852 {
1853 	struct cgroup_subsys_state *pos_css;
1854 	struct blkcg_gq *blkg;
1855 
1856 	if (td->limit_index != LIMIT_LOW)
1857 		return false;
1858 
1859 	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1860 		return false;
1861 
1862 	rcu_read_lock();
1863 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1864 		struct throtl_grp *tg = blkg_to_tg(blkg);
1865 
1866 		if (tg == this_tg)
1867 			continue;
1868 		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1869 			continue;
1870 		if (!throtl_hierarchy_can_upgrade(tg)) {
1871 			rcu_read_unlock();
1872 			return false;
1873 		}
1874 	}
1875 	rcu_read_unlock();
1876 	return true;
1877 }
1878 
1879 static void throtl_upgrade_check(struct throtl_grp *tg)
1880 {
1881 	unsigned long now = jiffies;
1882 
1883 	if (tg->td->limit_index != LIMIT_LOW)
1884 		return;
1885 
1886 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1887 		return;
1888 
1889 	tg->last_check_time = now;
1890 
1891 	if (!time_after_eq(now,
1892 	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1893 		return;
1894 
1895 	if (throtl_can_upgrade(tg->td, NULL))
1896 		throtl_upgrade_state(tg->td);
1897 }
1898 
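/*
 * Switch the device back to the max limits: reset the scale, mark every
 * group as immediately dispatchable and reschedule dispatch so bios queued
 * under the low limits are re-evaluated, then kick the dispatch work.
 */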
1899 static void throtl_upgrade_state(struct throtl_data *td)
1900 {
1901 	struct cgroup_subsys_state *pos_css;
1902 	struct blkcg_gq *blkg;
1903 
1904 	throtl_log(&td->service_queue, "upgrade to max");
1905 	td->limit_index = LIMIT_MAX;
1906 	td->low_upgrade_time = jiffies;
1907 	td->scale = 0;
1908 	rcu_read_lock();
1909 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1910 		struct throtl_grp *tg = blkg_to_tg(blkg);
1911 		struct throtl_service_queue *sq = &tg->service_queue;
1912 
1913 		tg->disptime = jiffies - 1;
1914 		throtl_select_dispatch(sq);
1915 		throtl_schedule_next_dispatch(sq, true);
1916 	}
1917 	rcu_read_unlock();
1918 	throtl_select_dispatch(&td->service_queue);
1919 	throtl_schedule_next_dispatch(&td->service_queue, true);
1920 	queue_work(kthrotld_workqueue, &td->dispatch_work);
1921 }
1922 
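/*
 * Downgrade gradually: each call halves td->scale, and while the scale is
 * still non-zero we only backdate low_upgrade_time by scale * throtl_slice
 * instead of actually dropping to the low limits.  Only once the scale
 * reaches zero does limit_index really change.
 */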
1923 static void throtl_downgrade_state(struct throtl_data *td, int new)
1924 {
1925 	td->scale /= 2;
1926 
1927 	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1928 	if (td->scale) {
1929 		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1930 		return;
1931 	}
1932 
1933 	td->limit_index = new;
1934 	td->low_downgrade_time = jiffies;
1935 }
1936 
1937 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1938 {
1939 	struct throtl_data *td = tg->td;
1940 	unsigned long now = jiffies;
1941 
1942 	/*
1943 	 * If cgroup is below low limit, consider downgrade and throttle other
1944 	 * cgroups
1945 	 */
1946 	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1947 	    time_after_eq(now, tg_last_low_overflow_time(tg) +
1948 					td->throtl_slice) &&
1949 	    (!throtl_tg_is_idle(tg) ||
1950 	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1951 		return true;
1952 	return false;
1953 }
1954 
1955 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1956 {
1957 	while (true) {
1958 		if (!throtl_tg_can_downgrade(tg))
1959 			return false;
1960 		tg = sq_to_tg(tg->service_queue.parent_sq);
1961 		if (!tg || !tg_to_blkg(tg)->parent)
1962 			break;
1963 	}
1964 	return true;
1965 }
1966 
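/*
 * Estimate the group's dispatch rate since the last check and record an
 * overflow time whenever the low limit was reached.  The bps estimate is
 * last_bytes_disp * HZ / elapsed_jiffies; e.g. (assuming HZ=250) 4 MiB
 * dispatched over 25 jiffies (100 ms) works out to ~40 MiB/s.
 */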
1967 static void throtl_downgrade_check(struct throtl_grp *tg)
1968 {
1969 	uint64_t bps;
1970 	unsigned int iops;
1971 	unsigned long elapsed_time;
1972 	unsigned long now = jiffies;
1973 
1974 	if (tg->td->limit_index != LIMIT_MAX ||
1975 	    !tg->td->limit_valid[LIMIT_LOW])
1976 		return;
1977 	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1978 		return;
1979 	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1980 		return;
1981 
1982 	elapsed_time = now - tg->last_check_time;
1983 	tg->last_check_time = now;
1984 
1985 	if (time_before(now, tg_last_low_overflow_time(tg) +
1986 			tg->td->throtl_slice))
1987 		return;
1988 
1989 	if (tg->bps[READ][LIMIT_LOW]) {
1990 		bps = tg->last_bytes_disp[READ] * HZ;
1991 		do_div(bps, elapsed_time);
1992 		if (bps >= tg->bps[READ][LIMIT_LOW])
1993 			tg->last_low_overflow_time[READ] = now;
1994 	}
1995 
1996 	if (tg->bps[WRITE][LIMIT_LOW]) {
1997 		bps = tg->last_bytes_disp[WRITE] * HZ;
1998 		do_div(bps, elapsed_time);
1999 		if (bps >= tg->bps[WRITE][LIMIT_LOW])
2000 			tg->last_low_overflow_time[WRITE] = now;
2001 	}
2002 
2003 	if (tg->iops[READ][LIMIT_LOW]) {
2004 		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2005 		if (iops >= tg->iops[READ][LIMIT_LOW])
2006 			tg->last_low_overflow_time[READ] = now;
2007 	}
2008 
2009 	if (tg->iops[WRITE][LIMIT_LOW]) {
2010 		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2011 		if (iops >= tg->iops[WRITE][LIMIT_LOW])
2012 			tg->last_low_overflow_time[WRITE] = now;
2013 	}
2014 
2015 	/*
2016 	 * If cgroup is below low limit, consider downgrade and throttle other
2017 	 * cgroups
2018 	 */
2019 	if (throtl_hierarchy_can_downgrade(tg))
2020 		throtl_downgrade_state(tg->td, LIMIT_LOW);
2021 
2022 	tg->last_bytes_disp[READ] = 0;
2023 	tg->last_bytes_disp[WRITE] = 0;
2024 	tg->last_io_disp[READ] = 0;
2025 	tg->last_io_disp[WRITE] = 0;
2026 }
2027 
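/*
 * Track the group's "think time": the gap between the last completion and
 * this submission, in ~microseconds (ktime_get_ns() >> 10).  The average is
 * a 7/8-weighted EWMA: avg_idletime = (7 * avg_idletime + sample) / 8.
 */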
2028 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2029 {
2030 	unsigned long now = ktime_get_ns() >> 10;
2031 	unsigned long last_finish_time = tg->last_finish_time;
2032 
2033 	if (now <= last_finish_time || last_finish_time == 0 ||
2034 	    last_finish_time == tg->checked_last_finish_time)
2035 		return;
2036 
2037 	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2038 	tg->checked_last_finish_time = last_finish_time;
2039 }
2040 
2041 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
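/*
 * Once per second, fold the per-CPU latency samples into per-request-size
 * buckets.  A bucket needs at least 32 samples to produce a new average,
 * which is then merged into a 7/8-weighted EWMA; bucket latencies are kept
 * monotonically non-decreasing with request size.  Only used on
 * non-rotational queues.
 */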
2042 static void throtl_update_latency_buckets(struct throtl_data *td)
2043 {
2044 	struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
2045 	int i, cpu;
2046 	unsigned long last_latency = 0;
2047 	unsigned long latency;
2048 
2049 	if (!blk_queue_nonrot(td->queue))
2050 		return;
2051 	if (time_before(jiffies, td->last_calculate_time + HZ))
2052 		return;
2053 	td->last_calculate_time = jiffies;
2054 
2055 	memset(avg_latency, 0, sizeof(avg_latency));
2056 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2057 		struct latency_bucket *tmp = &td->tmp_buckets[i];
2058 
2059 		for_each_possible_cpu(cpu) {
2060 			struct latency_bucket *bucket;
2061 
2062 			/* this isn't race free, but ok in practice */
2063 			bucket = per_cpu_ptr(td->latency_buckets, cpu);
2064 			tmp->total_latency += bucket[i].total_latency;
2065 			tmp->samples += bucket[i].samples;
2066 			bucket[i].total_latency = 0;
2067 			bucket[i].samples = 0;
2068 		}
2069 
2070 		if (tmp->samples >= 32) {
2071 			int samples = tmp->samples;
2072 
2073 			latency = tmp->total_latency;
2074 
2075 			tmp->total_latency = 0;
2076 			tmp->samples = 0;
2077 			latency /= samples;
2078 			if (latency == 0)
2079 				continue;
2080 			avg_latency[i].latency = latency;
2081 		}
2082 	}
2083 
2084 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2085 		if (!avg_latency[i].latency) {
2086 			if (td->avg_buckets[i].latency < last_latency)
2087 				td->avg_buckets[i].latency = last_latency;
2088 			continue;
2089 		}
2090 
2091 		if (!td->avg_buckets[i].valid)
2092 			latency = avg_latency[i].latency;
2093 		else
2094 			latency = (td->avg_buckets[i].latency * 7 +
2095 				avg_latency[i].latency) >> 3;
2096 
2097 		td->avg_buckets[i].latency = max(latency, last_latency);
2098 		td->avg_buckets[i].valid = true;
2099 		last_latency = td->avg_buckets[i].latency;
2100 	}
2101 
2102 	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2103 		throtl_log(&td->service_queue,
2104 			"Latency bucket %d: latency=%ld, valid=%d", i,
2105 			td->avg_buckets[i].latency, td->avg_buckets[i].valid);
2106 }
2107 #else
2108 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2109 {
2110 }
2111 #endif
2112 
2113 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2114 {
2115 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2116 	if (bio->bi_css)
2117 		bio->bi_cg_private = tg;
2118 	blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
2119 #endif
2120 }
2121 
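/*
 * Main throttling hook on the submission path.  Walk up the cgroup
 * hierarchy, charging the bio at every level that has budget; the first
 * level that is over its limit (or already has queued bios) queues the bio
 * and returns true, otherwise the bio may be issued immediately.
 */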
2122 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2123 		    struct bio *bio)
2124 {
2125 	struct throtl_qnode *qn = NULL;
2126 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2127 	struct throtl_service_queue *sq;
2128 	bool rw = bio_data_dir(bio);
2129 	bool throttled = false;
2130 	struct throtl_data *td = tg->td;
2131 
2132 	WARN_ON_ONCE(!rcu_read_lock_held());
2133 
2134 	/* see throtl_charge_bio() */
2135 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2136 		goto out;
2137 
2138 	spin_lock_irq(q->queue_lock);
2139 
2140 	throtl_update_latency_buckets(td);
2141 
2142 	if (unlikely(blk_queue_bypass(q)))
2143 		goto out_unlock;
2144 
2145 	blk_throtl_assoc_bio(tg, bio);
2146 	blk_throtl_update_idletime(tg);
2147 
2148 	sq = &tg->service_queue;
2149 
2150 again:
2151 	while (true) {
2152 		if (tg->last_low_overflow_time[rw] == 0)
2153 			tg->last_low_overflow_time[rw] = jiffies;
2154 		throtl_downgrade_check(tg);
2155 		throtl_upgrade_check(tg);
2156 		/* throtl is FIFO - if bios are already queued, should queue */
2157 		if (sq->nr_queued[rw])
2158 			break;
2159 
2160 		/* if above limits, break to queue */
2161 		if (!tg_may_dispatch(tg, bio, NULL)) {
2162 			tg->last_low_overflow_time[rw] = jiffies;
2163 			if (throtl_can_upgrade(td, tg)) {
2164 				throtl_upgrade_state(td);
2165 				goto again;
2166 			}
2167 			break;
2168 		}
2169 
2170 		/* within limits, let's charge and dispatch directly */
2171 		throtl_charge_bio(tg, bio);
2172 
2173 		/*
2174 		 * We need to trim slice even when bios are not being queued
2175 		 * otherwise it might happen that a bio is not queued for
2176 		 * a long time and slice keeps on extending and trim is not
2177 		 * called for a long time. Now if limits are reduced suddenly
2178 		 * we take into account all the IO dispatched so far at new
2179 		 * low rate and newly queued IO gets a really long dispatch
2180 		 * time.
2181 		 *
2182 		 * So keep on trimming slice even if bio is not queued.
2183 		 */
2184 		throtl_trim_slice(tg, rw);
2185 
2186 		/*
2187 		 * @bio passed through this layer without being throttled.
2188 		 * Climb up the ladder.  If we're already at the top, it
2189 		 * can be executed directly.
2190 		 */
2191 		qn = &tg->qnode_on_parent[rw];
2192 		sq = sq->parent_sq;
2193 		tg = sq_to_tg(sq);
2194 		if (!tg)
2195 			goto out_unlock;
2196 	}
2197 
2198 	/* out-of-limit, queue to @tg */
2199 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2200 		   rw == READ ? 'R' : 'W',
2201 		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2202 		   tg_bps_limit(tg, rw),
2203 		   tg->io_disp[rw], tg_iops_limit(tg, rw),
2204 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2205 
2206 	tg->last_low_overflow_time[rw] = jiffies;
2207 
2208 	td->nr_queued[rw]++;
2209 	throtl_add_bio_tg(bio, qn, tg);
2210 	throttled = true;
2211 
2212 	/*
2213 	 * Update @tg's dispatch time and force schedule dispatch if @tg
2214 	 * was empty before @bio.  The forced scheduling isn't likely to
2215 	 * cause undue delay as @bio is likely to be dispatched directly if
2216 	 * its @tg's disptime is not in the future.
2217 	 */
2218 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
2219 		tg_update_disptime(tg);
2220 		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2221 	}
2222 
2223 out_unlock:
2224 	spin_unlock_irq(q->queue_lock);
2225 out:
2226 	bio_set_flag(bio, BIO_THROTTLED);
2227 
2228 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2229 	if (throttled || !td->track_bio_latency)
2230 		bio->bi_issue_stat.stat |= SKIP_LATENCY;
2231 #endif
2232 	return throttled;
2233 }
2234 
2235 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2236 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2237 	int op, unsigned long time)
2238 {
2239 	struct latency_bucket *latency;
2240 	int index;
2241 
2242 	if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
2243 	    !blk_queue_nonrot(td->queue))
2244 		return;
2245 
2246 	index = request_bucket_index(size);
2247 
2248 	latency = get_cpu_ptr(td->latency_buckets);
2249 	latency[index].total_latency += time;
2250 	latency[index].samples++;
2251 	put_cpu_ptr(td->latency_buckets);
2252 }
2253 
2254 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2255 {
2256 	struct request_queue *q = rq->q;
2257 	struct throtl_data *td = q->td;
2258 
2259 	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
2260 		req_op(rq), time_ns >> 10);
2261 }
2262 
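/*
 * Completion hook for bio-based latency tracking: record the finish time
 * for idle detection, feed the latency sample into the buckets unless
 * SKIP_LATENCY is set, and count the bio as "bad" when its latency exceeds
 * the bucket average plus the group's latency target.  The counters are
 * periodically halved so the bad/total ratio reflects recent history.
 */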
2263 void blk_throtl_bio_endio(struct bio *bio)
2264 {
2265 	struct throtl_grp *tg;
2266 	u64 finish_time_ns;
2267 	unsigned long finish_time;
2268 	unsigned long start_time;
2269 	unsigned long lat;
2270 
2271 	tg = bio->bi_cg_private;
2272 	if (!tg)
2273 		return;
2274 	bio->bi_cg_private = NULL;
2275 
2276 	finish_time_ns = ktime_get_ns();
2277 	tg->last_finish_time = finish_time_ns >> 10;
2278 
2279 	start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
2280 	finish_time = __blk_stat_time(finish_time_ns) >> 10;
2281 	if (!start_time || finish_time <= start_time)
2282 		return;
2283 
2284 	lat = finish_time - start_time;
2285 	/* this is only for bio based drivers */
2286 	if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
2287 		throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2288 			bio_op(bio), lat);
2289 
2290 	if (tg->latency_target && lat >= tg->td->filtered_latency) {
2291 		int bucket;
2292 		unsigned int threshold;
2293 
2294 		bucket = request_bucket_index(
2295 			blk_stat_size(&bio->bi_issue_stat));
2296 		threshold = tg->td->avg_buckets[bucket].latency +
2297 			tg->latency_target;
2298 		if (lat > threshold)
2299 			tg->bad_bio_cnt++;
2300 		/*
2301 		 * Not race free: the count could be slightly wrong, which at
2302 		 * worst means cgroups will be throttled
2303 		 */
2304 		tg->bio_cnt++;
2305 	}
2306 
2307 	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2308 		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2309 		tg->bio_cnt /= 2;
2310 		tg->bad_bio_cnt /= 2;
2311 	}
2312 }
2313 #endif
2314 
2315 /*
2316  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2317  * return, @parent_sq is guaranteed to not have any active children tg's
2318  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2319  */
2320 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2321 {
2322 	struct throtl_grp *tg;
2323 
2324 	while ((tg = throtl_rb_first(parent_sq))) {
2325 		struct throtl_service_queue *sq = &tg->service_queue;
2326 		struct bio *bio;
2327 
2328 		throtl_dequeue_tg(tg);
2329 
2330 		while ((bio = throtl_peek_queued(&sq->queued[READ])))
2331 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2332 		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2333 			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2334 	}
2335 }
2336 
2337 /**
2338  * blk_throtl_drain - drain throttled bios
2339  * @q: request_queue to drain throttled bios for
2340  *
2341  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2342  */
2343 void blk_throtl_drain(struct request_queue *q)
2344 	__releases(q->queue_lock) __acquires(q->queue_lock)
2345 {
2346 	struct throtl_data *td = q->td;
2347 	struct blkcg_gq *blkg;
2348 	struct cgroup_subsys_state *pos_css;
2349 	struct bio *bio;
2350 	int rw;
2351 
2352 	queue_lockdep_assert_held(q);
2353 	rcu_read_lock();
2354 
2355 	/*
2356 	 * Drain each tg while doing post-order walk on the blkg tree, so
2357 	 * that all bios are propagated to td->service_queue.  It'd be
2358 	 * better to walk service_queue tree directly but blkg walk is
2359 	 * easier.
2360 	 */
2361 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2362 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2363 
2364 	/* finally, transfer bios from top-level tg's into the td */
2365 	tg_drain_bios(&td->service_queue);
2366 
2367 	rcu_read_unlock();
2368 	spin_unlock_irq(q->queue_lock);
2369 
2370 	/* all bios now should be in td->service_queue, issue them */
2371 	for (rw = READ; rw <= WRITE; rw++)
2372 		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2373 						NULL)))
2374 			generic_make_request(bio);
2375 
2376 	spin_lock_irq(q->queue_lock);
2377 }
2378 
2379 int blk_throtl_init(struct request_queue *q)
2380 {
2381 	struct throtl_data *td;
2382 	int ret;
2383 
2384 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2385 	if (!td)
2386 		return -ENOMEM;
2387 	td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
2388 		LATENCY_BUCKET_SIZE, __alignof__(u64));
2389 	if (!td->latency_buckets) {
2390 		kfree(td);
2391 		return -ENOMEM;
2392 	}
2393 
2394 	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2395 	throtl_service_queue_init(&td->service_queue);
2396 
2397 	q->td = td;
2398 	td->queue = q;
2399 
2400 	td->limit_valid[LIMIT_MAX] = true;
2401 	td->limit_index = LIMIT_MAX;
2402 	td->low_upgrade_time = jiffies;
2403 	td->low_downgrade_time = jiffies;
2404 
2405 	/* activate policy */
2406 	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2407 	if (ret) {
2408 		free_percpu(td->latency_buckets);
2409 		kfree(td);
2410 	}
2411 	return ret;
2412 }
2413 
2414 void blk_throtl_exit(struct request_queue *q)
2415 {
2416 	BUG_ON(!q->td);
2417 	throtl_shutdown_wq(q);
2418 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2419 	free_percpu(q->td->latency_buckets);
2420 	kfree(q->td);
2421 }
2422 
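/*
 * Pick per-queue defaults when the policy is registered: non-rotational
 * devices get the shorter slice and no latency filter, rotational devices
 * get the longer slice, the HD latency filter and the HD baseline bucket
 * latency.  Bio latency is tracked directly only for bio-based drivers;
 * otherwise request-level blk-stat accounting is enabled.
 */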
2423 void blk_throtl_register_queue(struct request_queue *q)
2424 {
2425 	struct throtl_data *td;
2426 	int i;
2427 
2428 	td = q->td;
2429 	BUG_ON(!td);
2430 
2431 	if (blk_queue_nonrot(q)) {
2432 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
2433 		td->filtered_latency = LATENCY_FILTERED_SSD;
2434 	} else {
2435 		td->throtl_slice = DFL_THROTL_SLICE_HD;
2436 		td->filtered_latency = LATENCY_FILTERED_HD;
2437 		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2438 			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2439 	}
2440 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2441 	/* if no low limit, use previous default */
2442 	td->throtl_slice = DFL_THROTL_SLICE_HD;
2443 #endif
2444 
2445 	td->track_bio_latency = !q->mq_ops && !q->request_fn;
2446 	if (!td->track_bio_latency)
2447 		blk_stat_enable_accounting(q);
2448 }
2449 
2450 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2451 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2452 {
2453 	if (!q->td)
2454 		return -EINVAL;
2455 	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2456 }
2457 
2458 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2459 	const char *page, size_t count)
2460 {
2461 	unsigned long v;
2462 	unsigned long t;
2463 
2464 	if (!q->td)
2465 		return -EINVAL;
2466 	if (kstrtoul(page, 10, &v))
2467 		return -EINVAL;
2468 	t = msecs_to_jiffies(v);
2469 	if (t == 0 || t > MAX_THROTL_SLICE)
2470 		return -EINVAL;
2471 	q->td->throtl_slice = t;
2472 	return count;
2473 }
2474 #endif
2475 
2476 static int __init throtl_init(void)
2477 {
2478 	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2479 	if (!kthrotld_workqueue)
2480 		panic("Failed to create kthrotld\n");
2481 
2482 	return blkcg_policy_register(&blkcg_policy_throtl);
2483 }
2484 
2485 module_init(throtl_init);
2486