// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */
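
/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * tunables above are jiffies counts, so their wall-clock meaning depends on
 * HZ. The hypothetical helper below restates the deadline computation that
 * dd_insert_request() performs (rq->fifo_time = jiffies + fifo_expire).
 */
static inline unsigned long dd_example_deadline(int fifo_expire_jiffies)
{
	/* Assuming HZ == 250 (one common config), read_expire == 125
	 * jiffies, i.e. jiffies_to_msecs(read_expire) == 500 ms. */
	return jiffies + fifo_expire_jiffies;
}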

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in sector-sorted order. Read, write or both may be NULL. */
	struct request *next_rq[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}
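
/*
 * Editor's sketch (not in the original source): several functions below map
 * a request to a scheduler priority level in two steps. This hypothetical
 * helper makes that pipeline explicit.
 */
static inline enum dd_prio dd_example_rq_prio(struct request *rq)
{
	/* IOPRIO_CLASS_* -> DD_*_PRIO via ioprio_class_to_prio[] above. */
	return ioprio_class_to_prio[dd_rq_ioclass(rq)];
}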

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}
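
/*
 * Editor's sketch: the sort_list rbtrees are keyed by blk_rq_pos() (see
 * elv_rb_add() in block/elevator.c), so an in-order walk visits requests in
 * ascending sector order. A hypothetical full traversal:
 */
static inline void dd_example_walk_sorted(struct rb_root *root)
{
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct request *rq = rb_entry_rq(node);

		(void)blk_rq_pos(rq);	/* sector positions ascend */
	}
}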

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}
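
/*
 * Editor's note on the overflow remark above struct io_stats_per_prio:
 * unsigned subtraction is modular, so the difference computed by dd_queued()
 * stays correct even after either counter wraps, as long as fewer than 2^32
 * requests are outstanding at once. A hypothetical illustration:
 */
static inline u32 dd_example_wrap_diff(u32 inserted, u32 completed)
{
	/* e.g. inserted == 3 after wrapping, completed == 0xfffffffeU:
	 * 3 - 0xfffffffeU == 5, the true number of queued requests. */
	return inserted - completed;
}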

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]).
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
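
/*
 * Editor's note: time_after_eq(a, b) is essentially ((long)(a) - (long)(b)
 * >= 0), so the expiry test above remains correct across jiffies wraparound.
 * A hypothetical restatement for a single absolute deadline:
 */
static inline bool dd_example_deadline_passed(unsigned long fifo_time)
{
	return time_after_eq(jiffies, fifo_time);
}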

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}
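
/*
 * Editor's note: started_after() recovers a request's insertion time by
 * inverting the deadline computation (fifo_time = insertion time +
 * fifo_expire). A hypothetical restatement of that inverse:
 */
static inline unsigned long dd_example_insert_time(struct deadline_data *dd,
						   struct request *rq)
{
	return (unsigned long)rq->fifo_time - dd->fifo_expire[rq_data_dir(rq)];
}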

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
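
/*
 * Editor's note: the 3/4 factor above caps async/write tag usage. Assuming
 * q->nr_requests == 256 (a typical value; configuration dependent), the
 * computation yields max(1, 192), so synchronous reads always have at least
 * a quarter of the scheduler tags available. Hypothetical restatement:
 */
static inline unsigned long dd_example_async_depth(unsigned long nr_requests)
{
	return max(1UL, 3 * nr_requests / 4);
}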

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
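
/*
 * Editor's note: a front merge prepends @bio to a request, so the only
 * candidate is the request whose start sector equals bio_end_sector(bio);
 * that is why elv_rb_find() above is keyed on the bio's end sector. A
 * hypothetical predicate spelling out the geometry:
 */
static inline bool dd_example_front_mergeable(struct request *rq,
					      struct bio *bio)
{
	/* bio [.., S) can be glued immediately in front of rq [S, ..). */
	return blk_rq_pos(rq) == bio_end_sector(bio);
}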

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES
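
/*
 * Editor's note: for reference, SHOW_JIFFIES(deadline_read_expire_show,
 * dd->fifo_expire[DD_READ]) above expands to approximately:
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *
 *		return sysfs_emit(page, "%d\n",
 *				  jiffies_to_msecs(dd->fifo_expire[DD_READ]));
 *	}
 *
 * so the sysfs files report milliseconds while the internal fields hold
 * jiffies; the STORE_JIFFIES variants apply the inverse msecs_to_jiffies()
 * conversion on write.
 */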

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
			.seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");