// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};
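
/*
 * Illustration (not from the original source): with 32-bit counters,
 * "inserted - completed" stays correct under unsigned wraparound. If
 * "inserted" has wrapped around to 5 while "completed" still reads
 * 0xfffffffe, then 5 - 0xfffffffe == 7 (mod 2^32), the true number of
 * queued requests, as long as fewer than 2^32 requests are outstanding
 * at any time.
 */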

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in sector-sorted order. Read, write or both may be NULL. */
	struct request *next_rq[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};
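
/*
 * Illustration (not from the original source): a task that calls
 * ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0))
 * issues requests with class IOPRIO_CLASS_RT, which land in
 * per_prio[DD_RT_PRIO]; requests without an explicit class
 * (IOPRIO_CLASS_NONE) share the best-effort queues in per_prio[DD_BE_PRIO].
 */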

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request before `rq' in sector-sorted order
 */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}
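
/*
 * Worked example (illustrative): if @req expires at jiffies 1000 and @next
 * at 900, letting the merged request keep @req's fifo_time would delay
 * @next's payload past its own deadline. The code above therefore moves
 * @req into @next's FIFO position and copies fifo_time = 900, so the merged
 * request inherits the earlier of the two deadlines.
 */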

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * Check if rq has a sequential request preceding it.
 */
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;

	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}
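
/*
 * Example (illustrative): if the previous request in the rbtree starts at
 * sector 2048 and spans 8 sectors, a request starting at sector 2056 is
 * part of the same sequential write stream, since 2048 + 8 == 2056.
 */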

/*
 * Skip all write requests that are sequential from @rq, even if we cross
 * a zone boundary.
 */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);
	sector_t skipped_sectors = 0;

	while (rq) {
		if (blk_rq_pos(rq) != pos + skipped_sectors)
			break;
		skipped_sectors += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	}

	return rq;
}
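
/*
 * Worked example (illustrative): with queued writes starting at sectors 0
 * (8 sectors), 8 (8 sectors) and 32, a walk starting at sector 0 skips the
 * contiguous run at 0 and 8 (skipped_sectors accumulates to 16) and returns
 * the request at sector 32, the first one that breaks the sequence.
 */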

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_write(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}
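
/*
 * Example (illustrative, assuming HZ = 250): a read inserted at jiffies 5000
 * gets fifo_time = 5000 + fifo_expire[DD_READ] = 5125, so start_time above
 * recomputes to 5000, the insertion time. With latest_start set to
 * now - prio_aging_expire by the aging path, started_after() is true exactly
 * for requests younger than the aging window.
 */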

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

	dd->async_depth = max(1U, 3 * (1U << shift) / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
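
/*
 * Worked example (illustrative): with an sbitmap word shift of 6 (64 bits
 * per word), async_depth = max(1, 3 * 64 / 4) = 48, so asynchronous
 * allocations are limited to roughly three quarters of each tag word,
 * leaving headroom for synchronous requests.
 */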

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
	       !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
	       !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
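/*
 * Expansion sketch (illustrative): STORE_JIFFIES(deadline_read_expire_store,
 * &dd->fifo_expire[DD_READ], 0, INT_MAX) defines a store function that
 * parses the sysfs input with kstrtoint(), clamps it to [0, INT_MAX]
 * milliseconds and stores msecs_to_jiffies(value) in
 * dd->fifo_expire[DD_READ], e.g. via
 * "echo 750 > /sys/block/<dev>/queue/iosched/read_expire".
 */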
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}
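
/*
 * Note (illustrative reasoning): "merged" is added in because a request
 * that is merged into another one is freed via blk_mq_free_request() and
 * therefore increments "completed" without ever being dispatched; counting
 * it on both sides keeps the difference from underflowing.
 */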

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");