// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
/*
 * Time after which to dispatch lower priority requests even if higher
 * priority requests are pending.
 */
static const int prio_aging_expire = 10 * HZ;
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
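
/*
 * Since these values are in jiffies, the defaults correspond to the same
 * wall-clock times regardless of CONFIG_HZ: read_expire is 500 ms,
 * write_expire 5 s and prio_aging_expire 10 s.
 */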

enum dd_data_dir {
	DD_READ = READ,
	DD_WRITE = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO = 0,
	DD_BE_PRIO = 1,
	DD_IDLE_PRIO = 2,
	DD_PRIO_MAX = 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
	uint32_t inserted;
	uint32_t merged;
	uint32_t dispatched;
	atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Position of the most recently dispatched request. */
	sector_t latest_pos[DD_DIR_COUNT];
	struct io_stats_per_prio stats;
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;
	int prio_aging_expire;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE] = DD_BE_PRIO,
	[IOPRIO_CLASS_RT] = DD_RT_PRIO,
	[IOPRIO_CLASS_BE] = DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
};
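
/*
 * Only the I/O priority class of a request matters here; the priority level
 * within a class is ignored. For example, a request submitted with
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4) is scheduled as DD_RT_PRIO just like
 * any other RT request.
 */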

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request before `rq' in sector-sorted order
 */
static inline struct request *
deadline_earlier_request(struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

/*
 * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
 * return the first request after the start of the zone containing @pos.
 */
static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
				enum dd_data_dir data_dir, sector_t pos)
{
	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
	struct request *rq, *res = NULL;

	if (!node)
		return NULL;

	rq = rb_entry_rq(node);
	/*
	 * A zoned write may have been requeued with a starting position that
	 * is below that of the most recently dispatched request. Hence, for
	 * zoned writes, start searching from the start of a zone.
	 */
	if (blk_rq_is_seq_zoned_write(rq))
		pos -= bdev_offset_from_zone_start(rq->q->disk->part0, pos);

	while (node) {
		rq = rb_entry_rq(node);
		if (blk_rq_pos(rq) >= pos) {
			res = rq;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}
	return res;
}
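
/*
 * The loop above is a "lower bound" search on the per-direction red-black
 * tree. As an illustrative example, with requests queued at sectors 8, 64
 * and 128 and @pos == 70, the request at sector 128 is returned; if @pos is
 * beyond every queued request, NULL is returned.
 */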

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	lockdep_assert_held(&dd->lock);

	dd->per_prio[prio].stats.merged++;

	/*
	 * if next expires before req, assign its expire time to req
	 * and move into next's position (next will be deleted) in the fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}
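
/*
 * Note on the FIFO handling above: if @next was inserted earlier than @req,
 * @req inherits @next's earlier expiry time and FIFO position, so merging
 * never pushes pending I/O past its original deadline.
 */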

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->inserted - atomic_read(&stats->completed);
}

/*
 * deadline_check_fifo returns true if and only if there are expired requests
 * in the FIFO list. Requires !list_empty(&per_prio->fifo_list[data_dir]).
 */
static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
				       enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
}

/*
 * Check if rq has a sequential request preceding it.
 */
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
	struct request *prev = deadline_earlier_request(rq);

	if (!prev)
		return false;

	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
}

/*
 * Skip all write requests that are sequential from @rq, even if we cross
 * a zone boundary.
 */
static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
						struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);

	do {
		pos += blk_rq_sectors(rq);
		rq = deadline_latter_request(rq);
	} while (rq && blk_rq_pos(rq) == pos);

	return rq;
}
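
/*
 * Illustrative example for deadline_skip_seq_writes(): with writes queued at
 * sectors 100, 108 and 116 (8 sectors each) and another at sector 200,
 * starting from the request at sector 100 the contiguous run is skipped and
 * the request at sector 200 is returned.
 */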

/*
 * Use zone write locking if QUEUE_FLAG_NO_ZONE_WRITE_LOCK has not been set.
 * Not using zone write locking is only safe if the block driver preserves the
 * request order.
 */
static bool dd_use_zone_write_locking(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !blk_queue_no_zone_write_lock(q);
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq, *rb_rq, *next;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !dd_use_zone_write_locking(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
				 queuelist) {
		/* Check whether a prior request exists for the same zone. */
		rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
		if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
			rq = rb_rq;
		if (blk_req_can_dispatch_to_zone(rq) &&
		    (blk_queue_nonrot(rq->q) ||
		     !deadline_is_seq_write(dd, rq)))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = deadline_from_pos(per_prio, data_dir,
			       per_prio->latest_pos[data_dir]);
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !dd_use_zone_write_locking(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. For some HDDs, breaking a sequential
	 * write stream can lead to lower throughput, so make sure to preserve
	 * sequential write streams, even if that stream crosses into the next
	 * zones and these zones are unlocked.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		if (blk_queue_nonrot(rq->q))
			rq = deadline_latter_request(rq);
		else
			rq = deadline_skip_seq_writes(dd, rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * Returns true if and only if @rq started after @latest_start where
 * @latest_start is in jiffies.
 */
static bool started_after(struct deadline_data *dd, struct request *rq,
			  unsigned long latest_start)
{
	unsigned long start_time = (unsigned long)rq->fifo_time;

	start_time -= dd->fifo_expire[rq_data_dir(rq)];

	return time_after(start_time, latest_start);
}
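
/*
 * Note on the arithmetic above: dd_insert_request() sets rq->fifo_time to
 * jiffies + fifo_expire at insertion time, so subtracting fifo_expire
 * recovers the insertion time. started_after() therefore reports whether a
 * request was inserted after @latest_start, which the priority-aging path
 * uses to skip requests that are not yet old enough.
 */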

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio,
					     unsigned long latest_start)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		if (started_after(dd, rq, latest_start))
			return NULL;
		list_del_init(&rq->queuelist);
		data_dir = rq_data_dir(rq);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch) {
		/* we have a next request and are still entitled to batch */
		data_dir = rq_data_dir(rq);
		goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads queued or writes have been starved
	 * long enough
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device that requires write serialization, if we
	 * only have writes queued and none of them can be dispatched, rq will
	 * be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	if (started_after(dd, rq, latest_start))
		return NULL;

	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
	dd->per_prio[prio].stats.dispatched++;
	/*
	 * If the request needs its target zone locked, do it.
	 */
	if (dd_use_zone_write_locking(rq->q))
		blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
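
/*
 * In summary, __dd_dispatch_request() picks a request in this order:
 * 1) a previously requeued request on the per-priority dispatch list,
 * 2) the next request of the current batch, in sector order and in the same
 *    direction as the last dispatched request,
 * 3) otherwise a new batch is started: reads are preferred unless writes
 *    have already been passed over dd->writes_starved times, and the batch
 *    starts either from the oldest FIFO entry (deadline expired or no
 *    higher-sectored request left) or from the next request in sector order.
 */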

/*
 * Check whether there are any requests with priority other than DD_RT_PRIO
 * that were inserted more than prio_aging_expire jiffies ago.
 */
static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
						      unsigned long now)
{
	struct request *rq;
	enum dd_prio prio;
	int prio_cnt;

	lockdep_assert_held(&dd->lock);

	prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
		   !!dd_queued(dd, DD_IDLE_PRIO);
	if (prio_cnt < 2)
		return NULL;

	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
					   now - dd->prio_aging_expire);
		if (rq)
			return rq;
	}

	return NULL;
}
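
/*
 * Aging only kicks in when requests are pending at two or more priority
 * levels, and DD_RT_PRIO itself is never aged. With the default
 * prio_aging_expire of 10 s, a best-effort or idle request that has been
 * waiting behind real-time I/O for more than ten seconds is dispatched
 * anyway.
 */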

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	const unsigned long now = jiffies;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	rq = dd_dispatch_prio_aged_requests(dd, now);
	if (rq)
		goto unlock;

	/*
	 * Next, dispatch requests in priority order. Ignore lower priority
	 * requests if any higher priority requests are pending.
	 */
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
		if (rq || dd_queued(dd, prio))
			break;
	}

unlock:
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
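
/*
 * For example, with q->nr_requests == 64 (an illustrative value),
 * async_depth becomes 48: asynchronous requests and writes may then occupy
 * at most three quarters of the scheduler tags, while synchronous reads can
 * always allocate from the full depth.
 */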

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];
		const struct io_stats_per_prio *stats = &per_prio->stats;
		uint32_t queued;

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

		spin_lock(&dd->lock);
		queued = dd_queued(dd, prio);
		spin_unlock(&dd->lock);

		WARN_ONCE(queued != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, stats->inserted, stats->merged,
			  stats->dispatched, atomic_read(&stats->completed));
	}

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	dd->prio_aging_expire = prio_aging_expire;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
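
/*
 * Illustrative front merge: a bio covering sectors 56-63 (bio_end_sector()
 * == 64) may be merged in front of a queued request that starts at sector
 * 64; elv_rb_find() looks that request up by its start sector in the
 * per-direction sort tree.
 */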

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head, struct list_head *free)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	per_prio = &dd->per_prio[prio];
	if (!rq->elv.priv[0]) {
		per_prio->stats.inserted++;
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, free))
		return;

	trace_block_rq_insert(rq);

	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
		rq->fifo_time = jiffies;
	} else {
		struct list_head *insert_before;

		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		insert_before = &per_prio->fifo_list[data_dir];
#ifdef CONFIG_BLK_DEV_ZONED
		/*
		 * Insert zoned writes such that requests are sorted by
		 * position per zone.
		 */
		if (blk_rq_is_seq_zoned_write(rq)) {
			struct request *rq2 = deadline_latter_request(rq);

			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
				insert_before = &rq2->queuelist;
		}
#endif
		list_add_tail(&rq->queuelist, insert_before);
	}
}
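
/*
 * rq->elv.priv[0] is used above as a simple "this insertion has been
 * counted" flag: dd_prepare_request() clears it, the first insertion sets it
 * and bumps stats.inserted, and dd_finish_request() only increments
 * stats.completed for requests that carry the flag. This keeps the
 * inserted/completed accounting balanced for requeues and for requests that
 * bypass the I/O scheduler.
 */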

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	LIST_HEAD(free);

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head, &free);
	}
	spin_unlock(&dd->lock);

	blk_mq_free_requests(&free);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio p;

	for (p = 0; p <= DD_PRIO_MAX; p++)
		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
			return true;

	return false;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	atomic_inc(&per_prio->stats.completed);

	if (dd_use_zone_write_locking(rq->q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);

		if (dd_has_write_work(rq->mq_hctx))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
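
/*
 * The SHOW_*() and STORE_*() macros generate the sysfs handlers for the
 * attributes in deadline_attrs[] below; the *_JIFFIES variants convert
 * between milliseconds (user visible) and jiffies (internal). A typical
 * tuning command (path illustrative, <dev> is a placeholder):
 *
 *   echo 250 > /sys/block/<dev>/queue/iosched/read_expire
 *
 * stores msecs_to_jiffies(250) in dd->fifo_expire[DD_READ].
 */
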
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	DD_ATTR(prio_aging_expire),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq;						\
									\
	rq = deadline_from_pos(per_prio, data_dir,			\
			       per_prio->latest_pos[data_dir]);		\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
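
/*
 * The seq_ops and show functions generated above are wired up through
 * deadline_queue_debugfs_attrs[] further below and typically appear under
 * /sys/kernel/debug/block/<dev>/sched/ (path illustrative), e.g.
 * read1_fifo_list lists the best-effort read FIFO and write0_next_rq shows
 * the next real-time write candidate in sector order.
 */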

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_queued(dd, DD_RT_PRIO);
	be = dd_queued(dd, DD_BE_PRIO);
	idle = dd_queued(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

	lockdep_assert_held(&dd->lock);

	return stats->dispatched + stats->merged -
		atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;
	u32 rt, be, idle;

	spin_lock(&dd->lock);
	rt = dd_owned_by_driver(dd, DD_RT_PRIO);
	be = dd_owned_by_driver(dd, DD_BE_PRIO);
	idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
	spin_unlock(&dd->lock);

	seq_printf(m, "%u %u %u\n", rt, be, idle);

	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,	\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated = dd_depth_updated,
		.limit_depth = dd_limit_depth,
		.insert_requests = dd_insert_requests,
		.dispatch_request = dd_dispatch_request,
		.prepare_request = dd_prepare_request,
		.finish_request = dd_finish_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.bio_merge = dd_bio_merge,
		.request_merge = dd_request_merge,
		.requests_merged = dd_merged_requests,
		.request_merged = dd_request_merged,
		.has_work = dd_has_work,
		.init_sched = dd_init_sched,
		.exit_sched = dd_exit_sched,
		.init_hctx = dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");