/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug-in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

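/* All registered elevator types live on elv_list, protected by elv_list_lock. */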
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: requests are hashed on the sector immediately following them
 * (their end sector), so a bio starting there can be found as a back merge
 * candidate.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
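/*
 * Example (illustrative): a request covering sectors 2048..2055 (8 sectors)
 * hashes under key 2056; a new bio whose first sector is 2056 will find it
 * here and can be back merged onto it.
 */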

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

/*
 * Return the scheduler with name 'name' and matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name) && (mq == e->uses_mq))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->mq_ops != NULL);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->mq_ops != NULL);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
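/*
 * Example (legacy request_fn path only): booting with "elevator=deadline"
 * makes "deadline" the default scheduler for single-queue devices; blk-mq
 * devices ignore this parameter.
 */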

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	/*
	 * The boot parameter is deprecated and has never been supported
	 * for blk-mq. Only look for non-mq (legacy) schedulers here.
	 */
	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator, false);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(q, name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by the boot parameter for
	 * non-mq devices, or by the config option. Don't try to load
	 * modules, as we could be running from async context and
	 * request_module() is not allowed from async.
	 */
	if (!e && !q->mq_ops && *chosen_elevator) {
		e = elevator_get(q, chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		/*
		 * For blk-mq devices with a single hardware queue, default
		 * to mq-deadline if it is available. If mq-deadline isn't
		 * available, or the device has multiple queues, default
		 * to "none".
		 */
		if (q->mq_ops) {
			if (q->nr_hw_queues == 1)
				e = elevator_get(q, "mq-deadline", false);
			if (!e)
				return 0;
		} else
			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get(q, "noop", false);
		}
	}

	if (e->uses_mq)
		err = blk_mq_init_sched(q, e);
	else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		blk_mq_exit_sched(q, e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
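/*
 * Note: the tree is keyed on blk_rq_pos() (the request's start sector);
 * requests with equal start sectors are placed to the right.
 */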
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * The request already went through dequeue; we need to decrement
	 * the in_flight count again.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (!blk_rq_is_passthrough(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back-inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(blk_rq_is_passthrough(rq));
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
		/* Re-enable throttling in case elevator disabled it */
		wbt_enable_default(q);
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, e->uses_mq)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
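/*
 * Rough sketch of how a legacy (single-queue) scheduler module registers
 * itself. The callback names below are illustrative; only the elevator_type
 * fields and elv_register()/elv_unregister() come from this interface:
 *
 *	static struct elevator_type my_iosched = {
 *		.ops.sq = {
 *			.elevator_init_fn	= my_init_queue,
 *			.elevator_exit_fn	= my_exit_queue,
 *			.elevator_add_req_fn	= my_add_request,
 *			.elevator_dispatch_fn	= my_dispatch,
 *		},
 *		.elevator_name	= "my-iosched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init my_iosched_init(void)
 *	{
 *		return elv_register(&my_iosched);
 *	}
 *
 *	static void __exit my_iosched_exit(void)
 *	{
 *		elv_unregister(&my_iosched);
 *	}
 *
 * A blk-mq scheduler registers the same way, but fills in ops.mq and sets
 * uses_mq = true.
 */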

static int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	blk_mq_freeze_queue(q);

	if (q->elevator) {
		if (q->elevator->registered)
			elv_unregister_queue(q);
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	blk_mq_unfreeze_queue(q);
	return ret;
}

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce deadlocks:
 * we don't free the old io scheduler before we have allocated what we need
 * for the new one.  This way we have a chance of going back to the old one
 * if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	if (q->mq_ops)
		return elevator_switch_mq(q, new_e);

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		ioc_clear_queue(q);
	}

	/* allocate, init and register new elevator */
	err = new_e->ops.sq.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	err = elv_register_queue(q);
	if (err)
		goto fail_register;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(q, old);
		blk_queue_bypass_end(q);
	}

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q, q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		blk_queue_bypass_end(q);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator &&
	    !strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
				BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}
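/*
 * elv_iosched_store() and elv_iosched_show() back the "scheduler" queue
 * sysfs attribute, e.g. /sys/block/sda/queue/scheduler (the exact path
 * depends on the device name): writing a scheduler name switches to it,
 * reading lists the available schedulers with the active one in brackets.
 */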

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!blk_queue_stackable(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(name+len, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);