1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 #include <linux/crash_dump.h>
24 
25 #include <trace/events/block.h>
26 
27 #include <linux/blk-mq.h>
28 #include "blk.h"
29 #include "blk-mq.h"
30 #include "blk-mq-tag.h"
31 
32 static DEFINE_MUTEX(all_q_mutex);
33 static LIST_HEAD(all_q_list);
34 
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
36 
37 /*
38  * Check if any of the ctx's have pending work in this hardware queue
39  */
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41 {
42 	unsigned int i;
43 
44 	for (i = 0; i < hctx->ctx_map.map_size; i++)
45 		if (hctx->ctx_map.map[i].word)
46 			return true;
47 
48 	return false;
49 }
50 
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
52 					      struct blk_mq_ctx *ctx)
53 {
54 	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
55 }
56 
57 #define CTX_TO_BIT(hctx, ctx)	\
58 	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
59 
60 /*
61  * Mark this ctx as having pending work in this hardware queue
62  */
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
64 				     struct blk_mq_ctx *ctx)
65 {
66 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
67 
68 	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
69 		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
70 }
71 
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
73 				      struct blk_mq_ctx *ctx)
74 {
75 	struct blk_align_bitmap *bm = get_bm(hctx, ctx);
76 
77 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
78 }
79 
80 static int blk_mq_queue_enter(struct request_queue *q)
81 {
82 	while (true) {
83 		int ret;
84 
85 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
86 			return 0;
87 
88 		ret = wait_event_interruptible(q->mq_freeze_wq,
89 				!q->mq_freeze_depth || blk_queue_dying(q));
90 		if (blk_queue_dying(q))
91 			return -ENODEV;
92 		if (ret)
93 			return ret;
94 	}
95 }
96 
97 static void blk_mq_queue_exit(struct request_queue *q)
98 {
99 	percpu_ref_put(&q->mq_usage_counter);
100 }
101 
102 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
103 {
104 	struct request_queue *q =
105 		container_of(ref, struct request_queue, mq_usage_counter);
106 
107 	wake_up_all(&q->mq_freeze_wq);
108 }
109 
110 static void blk_mq_freeze_queue_start(struct request_queue *q)
111 {
112 	bool freeze;
113 
114 	spin_lock_irq(q->queue_lock);
115 	freeze = !q->mq_freeze_depth++;
116 	spin_unlock_irq(q->queue_lock);
117 
118 	if (freeze) {
119 		percpu_ref_kill(&q->mq_usage_counter);
120 		blk_mq_run_queues(q, false);
121 	}
122 }
123 
124 static void blk_mq_freeze_queue_wait(struct request_queue *q)
125 {
126 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
127 }
128 
129 /*
130  * Guarantee no request is in use, so we can change any data structure of
131  * the queue afterward.
132  */
133 void blk_mq_freeze_queue(struct request_queue *q)
134 {
135 	blk_mq_freeze_queue_start(q);
136 	blk_mq_freeze_queue_wait(q);
137 }
138 
139 static void blk_mq_unfreeze_queue(struct request_queue *q)
140 {
141 	bool wake;
142 
143 	spin_lock_irq(q->queue_lock);
144 	wake = !--q->mq_freeze_depth;
145 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
146 	spin_unlock_irq(q->queue_lock);
147 	if (wake) {
148 		percpu_ref_reinit(&q->mq_usage_counter);
149 		wake_up_all(&q->mq_freeze_wq);
150 	}
151 }
152 
153 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
154 {
155 	return blk_mq_has_free_tags(hctx->tags);
156 }
157 EXPORT_SYMBOL(blk_mq_can_queue);
158 
159 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
160 			       struct request *rq, unsigned int rw_flags)
161 {
162 	if (blk_queue_io_stat(q))
163 		rw_flags |= REQ_IO_STAT;
164 
165 	INIT_LIST_HEAD(&rq->queuelist);
166 	/* csd/requeue_work/fifo_time is initialized before use */
167 	rq->q = q;
168 	rq->mq_ctx = ctx;
169 	rq->cmd_flags |= rw_flags;
170 	/* do not touch atomic flags, it needs atomic ops against the timer */
171 	rq->cpu = -1;
172 	INIT_HLIST_NODE(&rq->hash);
173 	RB_CLEAR_NODE(&rq->rb_node);
174 	rq->rq_disk = NULL;
175 	rq->part = NULL;
176 	rq->start_time = jiffies;
177 #ifdef CONFIG_BLK_CGROUP
178 	rq->rl = NULL;
179 	set_start_time_ns(rq);
180 	rq->io_start_time_ns = 0;
181 #endif
182 	rq->nr_phys_segments = 0;
183 #if defined(CONFIG_BLK_DEV_INTEGRITY)
184 	rq->nr_integrity_segments = 0;
185 #endif
186 	rq->special = NULL;
187 	/* tag was already set */
188 	rq->errors = 0;
189 
190 	rq->cmd = rq->__cmd;
191 
192 	rq->extra_len = 0;
193 	rq->sense_len = 0;
194 	rq->resid_len = 0;
195 	rq->sense = NULL;
196 
197 	INIT_LIST_HEAD(&rq->timeout_list);
198 	rq->timeout = 0;
199 
200 	rq->end_io = NULL;
201 	rq->end_io_data = NULL;
202 	rq->next_rq = NULL;
203 
204 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
205 }
206 
207 static struct request *
208 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
209 {
210 	struct request *rq;
211 	unsigned int tag;
212 
213 	tag = blk_mq_get_tag(data);
214 	if (tag != BLK_MQ_TAG_FAIL) {
215 		rq = data->hctx->tags->rqs[tag];
216 
217 		if (blk_mq_tag_busy(data->hctx)) {
218 			rq->cmd_flags = REQ_MQ_INFLIGHT;
219 			atomic_inc(&data->hctx->nr_active);
220 		}
221 
222 		rq->tag = tag;
223 		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
224 		return rq;
225 	}
226 
227 	return NULL;
228 }
229 
230 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
231 		bool reserved)
232 {
233 	struct blk_mq_ctx *ctx;
234 	struct blk_mq_hw_ctx *hctx;
235 	struct request *rq;
236 	struct blk_mq_alloc_data alloc_data;
237 	int ret;
238 
239 	ret = blk_mq_queue_enter(q);
240 	if (ret)
241 		return ERR_PTR(ret);
242 
243 	ctx = blk_mq_get_ctx(q);
244 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
245 	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
246 			reserved, ctx, hctx);
247 
248 	rq = __blk_mq_alloc_request(&alloc_data, rw);
249 	if (!rq && (gfp & __GFP_WAIT)) {
250 		__blk_mq_run_hw_queue(hctx);
251 		blk_mq_put_ctx(ctx);
252 
253 		ctx = blk_mq_get_ctx(q);
254 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
255 		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
256 				hctx);
257 		rq =  __blk_mq_alloc_request(&alloc_data, rw);
258 		ctx = alloc_data.ctx;
259 	}
260 	blk_mq_put_ctx(ctx);
261 	if (!rq)
262 		return ERR_PTR(-EWOULDBLOCK);
263 	return rq;
264 }
265 EXPORT_SYMBOL(blk_mq_alloc_request);
266 
267 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
268 				  struct blk_mq_ctx *ctx, struct request *rq)
269 {
270 	const int tag = rq->tag;
271 	struct request_queue *q = rq->q;
272 
273 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
274 		atomic_dec(&hctx->nr_active);
275 	rq->cmd_flags = 0;
276 
277 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
278 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
279 	blk_mq_queue_exit(q);
280 }
281 
282 void blk_mq_free_request(struct request *rq)
283 {
284 	struct blk_mq_ctx *ctx = rq->mq_ctx;
285 	struct blk_mq_hw_ctx *hctx;
286 	struct request_queue *q = rq->q;
287 
288 	ctx->rq_completed[rq_is_sync(rq)]++;
289 
290 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
291 	__blk_mq_free_request(hctx, ctx, rq);
292 }
293 
294 inline void __blk_mq_end_request(struct request *rq, int error)
295 {
296 	blk_account_io_done(rq);
297 
298 	if (rq->end_io) {
299 		rq->end_io(rq, error);
300 	} else {
301 		if (unlikely(blk_bidi_rq(rq)))
302 			blk_mq_free_request(rq->next_rq);
303 		blk_mq_free_request(rq);
304 	}
305 }
306 EXPORT_SYMBOL(__blk_mq_end_request);
307 
308 void blk_mq_end_request(struct request *rq, int error)
309 {
310 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
311 		BUG();
312 	__blk_mq_end_request(rq, error);
313 }
314 EXPORT_SYMBOL(blk_mq_end_request);
315 
316 static void __blk_mq_complete_request_remote(void *data)
317 {
318 	struct request *rq = data;
319 
320 	rq->q->softirq_done_fn(rq);
321 }
322 
323 static void blk_mq_ipi_complete_request(struct request *rq)
324 {
325 	struct blk_mq_ctx *ctx = rq->mq_ctx;
326 	bool shared = false;
327 	int cpu;
328 
329 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
330 		rq->q->softirq_done_fn(rq);
331 		return;
332 	}
333 
334 	cpu = get_cpu();
335 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
336 		shared = cpus_share_cache(cpu, ctx->cpu);
337 
338 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
339 		rq->csd.func = __blk_mq_complete_request_remote;
340 		rq->csd.info = rq;
341 		rq->csd.flags = 0;
342 		smp_call_function_single_async(ctx->cpu, &rq->csd);
343 	} else {
344 		rq->q->softirq_done_fn(rq);
345 	}
346 	put_cpu();
347 }
348 
349 void __blk_mq_complete_request(struct request *rq)
350 {
351 	struct request_queue *q = rq->q;
352 
353 	if (!q->softirq_done_fn)
354 		blk_mq_end_request(rq, rq->errors);
355 	else
356 		blk_mq_ipi_complete_request(rq);
357 }
358 
359 /**
360  * blk_mq_complete_request - end I/O on a request
361  * @rq:		the request being processed
362  *
363  * Description:
364  *	Ends all I/O on a request. It does not handle partial completions.
365  *	The actual completion happens out-of-order, through an IPI handler.
366  **/
367 void blk_mq_complete_request(struct request *rq)
368 {
369 	struct request_queue *q = rq->q;
370 
371 	if (unlikely(blk_should_fake_timeout(q)))
372 		return;
373 	if (!blk_mark_rq_complete(rq))
374 		__blk_mq_complete_request(rq);
375 }
376 EXPORT_SYMBOL(blk_mq_complete_request);
377 
378 void blk_mq_start_request(struct request *rq)
379 {
380 	struct request_queue *q = rq->q;
381 
382 	trace_block_rq_issue(q, rq);
383 
384 	rq->resid_len = blk_rq_bytes(rq);
385 	if (unlikely(blk_bidi_rq(rq)))
386 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
387 
388 	blk_add_timer(rq);
389 
390 	/*
391 	 * Ensure that ->deadline is visible before we set the started
392 	 * flag and clear the completed flag.
393 	 */
394 	smp_mb__before_atomic();
395 
396 	/*
397 	 * Mark us as started and clear complete. Complete might have been
398 	 * set if requeue raced with timeout, which then marked it as
399 	 * complete. So be sure to clear complete again when we start
400 	 * the request, otherwise we'll ignore the completion event.
401 	 */
402 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
403 		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
404 	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
405 		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
406 
407 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
408 		/*
409 		 * Make sure space for the drain appears.  We know we can do
410 		 * this because max_hw_segments has been adjusted to be one
411 		 * fewer than the device can handle.
412 		 */
413 		rq->nr_phys_segments++;
414 	}
415 }
416 EXPORT_SYMBOL(blk_mq_start_request);
417 
418 static void __blk_mq_requeue_request(struct request *rq)
419 {
420 	struct request_queue *q = rq->q;
421 
422 	trace_block_rq_requeue(q, rq);
423 
424 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
425 		if (q->dma_drain_size && blk_rq_bytes(rq))
426 			rq->nr_phys_segments--;
427 	}
428 }
429 
430 void blk_mq_requeue_request(struct request *rq)
431 {
432 	__blk_mq_requeue_request(rq);
433 
434 	BUG_ON(blk_queued_rq(rq));
435 	blk_mq_add_to_requeue_list(rq, true);
436 }
437 EXPORT_SYMBOL(blk_mq_requeue_request);
438 
439 static void blk_mq_requeue_work(struct work_struct *work)
440 {
441 	struct request_queue *q =
442 		container_of(work, struct request_queue, requeue_work);
443 	LIST_HEAD(rq_list);
444 	struct request *rq, *next;
445 	unsigned long flags;
446 
447 	spin_lock_irqsave(&q->requeue_lock, flags);
448 	list_splice_init(&q->requeue_list, &rq_list);
449 	spin_unlock_irqrestore(&q->requeue_lock, flags);
450 
451 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
452 		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
453 			continue;
454 
455 		rq->cmd_flags &= ~REQ_SOFTBARRIER;
456 		list_del_init(&rq->queuelist);
457 		blk_mq_insert_request(rq, true, false, false);
458 	}
459 
460 	while (!list_empty(&rq_list)) {
461 		rq = list_entry(rq_list.next, struct request, queuelist);
462 		list_del_init(&rq->queuelist);
463 		blk_mq_insert_request(rq, false, false, false);
464 	}
465 
466 	/*
467 	 * Use the start variant of queue running here, so that running
468 	 * the requeue work will kick stopped queues.
469 	 */
470 	blk_mq_start_hw_queues(q);
471 }
472 
473 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
474 {
475 	struct request_queue *q = rq->q;
476 	unsigned long flags;
477 
478 	/*
479 	 * We abuse this flag that is otherwise used by the I/O scheduler to
480 	 * request head insertion from the workqueue.
481 	 */
482 	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
483 
484 	spin_lock_irqsave(&q->requeue_lock, flags);
485 	if (at_head) {
486 		rq->cmd_flags |= REQ_SOFTBARRIER;
487 		list_add(&rq->queuelist, &q->requeue_list);
488 	} else {
489 		list_add_tail(&rq->queuelist, &q->requeue_list);
490 	}
491 	spin_unlock_irqrestore(&q->requeue_lock, flags);
492 }
493 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
494 
495 void blk_mq_kick_requeue_list(struct request_queue *q)
496 {
497 	kblockd_schedule_work(&q->requeue_work);
498 }
499 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
500 
501 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
502 {
503 	return tags->rqs[tag];
504 }
505 EXPORT_SYMBOL(blk_mq_tag_to_rq);
506 
507 struct blk_mq_timeout_data {
508 	unsigned long next;
509 	unsigned int next_set;
510 };
511 
512 void blk_mq_rq_timed_out(struct request *req, bool reserved)
513 {
514 	struct blk_mq_ops *ops = req->q->mq_ops;
515 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
516 
517 	/*
518 	 * We know that complete is set at this point. If STARTED isn't set
519 	 * anymore, then the request isn't active and the "timeout" should
520 	 * just be ignored. This can happen due to the bitflag ordering.
521 	 * Timeout first checks if STARTED is set, and if it is, assumes
522 	 * the request is active. But if we race with completion, then
523 	 * both flags will get cleared. So check here again, and ignore
524 	 * a timeout event with a request that isn't active.
525 	 */
526 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
527 		return;
528 
529 	if (ops->timeout)
530 		ret = ops->timeout(req, reserved);
531 
532 	switch (ret) {
533 	case BLK_EH_HANDLED:
534 		__blk_mq_complete_request(req);
535 		break;
536 	case BLK_EH_RESET_TIMER:
537 		blk_add_timer(req);
538 		blk_clear_rq_complete(req);
539 		break;
540 	case BLK_EH_NOT_HANDLED:
541 		break;
542 	default:
543 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
544 		break;
545 	}
546 }
547 
548 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
549 		struct request *rq, void *priv, bool reserved)
550 {
551 	struct blk_mq_timeout_data *data = priv;
552 
553 	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
554 		return;
555 
556 	if (time_after_eq(jiffies, rq->deadline)) {
557 		if (!blk_mark_rq_complete(rq))
558 			blk_mq_rq_timed_out(rq, reserved);
559 	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
560 		data->next = rq->deadline;
561 		data->next_set = 1;
562 	}
563 }
564 
565 static void blk_mq_rq_timer(unsigned long priv)
566 {
567 	struct request_queue *q = (struct request_queue *)priv;
568 	struct blk_mq_timeout_data data = {
569 		.next		= 0,
570 		.next_set	= 0,
571 	};
572 	struct blk_mq_hw_ctx *hctx;
573 	int i;
574 
575 	queue_for_each_hw_ctx(q, hctx, i) {
576 		/*
577 		 * If no software queues are currently mapped to this
578 		 * hardware queue, there's nothing to check
579 		 */
580 		if (!hctx->nr_ctx || !hctx->tags)
581 			continue;
582 
583 		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
584 	}
585 
586 	if (data.next_set) {
587 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
588 		mod_timer(&q->timeout, data.next);
589 	} else {
590 		queue_for_each_hw_ctx(q, hctx, i)
591 			blk_mq_tag_idle(hctx);
592 	}
593 }
594 
595 /*
596  * Reverse check our software queue for entries that we could potentially
597  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
598  * too much time checking for merges.
599  */
600 static bool blk_mq_attempt_merge(struct request_queue *q,
601 				 struct blk_mq_ctx *ctx, struct bio *bio)
602 {
603 	struct request *rq;
604 	int checked = 8;
605 
606 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
607 		int el_ret;
608 
609 		if (!checked--)
610 			break;
611 
612 		if (!blk_rq_merge_ok(rq, bio))
613 			continue;
614 
615 		el_ret = blk_try_merge(rq, bio);
616 		if (el_ret == ELEVATOR_BACK_MERGE) {
617 			if (bio_attempt_back_merge(q, rq, bio)) {
618 				ctx->rq_merged++;
619 				return true;
620 			}
621 			break;
622 		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
623 			if (bio_attempt_front_merge(q, rq, bio)) {
624 				ctx->rq_merged++;
625 				return true;
626 			}
627 			break;
628 		}
629 	}
630 
631 	return false;
632 }
633 
634 /*
635  * Process software queues that have been marked busy, splicing them
636  * to the for-dispatch list.
637  */
638 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
639 {
640 	struct blk_mq_ctx *ctx;
641 	int i;
642 
643 	for (i = 0; i < hctx->ctx_map.map_size; i++) {
644 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
645 		unsigned int off, bit;
646 
647 		if (!bm->word)
648 			continue;
649 
650 		bit = 0;
651 		off = i * hctx->ctx_map.bits_per_word;
652 		do {
653 			bit = find_next_bit(&bm->word, bm->depth, bit);
654 			if (bit >= bm->depth)
655 				break;
656 
657 			ctx = hctx->ctxs[bit + off];
658 			clear_bit(bit, &bm->word);
659 			spin_lock(&ctx->lock);
660 			list_splice_tail_init(&ctx->rq_list, list);
661 			spin_unlock(&ctx->lock);
662 
663 			bit++;
664 		} while (1);
665 	}
666 }
667 
668 /*
669  * Run this hardware queue, pulling any software queues mapped to it in.
670  * Note that this function currently has various problems around ordering
671  * of IO. In particular, we'd like FIFO behaviour on handling existing
672  * items on the hctx->dispatch list. Ignore that for now.
673  */
674 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
675 {
676 	struct request_queue *q = hctx->queue;
677 	struct request *rq;
678 	LIST_HEAD(rq_list);
679 	int queued;
680 
681 	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
682 
683 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
684 		return;
685 
686 	hctx->run++;
687 
688 	/*
689 	 * Touch any software queue that has pending entries.
690 	 */
691 	flush_busy_ctxs(hctx, &rq_list);
692 
693 	/*
694 	 * If we have previous entries on our dispatch list, grab them
695 	 * and stuff them at the front for more fair dispatch.
696 	 */
697 	if (!list_empty_careful(&hctx->dispatch)) {
698 		spin_lock(&hctx->lock);
699 		if (!list_empty(&hctx->dispatch))
700 			list_splice_init(&hctx->dispatch, &rq_list);
701 		spin_unlock(&hctx->lock);
702 	}
703 
704 	/*
705 	 * Now process all the entries, sending them to the driver.
706 	 */
707 	queued = 0;
708 	while (!list_empty(&rq_list)) {
709 		int ret;
710 
711 		rq = list_first_entry(&rq_list, struct request, queuelist);
712 		list_del_init(&rq->queuelist);
713 
714 		ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
715 		switch (ret) {
716 		case BLK_MQ_RQ_QUEUE_OK:
717 			queued++;
718 			continue;
719 		case BLK_MQ_RQ_QUEUE_BUSY:
720 			list_add(&rq->queuelist, &rq_list);
721 			__blk_mq_requeue_request(rq);
722 			break;
723 		default:
724 			pr_err("blk-mq: bad return on queue: %d\n", ret);
725 		case BLK_MQ_RQ_QUEUE_ERROR:
726 			rq->errors = -EIO;
727 			blk_mq_end_request(rq, rq->errors);
728 			break;
729 		}
730 
731 		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
732 			break;
733 	}
734 
735 	if (!queued)
736 		hctx->dispatched[0]++;
737 	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
738 		hctx->dispatched[ilog2(queued) + 1]++;
739 
740 	/*
741 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
742 	 * that is where we will continue on next queue run.
743 	 */
744 	if (!list_empty(&rq_list)) {
745 		spin_lock(&hctx->lock);
746 		list_splice(&rq_list, &hctx->dispatch);
747 		spin_unlock(&hctx->lock);
748 	}
749 }
750 
751 /*
752  * It'd be great if the workqueue API had a way to pass
753  * in a mask and had some smarts for more clever placement.
754  * For now we just round-robin here, switching for every
755  * BLK_MQ_CPU_WORK_BATCH queued items.
756  */
757 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
758 {
759 	int cpu = hctx->next_cpu;
760 
761 	if (--hctx->next_cpu_batch <= 0) {
762 		int next_cpu;
763 
764 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
765 		if (next_cpu >= nr_cpu_ids)
766 			next_cpu = cpumask_first(hctx->cpumask);
767 
768 		hctx->next_cpu = next_cpu;
769 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
770 	}
771 
772 	return cpu;
773 }
774 
775 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
776 {
777 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
778 		return;
779 
780 	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
781 		__blk_mq_run_hw_queue(hctx);
782 	else if (hctx->queue->nr_hw_queues == 1)
783 		kblockd_schedule_delayed_work(&hctx->run_work, 0);
784 	else {
785 		unsigned int cpu;
786 
787 		cpu = blk_mq_hctx_next_cpu(hctx);
788 		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
789 	}
790 }
791 
792 void blk_mq_run_queues(struct request_queue *q, bool async)
793 {
794 	struct blk_mq_hw_ctx *hctx;
795 	int i;
796 
797 	queue_for_each_hw_ctx(q, hctx, i) {
798 		if ((!blk_mq_hctx_has_pending(hctx) &&
799 		    list_empty_careful(&hctx->dispatch)) ||
800 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
801 			continue;
802 
803 		preempt_disable();
804 		blk_mq_run_hw_queue(hctx, async);
805 		preempt_enable();
806 	}
807 }
808 EXPORT_SYMBOL(blk_mq_run_queues);
809 
810 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
811 {
812 	cancel_delayed_work(&hctx->run_work);
813 	cancel_delayed_work(&hctx->delay_work);
814 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
815 }
816 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
817 
818 void blk_mq_stop_hw_queues(struct request_queue *q)
819 {
820 	struct blk_mq_hw_ctx *hctx;
821 	int i;
822 
823 	queue_for_each_hw_ctx(q, hctx, i)
824 		blk_mq_stop_hw_queue(hctx);
825 }
826 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
827 
828 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
829 {
830 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
831 
832 	preempt_disable();
833 	blk_mq_run_hw_queue(hctx, false);
834 	preempt_enable();
835 }
836 EXPORT_SYMBOL(blk_mq_start_hw_queue);
837 
838 void blk_mq_start_hw_queues(struct request_queue *q)
839 {
840 	struct blk_mq_hw_ctx *hctx;
841 	int i;
842 
843 	queue_for_each_hw_ctx(q, hctx, i)
844 		blk_mq_start_hw_queue(hctx);
845 }
846 EXPORT_SYMBOL(blk_mq_start_hw_queues);
847 
848 
849 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
850 {
851 	struct blk_mq_hw_ctx *hctx;
852 	int i;
853 
854 	queue_for_each_hw_ctx(q, hctx, i) {
855 		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
856 			continue;
857 
858 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
859 		preempt_disable();
860 		blk_mq_run_hw_queue(hctx, async);
861 		preempt_enable();
862 	}
863 }
864 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
865 
866 static void blk_mq_run_work_fn(struct work_struct *work)
867 {
868 	struct blk_mq_hw_ctx *hctx;
869 
870 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
871 
872 	__blk_mq_run_hw_queue(hctx);
873 }
874 
875 static void blk_mq_delay_work_fn(struct work_struct *work)
876 {
877 	struct blk_mq_hw_ctx *hctx;
878 
879 	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
880 
881 	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
882 		__blk_mq_run_hw_queue(hctx);
883 }
884 
885 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
886 {
887 	unsigned long tmo = msecs_to_jiffies(msecs);
888 
889 	if (hctx->queue->nr_hw_queues == 1)
890 		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
891 	else {
892 		unsigned int cpu;
893 
894 		cpu = blk_mq_hctx_next_cpu(hctx);
895 		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
896 	}
897 }
898 EXPORT_SYMBOL(blk_mq_delay_queue);
899 
900 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
901 				    struct request *rq, bool at_head)
902 {
903 	struct blk_mq_ctx *ctx = rq->mq_ctx;
904 
905 	trace_block_rq_insert(hctx->queue, rq);
906 
907 	if (at_head)
908 		list_add(&rq->queuelist, &ctx->rq_list);
909 	else
910 		list_add_tail(&rq->queuelist, &ctx->rq_list);
911 
912 	blk_mq_hctx_mark_pending(hctx, ctx);
913 }
914 
915 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
916 		bool async)
917 {
918 	struct request_queue *q = rq->q;
919 	struct blk_mq_hw_ctx *hctx;
920 	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
921 
922 	current_ctx = blk_mq_get_ctx(q);
923 	if (!cpu_online(ctx->cpu))
924 		rq->mq_ctx = ctx = current_ctx;
925 
926 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
927 
928 	spin_lock(&ctx->lock);
929 	__blk_mq_insert_request(hctx, rq, at_head);
930 	spin_unlock(&ctx->lock);
931 
932 	if (run_queue)
933 		blk_mq_run_hw_queue(hctx, async);
934 
935 	blk_mq_put_ctx(current_ctx);
936 }
937 
938 static void blk_mq_insert_requests(struct request_queue *q,
939 				     struct blk_mq_ctx *ctx,
940 				     struct list_head *list,
941 				     int depth,
942 				     bool from_schedule)
943 
944 {
945 	struct blk_mq_hw_ctx *hctx;
946 	struct blk_mq_ctx *current_ctx;
947 
948 	trace_block_unplug(q, depth, !from_schedule);
949 
950 	current_ctx = blk_mq_get_ctx(q);
951 
952 	if (!cpu_online(ctx->cpu))
953 		ctx = current_ctx;
954 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
955 
956 	/*
957 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
958 	 * offline now
959 	 */
960 	spin_lock(&ctx->lock);
961 	while (!list_empty(list)) {
962 		struct request *rq;
963 
964 		rq = list_first_entry(list, struct request, queuelist);
965 		list_del_init(&rq->queuelist);
966 		rq->mq_ctx = ctx;
967 		__blk_mq_insert_request(hctx, rq, false);
968 	}
969 	spin_unlock(&ctx->lock);
970 
971 	blk_mq_run_hw_queue(hctx, from_schedule);
972 	blk_mq_put_ctx(current_ctx);
973 }
974 
975 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
976 {
977 	struct request *rqa = container_of(a, struct request, queuelist);
978 	struct request *rqb = container_of(b, struct request, queuelist);
979 
980 	return !(rqa->mq_ctx < rqb->mq_ctx ||
981 		 (rqa->mq_ctx == rqb->mq_ctx &&
982 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
983 }
984 
985 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
986 {
987 	struct blk_mq_ctx *this_ctx;
988 	struct request_queue *this_q;
989 	struct request *rq;
990 	LIST_HEAD(list);
991 	LIST_HEAD(ctx_list);
992 	unsigned int depth;
993 
994 	list_splice_init(&plug->mq_list, &list);
995 
996 	list_sort(NULL, &list, plug_ctx_cmp);
997 
998 	this_q = NULL;
999 	this_ctx = NULL;
1000 	depth = 0;
1001 
1002 	while (!list_empty(&list)) {
1003 		rq = list_entry_rq(list.next);
1004 		list_del_init(&rq->queuelist);
1005 		BUG_ON(!rq->q);
1006 		if (rq->mq_ctx != this_ctx) {
1007 			if (this_ctx) {
1008 				blk_mq_insert_requests(this_q, this_ctx,
1009 							&ctx_list, depth,
1010 							from_schedule);
1011 			}
1012 
1013 			this_ctx = rq->mq_ctx;
1014 			this_q = rq->q;
1015 			depth = 0;
1016 		}
1017 
1018 		depth++;
1019 		list_add_tail(&rq->queuelist, &ctx_list);
1020 	}
1021 
1022 	/*
1023 	 * If 'this_ctx' is set, we know we have entries to complete
1024 	 * on 'ctx_list'. Do those.
1025 	 */
1026 	if (this_ctx) {
1027 		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1028 				       from_schedule);
1029 	}
1030 }
1031 
1032 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1033 {
1034 	init_request_from_bio(rq, bio);
1035 
1036 	if (blk_do_io_stat(rq))
1037 		blk_account_io_start(rq, 1);
1038 }
1039 
1040 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1041 {
1042 	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1043 		!blk_queue_nomerges(hctx->queue);
1044 }
1045 
1046 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1047 					 struct blk_mq_ctx *ctx,
1048 					 struct request *rq, struct bio *bio)
1049 {
1050 	if (!hctx_allow_merges(hctx)) {
1051 		blk_mq_bio_to_request(rq, bio);
1052 		spin_lock(&ctx->lock);
1053 insert_rq:
1054 		__blk_mq_insert_request(hctx, rq, false);
1055 		spin_unlock(&ctx->lock);
1056 		return false;
1057 	} else {
1058 		struct request_queue *q = hctx->queue;
1059 
1060 		spin_lock(&ctx->lock);
1061 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
1062 			blk_mq_bio_to_request(rq, bio);
1063 			goto insert_rq;
1064 		}
1065 
1066 		spin_unlock(&ctx->lock);
1067 		__blk_mq_free_request(hctx, ctx, rq);
1068 		return true;
1069 	}
1070 }
1071 
1072 struct blk_map_ctx {
1073 	struct blk_mq_hw_ctx *hctx;
1074 	struct blk_mq_ctx *ctx;
1075 };
1076 
1077 static struct request *blk_mq_map_request(struct request_queue *q,
1078 					  struct bio *bio,
1079 					  struct blk_map_ctx *data)
1080 {
1081 	struct blk_mq_hw_ctx *hctx;
1082 	struct blk_mq_ctx *ctx;
1083 	struct request *rq;
1084 	int rw = bio_data_dir(bio);
1085 	struct blk_mq_alloc_data alloc_data;
1086 
1087 	if (unlikely(blk_mq_queue_enter(q))) {
1088 		bio_endio(bio, -EIO);
1089 		return NULL;
1090 	}
1091 
1092 	ctx = blk_mq_get_ctx(q);
1093 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1094 
1095 	if (rw_is_sync(bio->bi_rw))
1096 		rw |= REQ_SYNC;
1097 
1098 	trace_block_getrq(q, bio, rw);
1099 	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1100 			hctx);
1101 	rq = __blk_mq_alloc_request(&alloc_data, rw);
1102 	if (unlikely(!rq)) {
1103 		__blk_mq_run_hw_queue(hctx);
1104 		blk_mq_put_ctx(ctx);
1105 		trace_block_sleeprq(q, bio, rw);
1106 
1107 		ctx = blk_mq_get_ctx(q);
1108 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
1109 		blk_mq_set_alloc_data(&alloc_data, q,
1110 				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1111 		rq = __blk_mq_alloc_request(&alloc_data, rw);
1112 		ctx = alloc_data.ctx;
1113 		hctx = alloc_data.hctx;
1114 	}
1115 
1116 	hctx->queued++;
1117 	data->hctx = hctx;
1118 	data->ctx = ctx;
1119 	return rq;
1120 }
1121 
1122 /*
1123  * Multiple hardware queue variant. This will not use per-process plugs,
1124  * but will attempt to bypass the hctx queueing if we can go straight to
1125  * hardware for SYNC IO.
1126  */
1127 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1128 {
1129 	const int is_sync = rw_is_sync(bio->bi_rw);
1130 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1131 	struct blk_map_ctx data;
1132 	struct request *rq;
1133 
1134 	blk_queue_bounce(q, &bio);
1135 
1136 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1137 		bio_endio(bio, -EIO);
1138 		return;
1139 	}
1140 
1141 	rq = blk_mq_map_request(q, bio, &data);
1142 	if (unlikely(!rq))
1143 		return;
1144 
1145 	if (unlikely(is_flush_fua)) {
1146 		blk_mq_bio_to_request(rq, bio);
1147 		blk_insert_flush(rq);
1148 		goto run_queue;
1149 	}
1150 
1151 	if (is_sync) {
1152 		int ret;
1153 
1154 		blk_mq_bio_to_request(rq, bio);
1155 
1156 		/*
1157 		 * If the queue accepts the request, we are done. On error, kill
1158 		 * it. Any other result (busy) means we add it to our list, as
1159 		 * we previously would have done.
1160 		 */
1161 		ret = q->mq_ops->queue_rq(data.hctx, rq, true);
1162 		if (ret == BLK_MQ_RQ_QUEUE_OK)
1163 			goto done;
1164 		else {
1165 			__blk_mq_requeue_request(rq);
1166 
1167 			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1168 				rq->errors = -EIO;
1169 				blk_mq_end_request(rq, rq->errors);
1170 				goto done;
1171 			}
1172 		}
1173 	}
1174 
1175 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1176 		/*
1177 		 * For a SYNC request, send it to the hardware immediately. For
1178 		 * an ASYNC request, just ensure that we run it later on. The
1179 		 * latter allows for merging opportunities and more efficient
1180 		 * dispatching.
1181 		 */
1182 run_queue:
1183 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1184 	}
1185 done:
1186 	blk_mq_put_ctx(data.ctx);
1187 }
1188 
1189 /*
1190  * Single hardware queue variant. This will attempt to use any per-process
1191  * plug for merging and IO deferral.
1192  */
1193 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1194 {
1195 	const int is_sync = rw_is_sync(bio->bi_rw);
1196 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1197 	unsigned int use_plug, request_count = 0;
1198 	struct blk_map_ctx data;
1199 	struct request *rq;
1200 
1201 	/*
1202 	 * Use the task plug only for async, non-flush IO; sync and flush/FUA
1203 	 * requests go straight to the hardware queue.
1204 	 */
1205 	use_plug = !is_flush_fua && !is_sync;
1206 
1207 	blk_queue_bounce(q, &bio);
1208 
1209 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1210 		bio_endio(bio, -EIO);
1211 		return;
1212 	}
1213 
1214 	if (use_plug && !blk_queue_nomerges(q) &&
1215 	    blk_attempt_plug_merge(q, bio, &request_count))
1216 		return;
1217 
1218 	rq = blk_mq_map_request(q, bio, &data);
1219 	if (unlikely(!rq))
1220 		return;
1221 
1222 	if (unlikely(is_flush_fua)) {
1223 		blk_mq_bio_to_request(rq, bio);
1224 		blk_insert_flush(rq);
1225 		goto run_queue;
1226 	}
1227 
1228 	/*
1229 	 * A task plug currently exists. Since this is completely lockless,
1230 	 * utilize that to temporarily store requests until the task is
1231 	 * either done or scheduled away.
1232 	 */
1233 	if (use_plug) {
1234 		struct blk_plug *plug = current->plug;
1235 
1236 		if (plug) {
1237 			blk_mq_bio_to_request(rq, bio);
1238 			if (list_empty(&plug->mq_list))
1239 				trace_block_plug(q);
1240 			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1241 				blk_flush_plug_list(plug, false);
1242 				trace_block_plug(q);
1243 			}
1244 			list_add_tail(&rq->queuelist, &plug->mq_list);
1245 			blk_mq_put_ctx(data.ctx);
1246 			return;
1247 		}
1248 	}
1249 
1250 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1251 		/*
1252 		 * For a SYNC request, send it to the hardware immediately. For
1253 		 * an ASYNC request, just ensure that we run it later on. The
1254 		 * latter allows for merging opportunities and more efficient
1255 		 * dispatching.
1256 		 */
1257 run_queue:
1258 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1259 	}
1260 
1261 	blk_mq_put_ctx(data.ctx);
1262 }
1263 
1264 /*
1265  * Default mapping to a software queue, since we use one per CPU.
1266  */
1267 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1268 {
1269 	return q->queue_hw_ctx[q->mq_map[cpu]];
1270 }
1271 EXPORT_SYMBOL(blk_mq_map_queue);
1272 
1273 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1274 		struct blk_mq_tags *tags, unsigned int hctx_idx)
1275 {
1276 	struct page *page;
1277 
1278 	if (tags->rqs && set->ops->exit_request) {
1279 		int i;
1280 
1281 		for (i = 0; i < tags->nr_tags; i++) {
1282 			if (!tags->rqs[i])
1283 				continue;
1284 			set->ops->exit_request(set->driver_data, tags->rqs[i],
1285 						hctx_idx, i);
1286 			tags->rqs[i] = NULL;
1287 		}
1288 	}
1289 
1290 	while (!list_empty(&tags->page_list)) {
1291 		page = list_first_entry(&tags->page_list, struct page, lru);
1292 		list_del_init(&page->lru);
1293 		__free_pages(page, page->private);
1294 	}
1295 
1296 	kfree(tags->rqs);
1297 
1298 	blk_mq_free_tags(tags);
1299 }
1300 
1301 static size_t order_to_size(unsigned int order)
1302 {
1303 	return (size_t)PAGE_SIZE << order;
1304 }
1305 
1306 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1307 		unsigned int hctx_idx)
1308 {
1309 	struct blk_mq_tags *tags;
1310 	unsigned int i, j, entries_per_page, max_order = 4;
1311 	size_t rq_size, left;
1312 
1313 	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1314 				set->numa_node);
1315 	if (!tags)
1316 		return NULL;
1317 
1318 	INIT_LIST_HEAD(&tags->page_list);
1319 
1320 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1321 				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1322 				 set->numa_node);
1323 	if (!tags->rqs) {
1324 		blk_mq_free_tags(tags);
1325 		return NULL;
1326 	}
1327 
1328 	/*
1329 	 * rq_size is the size of the request plus driver payload, rounded
1330 	 * to the cacheline size
1331 	 */
1332 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1333 				cache_line_size());
1334 	left = rq_size * set->queue_depth;
1335 
1336 	for (i = 0; i < set->queue_depth; ) {
1337 		int this_order = max_order;
1338 		struct page *page;
1339 		int to_do;
1340 		void *p;
1341 
1342 		while (left < order_to_size(this_order - 1) && this_order)
1343 			this_order--;
1344 
1345 		do {
1346 			page = alloc_pages_node(set->numa_node,
1347 				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1348 				this_order);
1349 			if (page)
1350 				break;
1351 			if (!this_order--)
1352 				break;
1353 			if (order_to_size(this_order) < rq_size)
1354 				break;
1355 		} while (1);
1356 
1357 		if (!page)
1358 			goto fail;
1359 
1360 		page->private = this_order;
1361 		list_add_tail(&page->lru, &tags->page_list);
1362 
1363 		p = page_address(page);
1364 		entries_per_page = order_to_size(this_order) / rq_size;
1365 		to_do = min(entries_per_page, set->queue_depth - i);
1366 		left -= to_do * rq_size;
1367 		for (j = 0; j < to_do; j++) {
1368 			tags->rqs[i] = p;
1369 			tags->rqs[i]->atomic_flags = 0;
1370 			tags->rqs[i]->cmd_flags = 0;
1371 			if (set->ops->init_request) {
1372 				if (set->ops->init_request(set->driver_data,
1373 						tags->rqs[i], hctx_idx, i,
1374 						set->numa_node)) {
1375 					tags->rqs[i] = NULL;
1376 					goto fail;
1377 				}
1378 			}
1379 
1380 			p += rq_size;
1381 			i++;
1382 		}
1383 	}
1384 
1385 	return tags;
1386 
1387 fail:
1388 	blk_mq_free_rq_map(set, tags, hctx_idx);
1389 	return NULL;
1390 }
1391 
1392 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1393 {
1394 	kfree(bitmap->map);
1395 }
1396 
1397 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1398 {
1399 	unsigned int bpw = 8, total, num_maps, i;
1400 
1401 	bitmap->bits_per_word = bpw;
1402 
1403 	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1404 	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1405 					GFP_KERNEL, node);
1406 	if (!bitmap->map)
1407 		return -ENOMEM;
1408 
1409 	bitmap->map_size = num_maps;
1410 
1411 	total = nr_cpu_ids;
1412 	for (i = 0; i < num_maps; i++) {
1413 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1414 		total -= bitmap->map[i].depth;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1421 {
1422 	struct request_queue *q = hctx->queue;
1423 	struct blk_mq_ctx *ctx;
1424 	LIST_HEAD(tmp);
1425 
1426 	/*
1427 	 * Move ctx entries to new CPU, if this one is going away.
1428 	 */
1429 	ctx = __blk_mq_get_ctx(q, cpu);
1430 
1431 	spin_lock(&ctx->lock);
1432 	if (!list_empty(&ctx->rq_list)) {
1433 		list_splice_init(&ctx->rq_list, &tmp);
1434 		blk_mq_hctx_clear_pending(hctx, ctx);
1435 	}
1436 	spin_unlock(&ctx->lock);
1437 
1438 	if (list_empty(&tmp))
1439 		return NOTIFY_OK;
1440 
1441 	ctx = blk_mq_get_ctx(q);
1442 	spin_lock(&ctx->lock);
1443 
1444 	while (!list_empty(&tmp)) {
1445 		struct request *rq;
1446 
1447 		rq = list_first_entry(&tmp, struct request, queuelist);
1448 		rq->mq_ctx = ctx;
1449 		list_move_tail(&rq->queuelist, &ctx->rq_list);
1450 	}
1451 
1452 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
1453 	blk_mq_hctx_mark_pending(hctx, ctx);
1454 
1455 	spin_unlock(&ctx->lock);
1456 
1457 	blk_mq_run_hw_queue(hctx, true);
1458 	blk_mq_put_ctx(ctx);
1459 	return NOTIFY_OK;
1460 }
1461 
1462 static int blk_mq_hctx_notify(void *data, unsigned long action,
1463 			      unsigned int cpu)
1464 {
1465 	struct blk_mq_hw_ctx *hctx = data;
1466 
1467 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1468 		return blk_mq_hctx_cpu_offline(hctx, cpu);
1469 
1470 	/*
1471 	 * In case of CPU online, tags may be reallocated
1472 	 * in blk_mq_map_swqueue() after mapping is updated.
1473 	 */
1474 
1475 	return NOTIFY_OK;
1476 }
1477 
1478 static void blk_mq_exit_hctx(struct request_queue *q,
1479 		struct blk_mq_tag_set *set,
1480 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1481 {
1482 	unsigned flush_start_tag = set->queue_depth;
1483 
1484 	blk_mq_tag_idle(hctx);
1485 
1486 	if (set->ops->exit_request)
1487 		set->ops->exit_request(set->driver_data,
1488 				       hctx->fq->flush_rq, hctx_idx,
1489 				       flush_start_tag + hctx_idx);
1490 
1491 	if (set->ops->exit_hctx)
1492 		set->ops->exit_hctx(hctx, hctx_idx);
1493 
1494 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1495 	blk_free_flush_queue(hctx->fq);
1496 	kfree(hctx->ctxs);
1497 	blk_mq_free_bitmap(&hctx->ctx_map);
1498 }
1499 
1500 static void blk_mq_exit_hw_queues(struct request_queue *q,
1501 		struct blk_mq_tag_set *set, int nr_queue)
1502 {
1503 	struct blk_mq_hw_ctx *hctx;
1504 	unsigned int i;
1505 
1506 	queue_for_each_hw_ctx(q, hctx, i) {
1507 		if (i == nr_queue)
1508 			break;
1509 		blk_mq_exit_hctx(q, set, hctx, i);
1510 	}
1511 }
1512 
1513 static void blk_mq_free_hw_queues(struct request_queue *q,
1514 		struct blk_mq_tag_set *set)
1515 {
1516 	struct blk_mq_hw_ctx *hctx;
1517 	unsigned int i;
1518 
1519 	queue_for_each_hw_ctx(q, hctx, i) {
1520 		free_cpumask_var(hctx->cpumask);
1521 		kfree(hctx);
1522 	}
1523 }
1524 
1525 static int blk_mq_init_hctx(struct request_queue *q,
1526 		struct blk_mq_tag_set *set,
1527 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1528 {
1529 	int node;
1530 	unsigned flush_start_tag = set->queue_depth;
1531 
1532 	node = hctx->numa_node;
1533 	if (node == NUMA_NO_NODE)
1534 		node = hctx->numa_node = set->numa_node;
1535 
1536 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1537 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1538 	spin_lock_init(&hctx->lock);
1539 	INIT_LIST_HEAD(&hctx->dispatch);
1540 	hctx->queue = q;
1541 	hctx->queue_num = hctx_idx;
1542 	hctx->flags = set->flags;
1543 	hctx->cmd_size = set->cmd_size;
1544 
1545 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1546 					blk_mq_hctx_notify, hctx);
1547 	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1548 
1549 	hctx->tags = set->tags[hctx_idx];
1550 
1551 	/*
1552 	 * Allocate space for all possible cpus to avoid allocation at
1553 	 * runtime
1554 	 */
1555 	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1556 					GFP_KERNEL, node);
1557 	if (!hctx->ctxs)
1558 		goto unregister_cpu_notifier;
1559 
1560 	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1561 		goto free_ctxs;
1562 
1563 	hctx->nr_ctx = 0;
1564 
1565 	if (set->ops->init_hctx &&
1566 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1567 		goto free_bitmap;
1568 
1569 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1570 	if (!hctx->fq)
1571 		goto exit_hctx;
1572 
1573 	if (set->ops->init_request &&
1574 	    set->ops->init_request(set->driver_data,
1575 				   hctx->fq->flush_rq, hctx_idx,
1576 				   flush_start_tag + hctx_idx, node))
1577 		goto free_fq;
1578 
1579 	return 0;
1580 
1581  free_fq:
1582 	kfree(hctx->fq);
1583  exit_hctx:
1584 	if (set->ops->exit_hctx)
1585 		set->ops->exit_hctx(hctx, hctx_idx);
1586  free_bitmap:
1587 	blk_mq_free_bitmap(&hctx->ctx_map);
1588  free_ctxs:
1589 	kfree(hctx->ctxs);
1590  unregister_cpu_notifier:
1591 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1592 
1593 	return -1;
1594 }
1595 
1596 static int blk_mq_init_hw_queues(struct request_queue *q,
1597 		struct blk_mq_tag_set *set)
1598 {
1599 	struct blk_mq_hw_ctx *hctx;
1600 	unsigned int i;
1601 
1602 	/*
1603 	 * Initialize hardware queues
1604 	 */
1605 	queue_for_each_hw_ctx(q, hctx, i) {
1606 		if (blk_mq_init_hctx(q, set, hctx, i))
1607 			break;
1608 	}
1609 
1610 	if (i == q->nr_hw_queues)
1611 		return 0;
1612 
1613 	/*
1614 	 * Init failed
1615 	 */
1616 	blk_mq_exit_hw_queues(q, set, i);
1617 
1618 	return 1;
1619 }
1620 
1621 static void blk_mq_init_cpu_queues(struct request_queue *q,
1622 				   unsigned int nr_hw_queues)
1623 {
1624 	unsigned int i;
1625 
1626 	for_each_possible_cpu(i) {
1627 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1628 		struct blk_mq_hw_ctx *hctx;
1629 
1630 		memset(__ctx, 0, sizeof(*__ctx));
1631 		__ctx->cpu = i;
1632 		spin_lock_init(&__ctx->lock);
1633 		INIT_LIST_HEAD(&__ctx->rq_list);
1634 		__ctx->queue = q;
1635 
1636 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1637 		if (!cpu_online(i))
1638 			continue;
1639 
1640 		hctx = q->mq_ops->map_queue(q, i);
1641 		cpumask_set_cpu(i, hctx->cpumask);
1642 		hctx->nr_ctx++;
1643 
1644 		/*
1645 		 * Set local node, IFF we have more than one hw queue. If
1646 		 * not, we remain on the home node of the device
1647 		 */
1648 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1649 			hctx->numa_node = cpu_to_node(i);
1650 	}
1651 }
1652 
1653 static void blk_mq_map_swqueue(struct request_queue *q)
1654 {
1655 	unsigned int i;
1656 	struct blk_mq_hw_ctx *hctx;
1657 	struct blk_mq_ctx *ctx;
1658 	struct blk_mq_tag_set *set = q->tag_set;
1659 
1660 	queue_for_each_hw_ctx(q, hctx, i) {
1661 		cpumask_clear(hctx->cpumask);
1662 		hctx->nr_ctx = 0;
1663 	}
1664 
1665 	/*
1666 	 * Map software to hardware queues
1667 	 */
1668 	queue_for_each_ctx(q, ctx, i) {
1669 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
1670 		if (!cpu_online(i))
1671 			continue;
1672 
1673 		hctx = q->mq_ops->map_queue(q, i);
1674 		cpumask_set_cpu(i, hctx->cpumask);
1675 		ctx->index_hw = hctx->nr_ctx;
1676 		hctx->ctxs[hctx->nr_ctx++] = ctx;
1677 	}
1678 
1679 	queue_for_each_hw_ctx(q, hctx, i) {
1680 		/*
1681 		 * If no software queues are mapped to this hardware queue,
1682 		 * disable it and free the request entries.
1683 		 */
1684 		if (!hctx->nr_ctx) {
1685 			if (set->tags[i]) {
1686 				blk_mq_free_rq_map(set, set->tags[i], i);
1687 				set->tags[i] = NULL;
1688 			}
1689 			hctx->tags = NULL;
1690 			continue;
1691 		}
1692 
1693 		/* unmapped hw queue can be remapped after CPU topo changed */
1694 		if (!set->tags[i])
1695 			set->tags[i] = blk_mq_init_rq_map(set, i);
1696 		hctx->tags = set->tags[i];
1697 		WARN_ON(!hctx->tags);
1698 
1699 		/*
1700 		 * Initialize batch roundrobin counts
1701 		 */
1702 		hctx->next_cpu = cpumask_first(hctx->cpumask);
1703 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1704 	}
1705 }
1706 
1707 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1708 {
1709 	struct blk_mq_hw_ctx *hctx;
1710 	struct request_queue *q;
1711 	bool shared;
1712 	int i;
1713 
1714 	if (set->tag_list.next == set->tag_list.prev)
1715 		shared = false;
1716 	else
1717 		shared = true;
1718 
1719 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
1720 		blk_mq_freeze_queue(q);
1721 
1722 		queue_for_each_hw_ctx(q, hctx, i) {
1723 			if (shared)
1724 				hctx->flags |= BLK_MQ_F_TAG_SHARED;
1725 			else
1726 				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1727 		}
1728 		blk_mq_unfreeze_queue(q);
1729 	}
1730 }
1731 
1732 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1733 {
1734 	struct blk_mq_tag_set *set = q->tag_set;
1735 
1736 	mutex_lock(&set->tag_list_lock);
1737 	list_del_init(&q->tag_set_list);
1738 	blk_mq_update_tag_set_depth(set);
1739 	mutex_unlock(&set->tag_list_lock);
1740 }
1741 
1742 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1743 				     struct request_queue *q)
1744 {
1745 	q->tag_set = set;
1746 
1747 	mutex_lock(&set->tag_list_lock);
1748 	list_add_tail(&q->tag_set_list, &set->tag_list);
1749 	blk_mq_update_tag_set_depth(set);
1750 	mutex_unlock(&set->tag_list_lock);
1751 }
1752 
1753 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1754 {
1755 	struct blk_mq_hw_ctx **hctxs;
1756 	struct blk_mq_ctx __percpu *ctx;
1757 	struct request_queue *q;
1758 	unsigned int *map;
1759 	int i;
1760 
1761 	ctx = alloc_percpu(struct blk_mq_ctx);
1762 	if (!ctx)
1763 		return ERR_PTR(-ENOMEM);
1764 
1765 	/*
1766 	 * If a crashdump is active, then we are potentially in a very
1767 	 * memory constrained environment. Limit us to 1 queue and
1768 	 * 64 tags to prevent using too much memory.
1769 	 */
1770 	if (is_kdump_kernel()) {
1771 		set->nr_hw_queues = 1;
1772 		set->queue_depth = min(64U, set->queue_depth);
1773 	}
1774 
1775 	hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1776 			set->numa_node);
1777 
1778 	if (!hctxs)
1779 		goto err_percpu;
1780 
1781 	map = blk_mq_make_queue_map(set);
1782 	if (!map)
1783 		goto err_map;
1784 
1785 	for (i = 0; i < set->nr_hw_queues; i++) {
1786 		int node = blk_mq_hw_queue_to_node(map, i);
1787 
1788 		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1789 					GFP_KERNEL, node);
1790 		if (!hctxs[i])
1791 			goto err_hctxs;
1792 
1793 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1794 						node))
1795 			goto err_hctxs;
1796 
1797 		atomic_set(&hctxs[i]->nr_active, 0);
1798 		hctxs[i]->numa_node = node;
1799 		hctxs[i]->queue_num = i;
1800 	}
1801 
1802 	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1803 	if (!q)
1804 		goto err_hctxs;
1805 
1806 	/*
1807 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
1808 	 * See blk_register_queue() for details.
1809 	 */
1810 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1811 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1812 		goto err_mq_usage;
1813 
1814 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1815 	blk_queue_rq_timeout(q, 30000);
1816 
1817 	q->nr_queues = nr_cpu_ids;
1818 	q->nr_hw_queues = set->nr_hw_queues;
1819 	q->mq_map = map;
1820 
1821 	q->queue_ctx = ctx;
1822 	q->queue_hw_ctx = hctxs;
1823 
1824 	q->mq_ops = set->ops;
1825 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1826 
1827 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
1828 		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1829 
1830 	q->sg_reserved_size = INT_MAX;
1831 
1832 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1833 	INIT_LIST_HEAD(&q->requeue_list);
1834 	spin_lock_init(&q->requeue_lock);
1835 
1836 	if (q->nr_hw_queues > 1)
1837 		blk_queue_make_request(q, blk_mq_make_request);
1838 	else
1839 		blk_queue_make_request(q, blk_sq_make_request);
1840 
1841 	if (set->timeout)
1842 		blk_queue_rq_timeout(q, set->timeout);
1843 
1844 	/*
1845 	 * Do this after blk_queue_make_request() overrides it...
1846 	 */
1847 	q->nr_requests = set->queue_depth;
1848 
1849 	if (set->ops->complete)
1850 		blk_queue_softirq_done(q, set->ops->complete);
1851 
1852 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1853 
1854 	if (blk_mq_init_hw_queues(q, set))
1855 		goto err_mq_usage;
1856 
1857 	mutex_lock(&all_q_mutex);
1858 	list_add_tail(&q->all_q_node, &all_q_list);
1859 	mutex_unlock(&all_q_mutex);
1860 
1861 	blk_mq_add_queue_tag_set(set, q);
1862 
1863 	blk_mq_map_swqueue(q);
1864 
1865 	return q;
1866 
1867 err_mq_usage:
1868 	blk_cleanup_queue(q);
1869 err_hctxs:
1870 	kfree(map);
1871 	for (i = 0; i < set->nr_hw_queues; i++) {
1872 		if (!hctxs[i])
1873 			break;
1874 		free_cpumask_var(hctxs[i]->cpumask);
1875 		kfree(hctxs[i]);
1876 	}
1877 err_map:
1878 	kfree(hctxs);
1879 err_percpu:
1880 	free_percpu(ctx);
1881 	return ERR_PTR(-ENOMEM);
1882 }
1883 EXPORT_SYMBOL(blk_mq_init_queue);
1884 
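/*
 * Undo blk_mq_init_queue(): unlink the queue from its tag set, shut down and
 * free the hardware queues, then release the percpu ref, the software
 * contexts, the hctx array and the cpu map.
 */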
1885 void blk_mq_free_queue(struct request_queue *q)
1886 {
1887 	struct blk_mq_tag_set	*set = q->tag_set;
1888 
1889 	blk_mq_del_queue_tag_set(q);
1890 
1891 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1892 	blk_mq_free_hw_queues(q, set);
1893 
1894 	percpu_ref_exit(&q->mq_usage_counter);
1895 
1896 	free_percpu(q->queue_ctx);
1897 	kfree(q->queue_hw_ctx);
1898 	kfree(q->mq_map);
1899 
1900 	q->queue_ctx = NULL;
1901 	q->queue_hw_ctx = NULL;
1902 	q->mq_map = NULL;
1903 
1904 	mutex_lock(&all_q_mutex);
1905 	list_del_init(&q->all_q_node);
1906 	mutex_unlock(&all_q_mutex);
1907 }
1908 
1909 /* Basically redo blk_mq_init_queue() with the queue frozen */
1910 static void blk_mq_queue_reinit(struct request_queue *q)
1911 {
1912 	WARN_ON_ONCE(!q->mq_freeze_depth);
1913 
1914 	blk_mq_sysfs_unregister(q);
1915 
1916 	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1917 
1918 	/*
1919 	 * Redo blk_mq_init_cpu_queues() and blk_mq_init_hw_queues(). FIXME:
1920 	 * maybe we should change hctx->numa_node according to the new topology
1921 	 * (this involves freeing and re-allocating memory; is it worth doing?)
1922 	 */
1923 
1924 	blk_mq_map_swqueue(q);
1925 
1926 	blk_mq_sysfs_register(q);
1927 }
1928 
1929 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1930 				      unsigned long action, void *hcpu)
1931 {
1932 	struct request_queue *q;
1933 
1934 	/*
1935 	 * Before new mappings are established, a hot-added CPU might already
1936 	 * have started handling requests. This doesn't break anything, as we
1937 	 * map offline CPUs to the first hardware queue. We re-init the queues
1938 	 * below to get optimal settings.
1939 	 */
1940 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1941 	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1942 		return NOTIFY_OK;
1943 
1944 	mutex_lock(&all_q_mutex);
1945 
1946 	/*
1947 	 * We need to freeze and reinit all existing queues.  Freezing
1948 	 * involves synchronous wait for an RCU grace period and doing it
1949 	 * one by one may take a long time.  Start freezing all queues in
1950 	 * one swoop and then wait for the completions so that freezing can
1951 	 * take place in parallel.
1952 	 */
1953 	list_for_each_entry(q, &all_q_list, all_q_node)
1954 		blk_mq_freeze_queue_start(q);
1955 	list_for_each_entry(q, &all_q_list, all_q_node)
1956 		blk_mq_freeze_queue_wait(q);
1957 
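	/*
	 * All queues are now frozen; rebuild the cpu-to-hardware-queue
	 * mappings for the new topology before allowing I/O to resume.
	 */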
1958 	list_for_each_entry(q, &all_q_list, all_q_node)
1959 		blk_mq_queue_reinit(q);
1960 
1961 	list_for_each_entry(q, &all_q_list, all_q_node)
1962 		blk_mq_unfreeze_queue(q);
1963 
1964 	mutex_unlock(&all_q_mutex);
1965 	return NOTIFY_OK;
1966 }
1967 
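/*
 * Allocate one request/tag map per hardware queue.  This is all-or-nothing:
 * if any allocation fails, everything allocated so far is freed and -ENOMEM
 * is returned so the caller can retry with a smaller depth.
 */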
1968 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1969 {
1970 	int i;
1971 
1972 	for (i = 0; i < set->nr_hw_queues; i++) {
1973 		set->tags[i] = blk_mq_init_rq_map(set, i);
1974 		if (!set->tags[i])
1975 			goto out_unwind;
1976 	}
1977 
1978 	return 0;
1979 
1980 out_unwind:
1981 	while (--i >= 0)
1982 		blk_mq_free_rq_map(set, set->tags[i], i);
1983 
1984 	return -ENOMEM;
1985 }
1986 
1987 /*
1988  * Allocate the request maps associated with this tag_set. Note that this
1989  * may reduce the depth asked for, if memory is tight. set->queue_depth
1990  * will be updated to reflect the allocated depth.
1991  */
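/*
 * Editor's note: for example, a request for depth 256 that fails to allocate
 * is retried at 128, 64, 32, ... until an allocation succeeds or the depth
 * would fall below set->reserved_tags + BLK_MQ_TAG_MIN, at which point
 * -ENOMEM is returned.
 */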
1992 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
1993 {
1994 	unsigned int depth;
1995 	int err;
1996 
1997 	depth = set->queue_depth;
1998 	do {
1999 		err = __blk_mq_alloc_rq_maps(set);
2000 		if (!err)
2001 			break;
2002 
2003 		set->queue_depth >>= 1;
2004 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2005 			err = -ENOMEM;
2006 			break;
2007 		}
2008 	} while (set->queue_depth);
2009 
2010 	if (!set->queue_depth || err) {
2011 		pr_err("blk-mq: failed to allocate request map\n");
2012 		return -ENOMEM;
2013 	}
2014 
2015 	if (depth != set->queue_depth)
2016 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2017 						depth, set->queue_depth);
2018 
2019 	return 0;
2020 }
2021 
2022 /*
2023  * Allocate a tag set to be associated with one or more request queues.
2024  * May fail with EINVAL for various error conditions. May adjust the
2025  * requested depth down, if it is too large. In that case, the adjusted
2026  * value will be stored in set->queue_depth.
2027  */
2028 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2029 {
2030 	if (!set->nr_hw_queues)
2031 		return -EINVAL;
2032 	if (!set->queue_depth)
2033 		return -EINVAL;
2034 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2035 		return -EINVAL;
2036 
2037 	if (!set->ops->queue_rq || !set->ops->map_queue)
2038 		return -EINVAL;
2039 
2040 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2041 		pr_info("blk-mq: reduced tag depth to %u\n",
2042 			BLK_MQ_MAX_DEPTH);
2043 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2044 	}
2045 
2046 	set->tags = kmalloc_node(set->nr_hw_queues *
2047 				 sizeof(struct blk_mq_tags *),
2048 				 GFP_KERNEL, set->numa_node);
2049 	if (!set->tags)
2050 		return -ENOMEM;
2051 
2052 	if (blk_mq_alloc_rq_maps(set))
2053 		goto enomem;
2054 
2055 	mutex_init(&set->tag_list_lock);
2056 	INIT_LIST_HEAD(&set->tag_list);
2057 
2058 	return 0;
2059 enomem:
2060 	kfree(set->tags);
2061 	set->tags = NULL;
2062 	return -ENOMEM;
2063 }
2064 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2065 
2066 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2067 {
2068 	int i;
2069 
2070 	for (i = 0; i < set->nr_hw_queues; i++) {
2071 		if (set->tags[i])
2072 			blk_mq_free_rq_map(set, set->tags[i], i);
2073 	}
2074 
2075 	kfree(set->tags);
2076 	set->tags = NULL;
2077 }
2078 EXPORT_SYMBOL(blk_mq_free_tag_set);
2079 
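/*
 * Change the number of requests (tag depth) of a live queue, typically in
 * response to a write to its nr_requests sysfs attribute.  The new depth
 * cannot exceed the depth the tag set was originally allocated with.
 */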
2080 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2081 {
2082 	struct blk_mq_tag_set *set = q->tag_set;
2083 	struct blk_mq_hw_ctx *hctx;
2084 	int i, ret;
2085 
2086 	if (!set || nr > set->queue_depth)
2087 		return -EINVAL;
2088 
2089 	ret = 0;
2090 	queue_for_each_hw_ctx(q, hctx, i) {
2091 		ret = blk_mq_tag_update_depth(hctx->tags, nr);
2092 		if (ret)
2093 			break;
2094 	}
2095 
2096 	if (!ret)
2097 		q->nr_requests = nr;
2098 
2099 	return ret;
2100 }
2101 
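/*
 * Taking all_q_mutex here keeps blk_mq_queue_reinit_notify() from remapping
 * queues while the caller manipulates them; releasing it re-enables CPU
 * hotplug processing for blk-mq.
 */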
2102 void blk_mq_disable_hotplug(void)
2103 {
2104 	mutex_lock(&all_q_mutex);
2105 }
2106 
2107 void blk_mq_enable_hotplug(void)
2108 {
2109 	mutex_unlock(&all_q_mutex);
2110 }
2111 
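/*
 * Early init: set up blk-mq's per-CPU notifier machinery and register the
 * CPU hotplug callback that freezes and remaps all queues when CPUs come
 * and go.
 */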
2112 static int __init blk_mq_init(void)
2113 {
2114 	blk_mq_cpu_init();
2115 
2116 	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2117 
2118 	return 0;
2119 }
2120 subsys_initcall(blk_mq_init);
2121