// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

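/*
 * Per-CPU infrastructure for remote completions: the llist of requests
 * queued for completion on each CPU, and the call_single_data used to
 * IPI that CPU (see blk_mq_complete_send_ipi() below).
 */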
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
                blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                         struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                        blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        if (!sbitmap_test_bit(&hctx->ctx_map, bit))
                sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
        struct block_device *part;
        unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
        struct mq_inflight *mi = priv;

        if (rq->part && blk_do_io_stat(rq) &&
            (!mi->part->bd_partno || rq->part == mi->part) &&
            blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                mi->inflight[rq_data_dir(rq)]++;

        return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part)
{
        struct mq_inflight mi = { .part = part };

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

        return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part };

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
        inflight[0] = mi.inflight[0];
        inflight[1] = mi.inflight[1];
}

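/*
 * Start freezing the queue: bump the freeze depth and, on the first
 * freeze, kill q_usage_counter so that new blk_queue_enter() calls fail
 * or block, then run the hardware queues so in-flight requests drain.
 *
 * A typical caller pairs the freeze with an unfreeze (sketch):
 *
 *	blk_mq_freeze_queue(q);
 *	... change queue data structures ...
 *	blk_mq_unfreeze_queue(q);
 */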
void blk_freeze_queue_start(struct request_queue *q)
{
        mutex_lock(&q->mq_freeze_lock);
        if (++q->mq_freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                mutex_unlock(&q->mq_freeze_lock);
                if (queue_is_mq(q))
                        blk_mq_run_hw_queues(q, false);
        } else {
                mutex_unlock(&q->mq_freeze_lock);
        }
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                  percpu_ref_is_zero(&q->q_usage_counter),
                                  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero. For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
        mutex_lock(&q->mq_freeze_lock);
        if (force_atomic)
                q->q_usage_counter.data->force_atomic = true;
        q->mq_freeze_depth--;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        if (!q->mq_freeze_depth) {
                percpu_ref_resurrect(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
        mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
        __blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->queue_lock, flags);
        if (!q->quiesce_depth++)
                blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set. This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
        if (set->flags & BLK_MQ_F_BLOCKING)
                synchronize_srcu(set->srcu);
        else
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        blk_mq_quiesce_queue_nowait(q);
        /* nothing to wait for non-mq queues */
        if (queue_is_mq(q))
                blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
        unsigned long flags;
        bool run_queue = false;

        spin_lock_irqsave(&q->queue_lock, flags);
        if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
                ;
        } else if (!--q->quiesce_depth) {
                blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
                run_queue = true;
        }
        spin_unlock_irqrestore(&q->queue_lock, flags);

        /* dispatch requests which are inserted during quiescing */
        if (run_queue)
                blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
        struct request_queue *q;

        mutex_lock(&set->tag_list_lock);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                if (!blk_queue_skip_tagset_quiesce(q))
                        blk_mq_quiesce_queue_nowait(q);
        }
        blk_mq_wait_quiesce_done(set);
        mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
        struct request_queue *q;

        mutex_lock(&set->tag_list_lock);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                if (!blk_queue_skip_tagset_quiesce(q))
                        blk_mq_unquiesce_queue(q);
        }
        mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);

void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
        memset(rq, 0, sizeof(*rq));

        INIT_LIST_HEAD(&rq->queuelist);
        rq->q = q;
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->tag = BLK_MQ_NO_TAG;
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
        blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
        if (blk_mq_need_time_stamp(rq))
                rq->start_time_ns = ktime_get_ns();
        else
                rq->start_time_ns = 0;

#ifdef CONFIG_BLK_RQ_ALLOC_TIME
        if (blk_queue_rq_alloc_time(rq->q))
                rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
        else
                rq->alloc_time_ns = 0;
#endif
}

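/*
 * Initialize a request taken out of tags->static_rqs: bind it to the
 * ctx/hctx it was allocated on, and record either a driver tag or, when
 * a scheduler is attached (RQF_SCHED_TAGS), a scheduler tag. Requests
 * that go through the elevator also get their prepare_request hook
 * called here.
 */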
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                struct blk_mq_tags *tags, unsigned int tag)
{
        struct blk_mq_ctx *ctx = data->ctx;
        struct blk_mq_hw_ctx *hctx = data->hctx;
        struct request_queue *q = data->q;
        struct request *rq = tags->static_rqs[tag];

        rq->q = q;
        rq->mq_ctx = ctx;
        rq->mq_hctx = hctx;
        rq->cmd_flags = data->cmd_flags;

        if (data->flags & BLK_MQ_REQ_PM)
                data->rq_flags |= RQF_PM;
        if (blk_queue_io_stat(q))
                data->rq_flags |= RQF_IO_STAT;
        rq->rq_flags = data->rq_flags;

        if (data->rq_flags & RQF_SCHED_TAGS) {
                rq->tag = BLK_MQ_NO_TAG;
                rq->internal_tag = tag;
        } else {
                rq->tag = tag;
                rq->internal_tag = BLK_MQ_NO_TAG;
        }
        rq->timeout = 0;

        rq->part = NULL;
        rq->io_start_time_ns = 0;
        rq->stats_sectors = 0;
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->end_io = NULL;
        rq->end_io_data = NULL;

        blk_crypto_rq_set_defaults(rq);
        INIT_LIST_HEAD(&rq->queuelist);
        /* tag was already set */
        WRITE_ONCE(rq->deadline, 0);
        req_ref_set(rq, 1);

        if (rq->rq_flags & RQF_USE_SCHED) {
                struct elevator_queue *e = data->q->elevator;

                INIT_HLIST_NODE(&rq->hash);
                RB_CLEAR_NODE(&rq->rb_node);

                if (e->type->ops.prepare_request)
                        e->type->ops.prepare_request(rq);
        }

        return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
        unsigned int tag, tag_offset;
        struct blk_mq_tags *tags;
        struct request *rq;
        unsigned long tag_mask;
        int i, nr = 0;

        tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
        if (unlikely(!tag_mask))
                return NULL;

        tags = blk_mq_tags_from_data(data);
        for (i = 0; tag_mask; i++) {
                if (!(tag_mask & (1UL << i)))
                        continue;
                tag = tag_offset + i;
                prefetch(tags->static_rqs[tag]);
                tag_mask &= ~(1UL << i);
                rq = blk_mq_rq_ctx_init(data, tags, tag);
                rq_list_add(data->cached_rq, rq);
                nr++;
        }
        /* caller already holds a reference, add for remainder */
        percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
        data->nr_tags -= nr;

        return rq_list_pop(data->cached_rq);
}

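/*
 * Slow path of request allocation: pick a ctx/hctx, apply scheduler
 * depth limits, try a batched tag allocation first when several tags
 * were asked for, and fall back to a single tag. A sleeping allocation
 * may retry after msleep() if the hctx it chose went inactive due to
 * CPU hotplug.
 */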
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
        struct request_queue *q = data->q;
        u64 alloc_time_ns = 0;
        struct request *rq;
        unsigned int tag;

        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
                alloc_time_ns = ktime_get_ns();

        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

retry:
        data->ctx = blk_mq_get_ctx(q);
        data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

        if (q->elevator) {
                /*
                 * All requests use scheduler tags when an I/O scheduler is
                 * enabled for the queue.
                 */
                data->rq_flags |= RQF_SCHED_TAGS;

                /*
                 * Flush/passthrough requests are special and go directly to
                 * the dispatch list.
                 */
                if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
                    !blk_op_is_passthrough(data->cmd_flags)) {
                        struct elevator_mq_ops *ops = &q->elevator->type->ops;

                        WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

                        data->rq_flags |= RQF_USE_SCHED;
                        if (ops->limit_depth)
                                ops->limit_depth(data->cmd_flags, data);
                }
        } else {
                blk_mq_tag_busy(data->hctx);
        }

        if (data->flags & BLK_MQ_REQ_RESERVED)
                data->rq_flags |= RQF_RESV;

        /*
         * Try batched alloc if we want more than 1 tag.
         */
        if (data->nr_tags > 1) {
                rq = __blk_mq_alloc_requests_batch(data);
                if (rq) {
                        blk_mq_rq_time_init(rq, alloc_time_ns);
                        return rq;
                }
                data->nr_tags = 1;
        }

        /*
         * Waiting allocations only fail because of an inactive hctx. In that
         * case just retry the hctx assignment and tag allocation as CPU hotplug
         * should have migrated us to an online CPU by now.
         */
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_NO_TAG) {
                if (data->flags & BLK_MQ_REQ_NOWAIT)
                        return NULL;
                /*
                 * Give up the CPU and sleep for a random short time to
                 * ensure that threads using a realtime scheduling class
                 * are migrated off the CPU, and thus off the hctx that
                 * is going away.
                 */
                msleep(3);
                goto retry;
        }

        rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
        blk_mq_rq_time_init(rq, alloc_time_ns);
        return rq;
}

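/*
 * Fill the plug's request cache with up to plug->nr_ios requests in a
 * single batched allocation; the first request is handed back to the
 * caller and the remainder stay on plug->cached_rq for later
 * allocations.
 */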
static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
                                            struct blk_plug *plug,
                                            blk_opf_t opf,
                                            blk_mq_req_flags_t flags)
{
        struct blk_mq_alloc_data data = {
                .q              = q,
                .flags          = flags,
                .cmd_flags      = opf,
                .nr_tags        = plug->nr_ios,
                .cached_rq      = &plug->cached_rq,
        };
        struct request *rq;

        if (blk_queue_enter(q, flags))
                return NULL;

        plug->nr_ios = 1;

        rq = __blk_mq_alloc_requests(&data);
        if (unlikely(!rq))
                blk_queue_exit(q);
        return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
                                                   blk_opf_t opf,
                                                   blk_mq_req_flags_t flags)
{
        struct blk_plug *plug = current->plug;
        struct request *rq;

        if (!plug)
                return NULL;

        if (rq_list_empty(plug->cached_rq)) {
                if (plug->nr_ios == 1)
                        return NULL;
                rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
                if (!rq)
                        return NULL;
        } else {
                rq = rq_list_peek(&plug->cached_rq);
                if (!rq || rq->q != q)
                        return NULL;

                if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
                        return NULL;
                if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
                        return NULL;

                plug->cached_rq = rq_list_next(rq);
                blk_mq_rq_time_init(rq, 0);
        }

        rq->cmd_flags = opf;
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
}

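/*
 * Allocate a request with no bio attached, first trying the current
 * task's plug cache and falling back to a fresh tag allocation. Returns
 * the request or an ERR_PTR() on failure.
 *
 * A minimal usage sketch (the usual passthrough pattern; the op and the
 * setup in between depend on the caller):
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */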
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
                                     blk_mq_req_flags_t flags)
{
        struct request *rq;

        rq = blk_mq_alloc_cached_request(q, opf, flags);
        if (!rq) {
                struct blk_mq_alloc_data data = {
                        .q              = q,
                        .flags          = flags,
                        .cmd_flags      = opf,
                        .nr_tags        = 1,
                };
                int ret;

                ret = blk_queue_enter(q, flags);
                if (ret)
                        return ERR_PTR(ret);

                rq = __blk_mq_alloc_requests(&data);
                if (!rq)
                        goto out_queue_exit;
        }
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
out_queue_exit:
        blk_queue_exit(q);
        return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data data = {
                .q              = q,
                .flags          = flags,
                .cmd_flags      = opf,
                .nr_tags        = 1,
        };
        u64 alloc_time_ns = 0;
        struct request *rq;
        unsigned int cpu;
        unsigned int tag;
        int ret;

        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
                alloc_time_ns = ktime_get_ns();

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context. No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
            WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        ret = -EXDEV;
        data.hctx = xa_load(&q->hctx_table, hctx_idx);
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                goto out_queue_exit;
        data.ctx = __blk_mq_get_ctx(q, cpu);

        if (q->elevator)
                data.rq_flags |= RQF_SCHED_TAGS;
        else
                blk_mq_tag_busy(data.hctx);

        if (flags & BLK_MQ_REQ_RESERVED)
                data.rq_flags |= RQF_RESV;

        ret = -EWOULDBLOCK;
        tag = blk_mq_get_tag(&data);
        if (tag == BLK_MQ_NO_TAG)
                goto out_queue_exit;
        rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
        blk_mq_rq_time_init(rq, alloc_time_ns);
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;

out_queue_exit:
        blk_queue_exit(q);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void blk_mq_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (rq->rq_flags & RQF_USE_SCHED) {
                q->elevator->type->ops.finish_request(rq);
                /*
                 * A postflush request may need to be completed twice;
                 * clear the flag so finish_request() is not called a
                 * second time for the same rq.
                 */
                rq->rq_flags &= ~RQF_USE_SCHED;
        }
}

static void __blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        const int sched_tag = rq->internal_tag;

        blk_crypto_free_request(rq);
        blk_pm_mark_last_busy(rq);
        rq->mq_hctx = NULL;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                __blk_mq_dec_active_requests(hctx);

        if (rq->tag != BLK_MQ_NO_TAG)
                blk_mq_put_tag(hctx->tags, ctx, rq->tag);
        if (sched_tag != BLK_MQ_NO_TAG)
                blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_finish_request(rq);

        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->disk->bdi);

        rq_qos_done(q, rq);

        WRITE_ONCE(rq->state, MQ_RQ_IDLE);
        if (req_ref_put_and_test(rq))
                __blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
        struct request *rq;

        while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
                blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
                rq->q->disk ? rq->q->disk->disk_name : "?",
                (__force unsigned long long) rq->cmd_flags);

        printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
               (unsigned long long)blk_rq_pos(rq),
               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
        printk(KERN_INFO " bio %p, biotail %p, len %u\n",
               rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

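/*
 * Complete @nbytes of @bio on behalf of @rq. A partial zone append
 * completion is turned into an error, since the remaining fragments
 * could end up written non-sequentially; on full completion the bio's
 * sector is set to the sector that was actually written.
 */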
static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, blk_status_t error)
{
        if (unlikely(error)) {
                bio->bi_status = error;
        } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
                /*
                 * Partial zone append completions cannot be supported as the
                 * BIO fragments may end up not being written sequentially.
                 */
                if (bio->bi_iter.bi_size != nbytes)
                        bio->bi_status = BLK_STS_IOERR;
                else
                        bio->bi_iter.bi_sector = rq->__sector;
        }

        bio_advance(bio, nbytes);

        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
        /* don't actually finish bio if it's part of flush sequence */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
        if (req->part && blk_do_io_stat(req)) {
                const int sgrp = op_stat_group(req_op(req));

                part_stat_lock();
                part_stat_add(req->part, sectors[sgrp], bytes >> 9);
                part_stat_unlock();
        }
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
        printk_ratelimited(KERN_ERR
                "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
                "phys_seg %u prio class %u\n",
                blk_status_to_str(status),
                req->q->disk ? req->q->disk->disk_name : "?",
                blk_rq_pos(req), (__force u32)req_op(req),
                blk_op_str(req_op(req)),
                (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
                req->nr_phys_segments,
                IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
        const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
        int total_bytes = blk_rq_bytes(req);
        struct bio *bio = req->bio;

        trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

        if (!bio)
                return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
                req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

        /*
         * Upper layers may call blk_crypto_evict_key() anytime after the last
         * bio_endio(). Therefore, the keyslot must be released before that.
         */
        blk_crypto_rq_put_keyslot(req);

        blk_account_io_completion(req, total_bytes);

        do {
                struct bio *next = bio->bi_next;

                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);

                if (req_op(req) == REQ_OP_ZONE_APPEND)
                        bio->bi_iter.bi_sector = req->__sector;

                if (!is_flush)
                        bio_endio(bio);
                bio = next;
        } while (bio);

        /*
         * Reset counters so that the request stacking driver
         * can find how many bytes remain in the request
         * later.
         */
        if (!req->end_io) {
                req->bio = NULL;
                req->__data_len = 0;
        }
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req: the request being processed
 * @error: block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *     except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
                unsigned int nr_bytes)
{
        int total_bytes;

        trace_block_rq_complete(req, error, nr_bytes);

        if (!req->bio)
                return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
            error == BLK_STS_OK)
                req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

        /*
         * Upper layers may call blk_crypto_evict_key() anytime after the last
         * bio_endio(). Therefore, the keyslot must be released before that.
         */
        if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
                __blk_crypto_rq_put_keyslot(req);

        if (unlikely(error && !blk_rq_is_passthrough(req) &&
                     !(req->rq_flags & RQF_QUIET)) &&
            !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }

        blk_account_io_completion(req, nr_bytes);

        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

                if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;

                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
                req_bio_endio(req, bio, bio_bytes, error);

                total_bytes += bio_bytes;
                nr_bytes -= bio_bytes;

                if (!nr_bytes)
                        break;
        }

        /*
         * completely done
         */
        if (!req->bio) {
                /*
                 * Reset counters so that the request stacking driver
                 * can find how many bytes remain in the request
                 * later.
                 */
                req->__data_len = 0;
                return false;
        }

        req->__data_len -= total_bytes;

        /* update sector only for requests with clear definition of sector */
        if (!blk_rq_is_passthrough(req))
                req->__sector += total_bytes >> 9;

        /* mixed attributes always follow the first bio */
        if (req->rq_flags & RQF_MIXED_MERGE) {
                req->cmd_flags &= ~REQ_FAILFAST_MASK;
                req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
        }

        if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
                /*
                 * If total number of sectors is less than the first segment
                 * size, something has gone terribly wrong.
                 */
                if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
                        blk_dump_rq_flags(req, "request botched");
                        req->__data_len = blk_rq_cur_bytes(req);
                }

                /* recalculate the number of segments */
                req->nr_phys_segments = blk_recalc_rq_segments(req);
        }

        return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static inline void blk_account_io_done(struct request *req, u64 now)
{
        trace_block_io_done(req);

        /*
         * Account IO completion. flush_rq isn't accounted as a
         * normal IO on queueing nor completion. Accounting the
         * containing request is enough.
         */
        if (blk_do_io_stat(req) && req->part &&
            !(req->rq_flags & RQF_FLUSH_SEQ)) {
                const int sgrp = op_stat_group(req_op(req));

                part_stat_lock();
                update_io_ticks(req->part, jiffies, true);
                part_stat_inc(req->part, ios[sgrp]);
                part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
                part_stat_local_dec(req->part,
                                    in_flight[op_is_write(req_op(req))]);
                part_stat_unlock();
        }
}

static inline void blk_account_io_start(struct request *req)
{
        trace_block_io_start(req);

        if (blk_do_io_stat(req)) {
                /*
                 * All non-passthrough requests are created from a bio with one
                 * exception: when a flush command that is part of a flush sequence
                 * generated by the state machine in blk-flush.c is cloned onto the
                 * lower device by dm-multipath we can get here without a bio.
                 */
                if (req->bio)
                        req->part = req->bio->bi_bdev;
                else
                        req->part = req->q->disk->part0;

                part_stat_lock();
                update_io_ticks(req->part, jiffies, false);
                part_stat_local_inc(req->part,
                                    in_flight[op_is_write(req_op(req))]);
                part_stat_unlock();
        }
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
        if (rq->rq_flags & RQF_STATS)
                blk_stat_add(rq, now);

        blk_mq_sched_completed_request(rq, now);
        blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_mq_need_time_stamp(rq))
                __blk_mq_end_request_acct(rq, ktime_get_ns());

        blk_mq_finish_request(rq);

        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
                if (rq->end_io(rq, error) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else {
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

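/* Number of tags handed back to the tag map in one flush of a completion batch */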
#define TAG_COMP_BATCH 32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
                                          int *tag_array, int nr_tags)
{
        struct request_queue *q = hctx->queue;

        /*
         * All requests should have been marked as RQF_MQ_INFLIGHT, so
         * update hctx->nr_active in batch
         */
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_sub_active_requests(hctx, nr_tags);

        blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
        percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

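/**
 * blk_mq_end_request_batch - complete a batch of requests
 * @iob: the completion batch to process
 *
 * Completes every request on @iob->req_list, batching the timestamp,
 * tag and q_usage_counter updates so their cost is amortized across the
 * whole batch.
 */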
void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
        int tags[TAG_COMP_BATCH], nr_tags = 0;
        struct blk_mq_hw_ctx *cur_hctx = NULL;
        struct request *rq;
        u64 now = 0;

        if (iob->need_ts)
                now = ktime_get_ns();

        while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
                prefetch(rq->bio);
                prefetch(rq->rq_next);

                blk_complete_request(rq);
                if (iob->need_ts)
                        __blk_mq_end_request_acct(rq, now);

                blk_mq_finish_request(rq);

                rq_qos_done(rq->q, rq);

                /*
                 * If end_io handler returns NONE, then it still has
                 * ownership of the request.
                 */
                if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
                        continue;

                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
                if (!req_ref_put_and_test(rq))
                        continue;

                blk_crypto_free_request(rq);
                blk_pm_mark_last_busy(rq);

                if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
                        if (cur_hctx)
                                blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
                        nr_tags = 0;
                        cur_hctx = rq->mq_hctx;
                }
                tags[nr_tags++] = rq->tag;
        }

        if (nr_tags)
                blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
        struct llist_node *entry = llist_reverse_order(llist_del_all(list));
        struct request *rq, *next;

        llist_for_each_entry_safe(rq, next, entry, ipi_list)
                rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
        blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
        blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
        return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
        __raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

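/*
 * Decide whether a completion should be redirected via IPI to the CPU
 * that submitted the request. Completing locally is preferred whenever
 * the submitting CPU shares a cache domain (and capacity) with the
 * current one, or when redirection cannot help: UP kernels,
 * force-threaded interrupts, or an offline submitting CPU.
 */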
static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
        int cpu = raw_smp_processor_id();

        if (!IS_ENABLED(CONFIG_SMP) ||
            !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
                return false;
        /*
         * With force threaded interrupts enabled, raising softirq from an SMP
         * function call will always result in waking the ksoftirqd thread.
         * This is probably worse than completing the request on a different
         * cache domain.
         */
        if (force_irqthreads())
                return false;

        /* same CPU or cache domain and capacity? Complete locally */
        if (cpu == rq->mq_ctx->cpu ||
            (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
             cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
             cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
                return false;

        /* don't try to IPI to an offline CPU */
        return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
        unsigned int cpu;

        cpu = rq->mq_ctx->cpu;
        if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
                smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
}

static void blk_mq_raise_softirq(struct request *rq)
{
        struct llist_head *list;

        preempt_disable();
        list = this_cpu_ptr(&blk_cpu_done);
        if (llist_add(&rq->ipi_list, list))
                raise_softirq(BLOCK_SOFTIRQ);
        preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

        /*
         * For a request whose hctx has only one ctx mapping, or a polled
         * request, always complete locally; redirecting the completion
         * would be pointless.
         */
        if ((rq->mq_hctx->nr_ctx == 1 &&
             rq->mq_ctx->cpu == raw_smp_processor_id()) ||
             rq->cmd_flags & REQ_POLLED)
                return false;

        if (blk_mq_complete_need_ipi(rq)) {
                blk_mq_complete_send_ipi(rq);
                return true;
        }

        if (rq->q->nr_hw_queues == 1) {
                blk_mq_raise_softirq(rq);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *     Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
        if (!blk_mq_complete_request_remote(rq))
                rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_issue(rq);

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
                rq->io_start_time_ns = ktime_get_ns();
                rq->stats_sectors = blk_rq_sectors(rq);
                rq->rq_flags |= RQF_STATS;
                rq_qos_issue(q, rq);
        }

        WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

        blk_add_timer(rq);
        WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
                q->integrity.profile->prepare_fn(rq);
#endif
        if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
                WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
        if (plug->multiple_queues)
                return BLK_MAX_REQUEST_COUNT * 2;
        return BLK_MAX_REQUEST_COUNT;
}

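/*
 * Queue a request on the current task's plug list. If the plug is full,
 * or merging is enabled and the previously queued request is already
 * large enough to be worth submitting, the plug is flushed first so the
 * list stays bounded.
 */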
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
        struct request *last = rq_list_peek(&plug->mq_list);

        if (!plug->rq_count) {
                trace_block_plug(rq->q);
        } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
                   (!blk_queue_nomerges(rq->q) &&
                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
                blk_mq_flush_plug_list(plug, false);
                last = NULL;
                trace_block_plug(rq->q);
        }

        if (!plug->multiple_queues && last && last->q != rq->q)
                plug->multiple_queues = true;
        /*
         * Any request allocated from sched tags can't be issued to
         * ->queue_rqs() directly
         */
        if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
                plug->has_elevator = true;
        rq->rq_next = NULL;
        rq_list_add(&plug->mq_list, rq);
        plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request into the I/O scheduler for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution. Don't wait for completion.
 *
 * Note:
 *    This function will invoke the rq->end_io callback directly if the
 *    queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(irqs_disabled());
        WARN_ON(!blk_rq_is_passthrough(rq));

        blk_account_io_start(rq);

        /*
         * As plugging can be enabled for passthrough requests on a zoned
         * device, directly accessing the plug instead of using blk_mq_plug()
         * should not have any consequences.
         */
        if (current->plug && !at_head) {
                blk_add_rq_to_plug(current->plug, rq);
                return;
        }

        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
        blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
        struct completion done;
        blk_status_t ret;
};

static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
        struct blk_rq_wait *wait = rq->end_io_data;

        wait->ret = ret;
        complete(&wait->done);
        return RQ_END_IO_NONE;
}

bool blk_rq_is_poll(struct request *rq)
{
        if (!rq->mq_hctx)
                return false;
        if (rq->mq_hctx->type != HCTX_TYPE_POLL)
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
        do {
                blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
                cond_resched();
        } while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_rq_wait wait = {
                .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
        };

        WARN_ON(irqs_disabled());
        WARN_ON(!blk_rq_is_passthrough(rq));

        rq->end_io_data = &wait;
        rq->end_io = blk_end_sync_rq;

        blk_account_io_start(rq);
        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
        blk_mq_run_hw_queue(hctx, false);

        if (blk_rq_is_poll(rq)) {
                blk_rq_poll_completion(rq, &wait.done);
        } else {
                /*
                 * Prevent hang_check timer from firing at us during very long
                 * I/O
                 */
                unsigned long hang_check = sysctl_hung_task_timeout_secs;

                if (hang_check)
                        while (!wait_for_completion_io_timeout(&wait.done,
                                        hang_check * (HZ/2)))
                                ;
                else
                        wait_for_completion_io(&wait.done);
        }

        return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        blk_mq_put_driver_tag(rq);

        trace_block_rq_requeue(rq);
        rq_qos_requeue(q, rq);

        if (blk_mq_request_started(rq)) {
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
                rq->rq_flags &= ~RQF_TIMED_OUT;
        }
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        __blk_mq_requeue_request(rq);

        /* this request will be re-inserted to io scheduler queue */
        blk_mq_sched_requeue_request(rq);

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_add_tail(&rq->queuelist, &q->requeue_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        LIST_HEAD(flush_list);
        struct request *rq;

        spin_lock_irq(&q->requeue_lock);
        list_splice_init(&q->requeue_list, &rq_list);
        list_splice_init(&q->flush_list, &flush_list);
        spin_unlock_irq(&q->requeue_lock);

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                /*
                 * If RQF_DONTPREP is set, the request has been started by the
                 * driver already and might have driver-specific data allocated
                 * already. Insert it into the hctx dispatch list to avoid
                 * block layer merges for the request.
                 */
                if (rq->rq_flags & RQF_DONTPREP) {
                        list_del_init(&rq->queuelist);
                        blk_mq_request_bypass_insert(rq, 0);
                } else {
                        list_del_init(&rq->queuelist);
                        blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
                }
        }

        while (!list_empty(&flush_list)) {
                rq = list_entry(flush_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, 0);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
                                    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

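/* A data request of a flush sequence, as opposed to the flush request itself */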
static bool blk_is_flush_data_rq(struct request *rq)
{
        return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
        /*
         * If we find a request that isn't idle we know the queue is busy
         * as it's checked in the iter.
         * Return false to stop the iteration.
         *
         * In case of queue quiesce, if one flush data request is completed,
         * don't count it as inflight given the flush sequence is suspended,
         * and the original flush data request is invisible to the driver,
         * just like other pending requests because of quiesce.
         */
        if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
                                blk_is_flush_data_rq(rq) &&
                                blk_mq_request_completed(rq))) {
                bool *busy = priv;

                *busy = true;
                return false;
        }

        return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
        bool busy = false;

        blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
        return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req)
{
        req->rq_flags |= RQF_TIMED_OUT;
        if (req->q->mq_ops->timeout) {
                enum blk_eh_timer_return ret;

                ret = req->q->mq_ops->timeout(req);
                if (ret == BLK_EH_DONE)
                        return;
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }

        blk_add_timer(req);
}

struct blk_expired_data {
        bool has_timedout_rq;
        unsigned long next;
        unsigned long timeout_start;
};

static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{
        unsigned long deadline;

        if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
                return false;
        if (rq->rq_flags & RQF_TIMED_OUT)
                return false;

        deadline = READ_ONCE(rq->deadline);
        if (time_after_eq(expired->timeout_start, deadline))
                return true;

        if (expired->next == 0)
                expired->next = deadline;
        else if (time_after(expired->next, deadline))
                expired->next = deadline;
        return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
        if (is_flush_rq(rq)) {
                if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else if (req_ref_put_and_test(rq)) {
                __blk_mq_free_request(rq);
        }
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
        struct blk_expired_data *expired = priv;

        /*
         * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
         * be reallocated underneath the timeout handler's processing, then
         * the expire check is reliable. If the request is not expired, then
         * it was completed and reallocated as a new request after returning
         * from blk_mq_check_expired().
         */
        if (blk_mq_req_expired(rq, expired)) {
                expired->has_timedout_rq = true;
                return false;
        }
        return true;
}

static bool blk_mq_handle_expired(struct request *rq, void *priv)
{
        struct blk_expired_data *expired = priv;

        if (blk_mq_req_expired(rq, expired))
                blk_mq_rq_timed_out(rq);
        return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_expired_data expired = {
                .timeout_start = jiffies,
        };
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        /*
         * A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        /* check if there is any timed-out request */
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
        if (expired.has_timedout_rq) {
                /*
                 * Before walking tags, we must ensure any submit started
                 * before the current time has finished. Since the submit
                 * uses srcu or rcu, wait for a synchronization point to
                 * ensure all running submits have finished
                 */
                blk_mq_wait_quiesce_done(q->tag_set);

                expired.next = 0;
                blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
        }

        if (expired.next != 0) {
                mod_timer(&q->timeout, expired.next);
        } else {
                /*
                 * Request timeouts are handled as a forward rolling timer. If
                 * we end up here it means that no requests are pending and
                 * also that no request has been pending for a while. Mark
                 * each hctx as idle.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
                void *data)
{
        struct dispatch_rq_data *dispatch_data = data;
        struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_lists[type])) {
                dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
                list_del_init(&dispatch_data->rq->queuelist);
                if (list_empty(&ctx->rq_lists[type]))
                        sbitmap_clear_bit(sb, bitnr);
        }
        spin_unlock(&ctx->lock);

        return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_ctx *start)
{
        unsigned off = start ? start->index_hw[hctx->type] : 0;
        struct dispatch_rq_data data = {
                .hctx = hctx,
                .rq = NULL,
        };

        __sbitmap_for_each_set(&hctx->ctx_map, off,
                               dispatch_rq_from_ctx, &data);

        return data.rq;
}

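/*
 * Allocate a driver tag for @rq straight from the hardware queue's tag
 * map, using the reserved pool when the scheduler tag was reserved.
 * Returns false if the tag space is currently exhausted.
 */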
__blk_mq_alloc_driver_tag(struct request * rq)1767 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1768 {
1769 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1770 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1771 int tag;
1772
1773 blk_mq_tag_busy(rq->mq_hctx);
1774
1775 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1776 bt = &rq->mq_hctx->tags->breserved_tags;
1777 tag_offset = 0;
1778 }
1779
1780 tag = __sbitmap_queue_get(bt);
1781 if (tag == BLK_MQ_NO_TAG)
1782 return false;
1783
1784 rq->tag = tag + tag_offset;
1785 return true;
1786 }
1787
__blk_mq_get_driver_tag(struct blk_mq_hw_ctx * hctx,struct request * rq)1788 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1789 {
1790 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1791 return false;
1792
1793 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1794 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1795 rq->rq_flags |= RQF_MQ_INFLIGHT;
1796 __blk_mq_inc_active_requests(hctx);
1797 }
1798 hctx->tags->rqs[rq->tag] = rq;
1799 return true;
1800 }
1801
1802 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1803 int flags, void *key)
1804 {
1805 struct blk_mq_hw_ctx *hctx;
1806
1807 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1808
1809 spin_lock(&hctx->dispatch_wait_lock);
1810 if (!list_empty(&wait->entry)) {
1811 struct sbitmap_queue *sbq;
1812
1813 list_del_init(&wait->entry);
1814 sbq = &hctx->tags->bitmap_tags;
1815 atomic_dec(&sbq->ws_active);
1816 }
1817 spin_unlock(&hctx->dispatch_wait_lock);
1818
1819 blk_mq_run_hw_queue(hctx, true);
1820 return 1;
1821 }
1822
1823 /*
1824 * Mark us waiting for a tag. For shared tags, this involves hooking us into
1825 * the tag wakeups. For non-shared tags, we can simply mark us needing a
1826 * restart. For both cases, take care to check the condition again after
1827 * marking us as waiting.
1828 */
1829 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1830 struct request *rq)
1831 {
1832 struct sbitmap_queue *sbq;
1833 struct wait_queue_head *wq;
1834 wait_queue_entry_t *wait;
1835 bool ret;
1836
1837 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1838 !(blk_mq_is_shared_tags(hctx->flags))) {
1839 blk_mq_sched_mark_restart_hctx(hctx);
1840
1841 /*
1842 * It's possible that a tag was freed in the window between the
1843 * allocation failure and adding the hardware queue to the wait
1844 * queue.
1845 *
1846 * Don't clear RESTART here, someone else could have set it.
1847 * At most this will cost an extra queue run.
1848 */
1849 return blk_mq_get_driver_tag(rq);
1850 }
1851
1852 wait = &hctx->dispatch_wait;
1853 if (!list_empty_careful(&wait->entry))
1854 return false;
1855
1856 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1857 sbq = &hctx->tags->breserved_tags;
1858 else
1859 sbq = &hctx->tags->bitmap_tags;
1860 wq = &bt_wait_ptr(sbq, hctx)->wait;
1861
1862 spin_lock_irq(&wq->lock);
1863 spin_lock(&hctx->dispatch_wait_lock);
1864 if (!list_empty(&wait->entry)) {
1865 spin_unlock(&hctx->dispatch_wait_lock);
1866 spin_unlock_irq(&wq->lock);
1867 return false;
1868 }
1869
1870 atomic_inc(&sbq->ws_active);
1871 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1872 __add_wait_queue(wq, wait);
1873
1874 /*
1875 * Add an explicit barrier since blk_mq_get_driver_tag() may not
1876 * imply one in the failure case.
1877 *
1878 * Order adding us to the wait queue against allocating the driver tag.
1879 *
1880 * The pairing barrier is the one implied in sbitmap_queue_wake_up(),
1881 * which orders clearing the sbitmap tag bits against the lockless
1882 * waitqueue_active() check in __sbitmap_queue_wake_up().
1883 *
1884 * Otherwise, reordering of the wait queue add and the tag allocation
1885 * may cause __sbitmap_queue_wake_up() to wake up nobody, because
1886 * waitqueue_active() may not observe us on the wait queue.
1887 */
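	/*
	 * Illustrative interleaving (editorial sketch, not part of the
	 * original source) of the stall this smp_mb() prevents:
	 *
	 *	dispatch CPU				completing CPU
	 *	------------				--------------
	 *	blk_mq_get_driver_tag() fails
	 *						tag bit is cleared
	 *						waitqueue_active() sees
	 *						an empty queue, no wakeup
	 *	__add_wait_queue(wq, wait)
	 *	blk_mq_get_driver_tag() fails again
	 *	-> nobody ever wakes this hctx
	 *
	 * With the barrier, at least one side must observe the other:
	 * either the retry below sees the freed tag, or the waker sees
	 * us on the wait queue.
	 */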
1888 smp_mb();
1889
1890 /*
1891 * It's possible that a tag was freed in the window between the
1892 * allocation failure and adding the hardware queue to the wait
1893 * queue.
1894 */
1895 ret = blk_mq_get_driver_tag(rq);
1896 if (!ret) {
1897 spin_unlock(&hctx->dispatch_wait_lock);
1898 spin_unlock_irq(&wq->lock);
1899 return false;
1900 }
1901
1902 /*
1903 * We got a tag, remove ourselves from the wait queue to ensure
1904 * someone else gets the wakeup.
1905 */
1906 list_del_init(&wait->entry);
1907 atomic_dec(&sbq->ws_active);
1908 spin_unlock(&hctx->dispatch_wait_lock);
1909 spin_unlock_irq(&wq->lock);
1910
1911 return true;
1912 }
1913
1914 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
1915 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
1916 /*
1917 * Update dispatch_busy with an Exponentially Weighted Moving Average (EWMA):
1918 * - EWMA is a simple way to maintain a running average
1919 * - weights of 7/8 (old) and 1/8 (new) make the average decay exponentially
1920 * - a factor of 4 keeps the result from rounding down to 0 too easily; its
1921 * exact value hardly matters since the EWMA decays exponentially anyway
1922 */
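/*
 * Worked example (editorial): with WEIGHT == 8 and FACTOR == 4, each
 * update computes ewma = (ewma * 7 + (busy ? 16 : 0)) / 8 in integer
 * arithmetic. Starting from 0, one busy update yields (0 * 7 + 16) / 8
 * = 2, a second yields (2 * 7 + 16) / 8 = 3, and an idle update then
 * decays it to (3 * 7) / 8 = 2. A non-zero dispatch_busy value thus
 * needs a few consecutive idle updates before it drains back to 0.
 */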
1923 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1924 {
1925 unsigned int ewma;
1926
1927 ewma = hctx->dispatch_busy;
1928
1929 if (!ewma && !busy)
1930 return;
1931
1932 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1933 if (busy)
1934 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1935 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1936
1937 hctx->dispatch_busy = ewma;
1938 }
1939
1940 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1941
1942 static void blk_mq_handle_dev_resource(struct request *rq,
1943 struct list_head *list)
1944 {
1945 list_add(&rq->queuelist, list);
1946 __blk_mq_requeue_request(rq);
1947 }
1948
1949 static void blk_mq_handle_zone_resource(struct request *rq,
1950 struct list_head *zone_list)
1951 {
1952 /*
1953 * If we end up here it is because we cannot dispatch a request to a
1954 * specific zone due to LLD level zone-write locking or other zone
1955 * related resource not being available. In this case, set the request
1956 * aside in zone_list for retrying it later.
1957 */
1958 list_add(&rq->queuelist, zone_list);
1959 __blk_mq_requeue_request(rq);
1960 }
1961
1962 enum prep_dispatch {
1963 PREP_DISPATCH_OK,
1964 PREP_DISPATCH_NO_TAG,
1965 PREP_DISPATCH_NO_BUDGET,
1966 };
1967
1968 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1969 bool need_budget)
1970 {
1971 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1972 int budget_token = -1;
1973
1974 if (need_budget) {
1975 budget_token = blk_mq_get_dispatch_budget(rq->q);
1976 if (budget_token < 0) {
1977 blk_mq_put_driver_tag(rq);
1978 return PREP_DISPATCH_NO_BUDGET;
1979 }
1980 blk_mq_set_rq_budget_token(rq, budget_token);
1981 }
1982
1983 if (!blk_mq_get_driver_tag(rq)) {
1984 /*
1985 * The initial allocation attempt failed, so we need to
1986 * rerun the hardware queue when a tag is freed. The
1987 * waitqueue takes care of that. If the queue is run
1988 * before we add this entry back on the dispatch list,
1989 * we'll re-run it below.
1990 */
1991 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1992 /*
1993 * Budgets not acquired by this function are released
1994 * together when the partial dispatch is handled.
1995 */
1996 if (need_budget)
1997 blk_mq_put_dispatch_budget(rq->q, budget_token);
1998 return PREP_DISPATCH_NO_TAG;
1999 }
2000 }
2001
2002 return PREP_DISPATCH_OK;
2003 }
2004
2005 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
2006 static void blk_mq_release_budgets(struct request_queue *q,
2007 struct list_head *list)
2008 {
2009 struct request *rq;
2010
2011 list_for_each_entry(rq, list, queuelist) {
2012 int budget_token = blk_mq_get_rq_budget_token(rq);
2013
2014 if (budget_token >= 0)
2015 blk_mq_put_dispatch_budget(q, budget_token);
2016 }
2017 }
2018
2019 /*
2020 * blk_mq_commit_rqs() notifies the driver, via bd->last, that there are no
2021 * more requests. (See the comment for commit_rqs in struct blk_mq_ops for
2022 * details.)
2023 * Note that we must call this explicitly in the unusual cases where:
2024 * 1) we did not queue everything initially scheduled for queueing
2025 * 2) the last attempt to queue a request failed
2026 */
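/*
 * As a hedged illustration (hypothetical driver, not part of this file),
 * a driver that batches doorbell writes on bd->last would pair its
 * ->queue_rq() with a ->commit_rqs() along these lines:
 *
 *	static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
 *	{
 *		struct foo_queue *fq = hctx->driver_data;
 *
 *		writel(fq->sq_tail, fq->doorbell);
 *	}
 *
 * so that requests queued with bd->last == false still reach the
 * hardware when the dispatch loop stops early.
 */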
2027 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
2028 bool from_schedule)
2029 {
2030 if (hctx->queue->mq_ops->commit_rqs && queued) {
2031 trace_block_unplug(hctx->queue, queued, !from_schedule);
2032 hctx->queue->mq_ops->commit_rqs(hctx);
2033 }
2034 }
2035
2036 /*
2037 * Returns true if we did some work AND can potentially do more.
2038 */
2039 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2040 unsigned int nr_budgets)
2041 {
2042 enum prep_dispatch prep;
2043 struct request_queue *q = hctx->queue;
2044 struct request *rq;
2045 int queued;
2046 blk_status_t ret = BLK_STS_OK;
2047 LIST_HEAD(zone_list);
2048 bool needs_resource = false;
2049
2050 if (list_empty(list))
2051 return false;
2052
2053 /*
2054 * Now process all the entries, sending them to the driver.
2055 */
2056 queued = 0;
2057 do {
2058 struct blk_mq_queue_data bd;
2059
2060 rq = list_first_entry(list, struct request, queuelist);
2061
2062 WARN_ON_ONCE(hctx != rq->mq_hctx);
2063 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2064 if (prep != PREP_DISPATCH_OK)
2065 break;
2066
2067 list_del_init(&rq->queuelist);
2068
2069 bd.rq = rq;
2070 bd.last = list_empty(list);
2071
2072 /*
2073 * Once the request is queued to the LLD, the budget no longer
2074 * needs to be covered here.
2075 */
2076 if (nr_budgets)
2077 nr_budgets--;
2078 ret = q->mq_ops->queue_rq(hctx, &bd);
2079 switch (ret) {
2080 case BLK_STS_OK:
2081 queued++;
2082 break;
2083 case BLK_STS_RESOURCE:
2084 needs_resource = true;
2085 fallthrough;
2086 case BLK_STS_DEV_RESOURCE:
2087 blk_mq_handle_dev_resource(rq, list);
2088 goto out;
2089 case BLK_STS_ZONE_RESOURCE:
2090 /*
2091 * Move the request to zone_list and keep going through
2092 * the dispatch list to find more requests the drive can
2093 * accept.
2094 */
2095 blk_mq_handle_zone_resource(rq, &zone_list);
2096 needs_resource = true;
2097 break;
2098 default:
2099 blk_mq_end_request(rq, ret);
2100 }
2101 } while (!list_empty(list));
2102 out:
2103 if (!list_empty(&zone_list))
2104 list_splice_tail_init(&zone_list, list);
2105
2106 /* If we didn't flush the entire list, we could have told the driver
2107 * there was more coming, but that turned out to be a lie.
2108 */
2109 if (!list_empty(list) || ret != BLK_STS_OK)
2110 blk_mq_commit_rqs(hctx, queued, false);
2111
2112 /*
2113 * Any items that need requeuing? Stuff them into hctx->dispatch,
2114 * that is where we will continue on next queue run.
2115 */
2116 if (!list_empty(list)) {
2117 bool needs_restart;
2118 /* For non-shared tags, the RESTART check will suffice */
2119 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2120 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
2121 blk_mq_is_shared_tags(hctx->flags));
2122
2123 if (nr_budgets)
2124 blk_mq_release_budgets(q, list);
2125
2126 spin_lock(&hctx->lock);
2127 list_splice_tail_init(list, &hctx->dispatch);
2128 spin_unlock(&hctx->lock);
2129
2130 /*
2131 * Order adding requests to hctx->dispatch and checking
2132 * SCHED_RESTART flag. The pairing smp_mb() is the one
2133 * in blk_mq_sched_restart(). This keeps the restart path from
2134 * missing the requests newly added to hctx->dispatch while
2135 * SCHED_RESTART is observed here.
2136 */
2137 smp_mb();
2138
2139 /*
2140 * If SCHED_RESTART was set by the caller of this function and
2141 * it is no longer set that means that it was cleared by another
2142 * thread and hence that a queue rerun is needed.
2143 *
2144 * If 'no_tag' is set, that means that we failed getting
2145 * a driver tag with an I/O scheduler attached. If our dispatch
2146 * waitqueue is no longer active, ensure that we run the queue
2147 * AFTER adding our entries back to the list.
2148 *
2149 * If no I/O scheduler has been configured it is possible that
2150 * the hardware queue got stopped and restarted before requests
2151 * were pushed back onto the dispatch list. Rerun the queue to
2152 * avoid starvation. Notes:
2153 * - blk_mq_run_hw_queue() checks whether or not a queue has
2154 * been stopped before rerunning a queue.
2155 * - Some but not all block drivers stop a queue before
2156 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2157 * and dm-rq.
2158 *
2159 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2160 * bit is set, run queue after a delay to avoid IO stalls
2161 * that could otherwise occur if the queue is idle. We'll do
2162 * similar if we couldn't get budget or couldn't lock a zone
2163 * and SCHED_RESTART is set.
2164 */
2165 needs_restart = blk_mq_sched_needs_restart(hctx);
2166 if (prep == PREP_DISPATCH_NO_BUDGET)
2167 needs_resource = true;
2168 if (!needs_restart ||
2169 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2170 blk_mq_run_hw_queue(hctx, true);
2171 else if (needs_resource)
2172 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2173
2174 blk_mq_update_dispatch_busy(hctx, true);
2175 return false;
2176 }
2177
2178 blk_mq_update_dispatch_busy(hctx, false);
2179 return true;
2180 }
2181
2182 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2183 {
2184 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2185
2186 if (cpu >= nr_cpu_ids)
2187 cpu = cpumask_first(hctx->cpumask);
2188 return cpu;
2189 }
2190
2191 /*
2192 * It'd be great if the workqueue API had a way to pass
2193 * in a mask and had some smarts for more clever placement.
2194 * For now we just round-robin here, switching for every
2195 * BLK_MQ_CPU_WORK_BATCH queued items.
2196 */
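/*
 * Editorial example: assuming BLK_MQ_CPU_WORK_BATCH is 8 and
 * hctx->cpumask spans CPUs {2, 5, 9}, run_work is scheduled on CPU 2
 * for eight queue runs, then on CPU 5 for the next eight, then on
 * CPU 9, wrapping back to CPU 2 afterwards.
 */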
2197 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2198 {
2199 bool tried = false;
2200 int next_cpu = hctx->next_cpu;
2201
2202 if (hctx->queue->nr_hw_queues == 1)
2203 return WORK_CPU_UNBOUND;
2204
2205 if (--hctx->next_cpu_batch <= 0) {
2206 select_cpu:
2207 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2208 cpu_online_mask);
2209 if (next_cpu >= nr_cpu_ids)
2210 next_cpu = blk_mq_first_mapped_cpu(hctx);
2211 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2212 }
2213
2214 /*
2215 * Do an unbound schedule if we can't find an online CPU for this hctx;
2216 * this should only happen on the CPU DEAD handling path.
2217 */
2218 if (!cpu_online(next_cpu)) {
2219 if (!tried) {
2220 tried = true;
2221 goto select_cpu;
2222 }
2223
2224 /*
2225 * Make sure to re-select the CPU next time, once CPUs in
2226 * hctx->cpumask come back online.
2227 */
2228 hctx->next_cpu = next_cpu;
2229 hctx->next_cpu_batch = 1;
2230 return WORK_CPU_UNBOUND;
2231 }
2232
2233 hctx->next_cpu = next_cpu;
2234 return next_cpu;
2235 }
2236
2237 /**
2238 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2239 * @hctx: Pointer to the hardware queue to run.
2240 * @msecs: Milliseconds of delay to wait before running the queue.
2241 *
2242 * Run a hardware queue asynchronously with a delay of @msecs.
2243 */
2244 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2245 {
2246 if (unlikely(blk_mq_hctx_stopped(hctx)))
2247 return;
2248 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2249 msecs_to_jiffies(msecs));
2250 }
2251 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2252
2253 /**
2254 * blk_mq_run_hw_queue - Start to run a hardware queue.
2255 * @hctx: Pointer to the hardware queue to run.
2256 * @async: If we want to run the queue asynchronously.
2257 *
2258 * Check if the request queue is not in a quiesced state and if there are
2259 * pending requests to be sent. If this is true, run the queue to send requests
2260 * to hardware.
2261 */
2262 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2263 {
2264 bool need_run;
2265
2266 /*
2267 * We can't run the queue inline with interrupts disabled.
2268 */
2269 WARN_ON_ONCE(!async && in_interrupt());
2270
2271 might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2272
2273 /*
2274 * While the queue is quiesced we may be switching the io scheduler,
2275 * updating nr_hw_queues, or similar; the queue can't be run any more,
2276 * and even __blk_mq_hctx_has_pending() can't be called safely.
2277 *
2278 * The queue will be rerun by blk_mq_unquiesce_queue() if it was
2279 * quiesced.
2280 */
2281 __blk_mq_run_dispatch_ops(hctx->queue, false,
2282 need_run = !blk_queue_quiesced(hctx->queue) &&
2283 blk_mq_hctx_has_pending(hctx));
2284
2285 if (!need_run)
2286 return;
2287
2288 if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2289 blk_mq_delay_run_hw_queue(hctx, 0);
2290 return;
2291 }
2292
2293 blk_mq_run_dispatch_ops(hctx->queue,
2294 blk_mq_sched_dispatch_requests(hctx));
2295 }
2296 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2297
2298 /*
2299 * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
2300 * scheduler.
2301 */
2302 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2303 {
2304 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2305 /*
2306 * If the IO scheduler does not respect hardware queues when
2307 * dispatching, we just don't bother with multiple HW queues and
2308 * dispatch from hctx for the current CPU since running multiple queues
2309 * just causes lock contention inside the scheduler and pointless cache
2310 * bouncing.
2311 */
2312 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2313
2314 if (!blk_mq_hctx_stopped(hctx))
2315 return hctx;
2316 return NULL;
2317 }
2318
2319 /**
2320 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2321 * @q: Pointer to the request queue to run.
2322 * @async: If we want to run the queue asynchronously.
2323 */
2324 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2325 {
2326 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2327 unsigned long i;
2328
2329 sq_hctx = NULL;
2330 if (blk_queue_sq_sched(q))
2331 sq_hctx = blk_mq_get_sq_hctx(q);
2332 queue_for_each_hw_ctx(q, hctx, i) {
2333 if (blk_mq_hctx_stopped(hctx))
2334 continue;
2335 /*
2336 * Dispatch from this hctx either if there's no hctx preferred
2337 * by IO scheduler or if it has requests that bypass the
2338 * scheduler.
2339 */
2340 if (!sq_hctx || sq_hctx == hctx ||
2341 !list_empty_careful(&hctx->dispatch))
2342 blk_mq_run_hw_queue(hctx, async);
2343 }
2344 }
2345 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2346
2347 /**
2348 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2349 * @q: Pointer to the request queue to run.
2350 * @msecs: Milliseconds of delay to wait before running the queues.
2351 */
2352 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2353 {
2354 struct blk_mq_hw_ctx *hctx, *sq_hctx;
2355 unsigned long i;
2356
2357 sq_hctx = NULL;
2358 if (blk_queue_sq_sched(q))
2359 sq_hctx = blk_mq_get_sq_hctx(q);
2360 queue_for_each_hw_ctx(q, hctx, i) {
2361 if (blk_mq_hctx_stopped(hctx))
2362 continue;
2363 /*
2364 * If there is already a run_work pending, leave the
2365 * pending delay untouched. Otherwise, a hctx can stall
2366 * if another hctx is re-delaying the other's work
2367 * before the work executes.
2368 */
2369 if (delayed_work_pending(&hctx->run_work))
2370 continue;
2371 /*
2372 * Dispatch from this hctx either if there's no hctx preferred
2373 * by IO scheduler or if it has requests that bypass the
2374 * scheduler.
2375 */
2376 if (!sq_hctx || sq_hctx == hctx ||
2377 !list_empty_careful(&hctx->dispatch))
2378 blk_mq_delay_run_hw_queue(hctx, msecs);
2379 }
2380 }
2381 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2382
2383 /*
2384 * Drivers often use this function to pause .queue_rq() when there
2385 * aren't enough resources or some condition isn't satisfied; in that
2386 * case BLK_STS_RESOURCE is usually returned.
2387 *
2388 * We do not guarantee that dispatch can be drained or blocked
2389 * after blk_mq_stop_hw_queue() returns. Please use
2390 * blk_mq_quiesce_queue() for that requirement.
2391 */
2392 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2393 {
2394 cancel_delayed_work(&hctx->run_work);
2395
2396 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2397 }
2398 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
2399
2400 /*
2401 * Drivers often use this function to pause .queue_rq() when there
2402 * aren't enough resources or some condition isn't satisfied; in that
2403 * case BLK_STS_RESOURCE is usually returned.
2404 *
2405 * We do not guarantee that dispatch can be drained or blocked
2406 * after blk_mq_stop_hw_queues() returns. Please use
2407 * blk_mq_quiesce_queue() for that requirement.
2408 */
2409 void blk_mq_stop_hw_queues(struct request_queue *q)
2410 {
2411 struct blk_mq_hw_ctx *hctx;
2412 unsigned long i;
2413
2414 queue_for_each_hw_ctx(q, hctx, i)
2415 blk_mq_stop_hw_queue(hctx);
2416 }
2417 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2418
2419 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2420 {
2421 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2422
2423 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
2424 }
2425 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2426
2427 void blk_mq_start_hw_queues(struct request_queue *q)
2428 {
2429 struct blk_mq_hw_ctx *hctx;
2430 unsigned long i;
2431
2432 queue_for_each_hw_ctx(q, hctx, i)
2433 blk_mq_start_hw_queue(hctx);
2434 }
2435 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2436
2437 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2438 {
2439 if (!blk_mq_hctx_stopped(hctx))
2440 return;
2441
2442 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2443 blk_mq_run_hw_queue(hctx, async);
2444 }
2445 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2446
2447 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2448 {
2449 struct blk_mq_hw_ctx *hctx;
2450 unsigned long i;
2451
2452 queue_for_each_hw_ctx(q, hctx, i)
2453 blk_mq_start_stopped_hw_queue(hctx, async ||
2454 (hctx->flags & BLK_MQ_F_BLOCKING));
2455 }
2456 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2457
2458 static void blk_mq_run_work_fn(struct work_struct *work)
2459 {
2460 struct blk_mq_hw_ctx *hctx =
2461 container_of(work, struct blk_mq_hw_ctx, run_work.work);
2462
2463 blk_mq_run_dispatch_ops(hctx->queue,
2464 blk_mq_sched_dispatch_requests(hctx));
2465 }
2466
2467 /**
2468 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2469 * @rq: Pointer to request to be inserted.
2470 * @flags: BLK_MQ_INSERT_*
2471 *
2472 * Should only be used with care, when the caller knows we want to
2473 * bypass a potential IO scheduler on the target device.
2474 */
2475 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2476 {
2477 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2478
2479 spin_lock(&hctx->lock);
2480 if (flags & BLK_MQ_INSERT_AT_HEAD)
2481 list_add(&rq->queuelist, &hctx->dispatch);
2482 else
2483 list_add_tail(&rq->queuelist, &hctx->dispatch);
2484 spin_unlock(&hctx->lock);
2485 }
2486
2487 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
2488 struct blk_mq_ctx *ctx, struct list_head *list,
2489 bool run_queue_async)
2490 {
2491 struct request *rq;
2492 enum hctx_type type = hctx->type;
2493
2494 /*
2495 * Try to issue requests directly if the hw queue isn't busy to save an
2496 * extra enqueue & dequeue to the sw queue.
2497 */
2498 if (!hctx->dispatch_busy && !run_queue_async) {
2499 blk_mq_run_dispatch_ops(hctx->queue,
2500 blk_mq_try_issue_list_directly(hctx, list));
2501 if (list_empty(list))
2502 goto out;
2503 }
2504
2505 /*
2506 * Preemption doesn't flush the plug list, so it's possible that
2507 * ctx->cpu is offline by now.
2508 */
2509 list_for_each_entry(rq, list, queuelist) {
2510 BUG_ON(rq->mq_ctx != ctx);
2511 trace_block_rq_insert(rq);
2512 if (rq->cmd_flags & REQ_NOWAIT)
2513 run_queue_async = true;
2514 }
2515
2516 spin_lock(&ctx->lock);
2517 list_splice_tail_init(list, &ctx->rq_lists[type]);
2518 blk_mq_hctx_mark_pending(hctx, ctx);
2519 spin_unlock(&ctx->lock);
2520 out:
2521 blk_mq_run_hw_queue(hctx, run_queue_async);
2522 }
2523
2524 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2525 {
2526 struct request_queue *q = rq->q;
2527 struct blk_mq_ctx *ctx = rq->mq_ctx;
2528 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2529
2530 if (blk_rq_is_passthrough(rq)) {
2531 /*
2532 * Passthrough requests have to be added to hctx->dispatch
2533 * directly. The device may be in a situation where it can't
2534 * handle FS requests, and always returns BLK_STS_RESOURCE for
2535 * them, which gets them added to hctx->dispatch.
2536 *
2537 * If a passthrough request is required to unblock the queues,
2538 * and it is added to the scheduler queue, there is no chance to
2539 * dispatch it given we prioritize requests in hctx->dispatch.
2540 */
2541 blk_mq_request_bypass_insert(rq, flags);
2542 } else if (req_op(rq) == REQ_OP_FLUSH) {
2543 /*
2544 * First, normal IO requests are inserted into the scheduler queue
2545 * or the sw queue, while flush requests are added directly to the
2546 * dispatch queue (hctx->dispatch). Since there is at most one
2547 * in-flight flush request per hw queue, it doesn't matter whether
2548 * the flush request goes to the tail or the front of that queue.
2549 *
2550 * Second, with NCQ a flush request is a non-NCQ command, and
2551 * queueing it fails while any normal IO request (NCQ command) is
2552 * in flight. Adding the flush rq to the front of hctx->dispatch
2553 * tends to add some extra latency to the flush rq, because of
2554 * S_SCHED_RESTART, compared with adding it to the tail; that
2555 * increases the chance of flush merging, so fewer flush requests
2556 * are issued to the controller. It has been observed that ~10% of
2557 * the time is saved in blktests block/004 on a disk attached to an
2558 * AHCI/NCQ drive when adding the flush rq to the front of
2559 * hctx->dispatch.
2560 *
2561 * So simply queue the flush rq at the front of hctx->dispatch so
2562 * that flush-intensive workloads can benefit on NCQ hardware.
2563 */
2564 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2565 } else if (q->elevator) {
2566 LIST_HEAD(list);
2567
2568 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2569
2570 list_add(&rq->queuelist, &list);
2571 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2572 } else {
2573 trace_block_rq_insert(rq);
2574
2575 spin_lock(&ctx->lock);
2576 if (flags & BLK_MQ_INSERT_AT_HEAD)
2577 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2578 else
2579 list_add_tail(&rq->queuelist,
2580 &ctx->rq_lists[hctx->type]);
2581 blk_mq_hctx_mark_pending(hctx, ctx);
2582 spin_unlock(&ctx->lock);
2583 }
2584 }
2585
2586 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2587 unsigned int nr_segs)
2588 {
2589 int err;
2590
2591 if (bio->bi_opf & REQ_RAHEAD)
2592 rq->cmd_flags |= REQ_FAILFAST_MASK;
2593
2594 rq->__sector = bio->bi_iter.bi_sector;
2595 rq->write_hint = bio->bi_write_hint;
2596 blk_rq_bio_prep(rq, bio, nr_segs);
2597
2598 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2599 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2600 WARN_ON_ONCE(err);
2601
2602 blk_account_io_start(rq);
2603 }
2604
2605 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2606 struct request *rq, bool last)
2607 {
2608 struct request_queue *q = rq->q;
2609 struct blk_mq_queue_data bd = {
2610 .rq = rq,
2611 .last = last,
2612 };
2613 blk_status_t ret;
2614
2615 /*
2616 * If queueing succeeds we are done; on a hard error the caller may
2617 * kill the request. For any busy status, just add it back to our
2618 * list as we previously would have done.
2619 */
2620 ret = q->mq_ops->queue_rq(hctx, &bd);
2621 switch (ret) {
2622 case BLK_STS_OK:
2623 blk_mq_update_dispatch_busy(hctx, false);
2624 break;
2625 case BLK_STS_RESOURCE:
2626 case BLK_STS_DEV_RESOURCE:
2627 blk_mq_update_dispatch_busy(hctx, true);
2628 __blk_mq_requeue_request(rq);
2629 break;
2630 default:
2631 blk_mq_update_dispatch_busy(hctx, false);
2632 break;
2633 }
2634
2635 return ret;
2636 }
2637
2638 static bool blk_mq_get_budget_and_tag(struct request *rq)
2639 {
2640 int budget_token;
2641
2642 budget_token = blk_mq_get_dispatch_budget(rq->q);
2643 if (budget_token < 0)
2644 return false;
2645 blk_mq_set_rq_budget_token(rq, budget_token);
2646 if (!blk_mq_get_driver_tag(rq)) {
2647 blk_mq_put_dispatch_budget(rq->q, budget_token);
2648 return false;
2649 }
2650 return true;
2651 }
2652
2653 /**
2654 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2655 * @hctx: Pointer of the associated hardware queue.
2656 * @rq: Pointer to request to be sent.
2657 *
2658 * If the device has enough resources to accept a new request now, send the
2659 * request directly to the device driver. Else, insert it into the
2660 * hctx->dispatch queue, so we can try to send it again later. Requests
2661 * inserted into this queue have higher priority.
2662 */
2663 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2664 struct request *rq)
2665 {
2666 blk_status_t ret;
2667
2668 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2669 blk_mq_insert_request(rq, 0);
2670 return;
2671 }
2672
2673 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2674 blk_mq_insert_request(rq, 0);
2675 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2676 return;
2677 }
2678
2679 ret = __blk_mq_issue_directly(hctx, rq, true);
2680 switch (ret) {
2681 case BLK_STS_OK:
2682 break;
2683 case BLK_STS_RESOURCE:
2684 case BLK_STS_DEV_RESOURCE:
2685 blk_mq_request_bypass_insert(rq, 0);
2686 blk_mq_run_hw_queue(hctx, false);
2687 break;
2688 default:
2689 blk_mq_end_request(rq, ret);
2690 break;
2691 }
2692 }
2693
2694 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2695 {
2696 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2697
2698 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2699 blk_mq_insert_request(rq, 0);
2700 return BLK_STS_OK;
2701 }
2702
2703 if (!blk_mq_get_budget_and_tag(rq))
2704 return BLK_STS_RESOURCE;
2705 return __blk_mq_issue_directly(hctx, rq, last);
2706 }
2707
2708 static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2709 {
2710 struct blk_mq_hw_ctx *hctx = NULL;
2711 struct request *rq;
2712 int queued = 0;
2713 blk_status_t ret = BLK_STS_OK;
2714
2715 while ((rq = rq_list_pop(&plug->mq_list))) {
2716 bool last = rq_list_empty(plug->mq_list);
2717
2718 if (hctx != rq->mq_hctx) {
2719 if (hctx) {
2720 blk_mq_commit_rqs(hctx, queued, false);
2721 queued = 0;
2722 }
2723 hctx = rq->mq_hctx;
2724 }
2725
2726 ret = blk_mq_request_issue_directly(rq, last);
2727 switch (ret) {
2728 case BLK_STS_OK:
2729 queued++;
2730 break;
2731 case BLK_STS_RESOURCE:
2732 case BLK_STS_DEV_RESOURCE:
2733 blk_mq_request_bypass_insert(rq, 0);
2734 blk_mq_run_hw_queue(hctx, false);
2735 goto out;
2736 default:
2737 blk_mq_end_request(rq, ret);
2738 break;
2739 }
2740 }
2741
2742 out:
2743 if (ret != BLK_STS_OK)
2744 blk_mq_commit_rqs(hctx, queued, false);
2745 }
2746
2747 static void __blk_mq_flush_plug_list(struct request_queue *q,
2748 struct blk_plug *plug)
2749 {
2750 if (blk_queue_quiesced(q))
2751 return;
2752 q->mq_ops->queue_rqs(&plug->mq_list);
2753 }
2754
2755 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2756 {
2757 struct blk_mq_hw_ctx *this_hctx = NULL;
2758 struct blk_mq_ctx *this_ctx = NULL;
2759 struct request *requeue_list = NULL;
2760 struct request **requeue_lastp = &requeue_list;
2761 unsigned int depth = 0;
2762 bool is_passthrough = false;
2763 LIST_HEAD(list);
2764
2765 do {
2766 struct request *rq = rq_list_pop(&plug->mq_list);
2767
2768 if (!this_hctx) {
2769 this_hctx = rq->mq_hctx;
2770 this_ctx = rq->mq_ctx;
2771 is_passthrough = blk_rq_is_passthrough(rq);
2772 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2773 is_passthrough != blk_rq_is_passthrough(rq)) {
2774 rq_list_add_tail(&requeue_lastp, rq);
2775 continue;
2776 }
2777 list_add(&rq->queuelist, &list);
2778 depth++;
2779 } while (!rq_list_empty(plug->mq_list));
2780
2781 plug->mq_list = requeue_list;
2782 trace_block_unplug(this_hctx->queue, depth, !from_sched);
2783
2784 percpu_ref_get(&this_hctx->queue->q_usage_counter);
2785 /* passthrough requests should never be issued to the I/O scheduler */
2786 if (is_passthrough) {
2787 spin_lock(&this_hctx->lock);
2788 list_splice_tail_init(&list, &this_hctx->dispatch);
2789 spin_unlock(&this_hctx->lock);
2790 blk_mq_run_hw_queue(this_hctx, from_sched);
2791 } else if (this_hctx->queue->elevator) {
2792 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
2793 &list, 0);
2794 blk_mq_run_hw_queue(this_hctx, from_sched);
2795 } else {
2796 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
2797 }
2798 percpu_ref_put(&this_hctx->queue->q_usage_counter);
2799 }
2800
2801 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2802 {
2803 struct request *rq;
2804
2805 /*
2806 * We may have been called recursively midway through handling
2807 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2808 * To avoid mq_list changing under our feet, clear rq_count early and
2809 * bail out specifically if rq_count is 0 rather than checking
2810 * whether the mq_list is empty.
2811 */
2812 if (plug->rq_count == 0)
2813 return;
2814 plug->rq_count = 0;
2815
2816 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2817 struct request_queue *q;
2818
2819 rq = rq_list_peek(&plug->mq_list);
2820 q = rq->q;
2821
2822 /*
2823 * Peek first request and see if we have a ->queue_rqs() hook.
2824 * If we do, we can dispatch the whole plug list in one go. We
2825 * already know at this point that all requests belong to the
2826 * same queue, caller must ensure that's the case.
2827 *
2828 * Since we pass off the full list to the driver at this point,
2829 * we do not increment the active request count for the queue.
2830 * Bypass shared tags for now because of that.
2831 */
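		/*
		 * Sketch of the hook's shape (hypothetical driver, for
		 * illustration only): a ->queue_rqs() implementation
		 * typically drains the whole list in one pass, e.g.
		 *
		 *	static void foo_queue_rqs(struct request **rqlist)
		 *	{
		 *		struct request *rq;
		 *
		 *		while ((rq = rq_list_pop(rqlist)))
		 *			foo_queue_one(rq);
		 *	}
		 */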
2832 if (q->mq_ops->queue_rqs &&
2833 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2834 blk_mq_run_dispatch_ops(q,
2835 __blk_mq_flush_plug_list(q, plug));
2836 if (rq_list_empty(plug->mq_list))
2837 return;
2838 }
2839
2840 blk_mq_run_dispatch_ops(q,
2841 blk_mq_plug_issue_direct(plug));
2842 if (rq_list_empty(plug->mq_list))
2843 return;
2844 }
2845
2846 do {
2847 blk_mq_dispatch_plug_list(plug, from_schedule);
2848 } while (!rq_list_empty(plug->mq_list));
2849 }
2850
2851 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2852 struct list_head *list)
2853 {
2854 int queued = 0;
2855 blk_status_t ret = BLK_STS_OK;
2856
2857 while (!list_empty(list)) {
2858 struct request *rq = list_first_entry(list, struct request,
2859 queuelist);
2860
2861 list_del_init(&rq->queuelist);
2862 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2863 switch (ret) {
2864 case BLK_STS_OK:
2865 queued++;
2866 break;
2867 case BLK_STS_RESOURCE:
2868 case BLK_STS_DEV_RESOURCE:
2869 blk_mq_request_bypass_insert(rq, 0);
2870 if (list_empty(list))
2871 blk_mq_run_hw_queue(hctx, false);
2872 goto out;
2873 default:
2874 blk_mq_end_request(rq, ret);
2875 break;
2876 }
2877 }
2878
2879 out:
2880 if (ret != BLK_STS_OK)
2881 blk_mq_commit_rqs(hctx, queued, false);
2882 }
2883
2884 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2885 struct bio *bio, unsigned int nr_segs)
2886 {
2887 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2888 if (blk_attempt_plug_merge(q, bio, nr_segs))
2889 return true;
2890 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2891 return true;
2892 }
2893 return false;
2894 }
2895
2896 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2897 struct blk_plug *plug,
2898 struct bio *bio,
2899 unsigned int nsegs)
2900 {
2901 struct blk_mq_alloc_data data = {
2902 .q = q,
2903 .nr_tags = 1,
2904 .cmd_flags = bio->bi_opf,
2905 };
2906 struct request *rq;
2907
2908 if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2909 return NULL;
2910
2911 rq_qos_throttle(q, bio);
2912
2913 if (plug) {
2914 data.nr_tags = plug->nr_ios;
2915 plug->nr_ios = 1;
2916 data.cached_rq = &plug->cached_rq;
2917 }
2918
2919 rq = __blk_mq_alloc_requests(&data);
2920 if (rq)
2921 return rq;
2922 rq_qos_cleanup(q, bio);
2923 if (bio->bi_opf & REQ_NOWAIT)
2924 bio_wouldblock_error(bio);
2925 return NULL;
2926 }
2927
2928 /* return true if this @rq can be used for @bio */
2929 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
2930 struct bio *bio)
2931 {
2932 enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
2933 enum hctx_type hctx_type = rq->mq_hctx->type;
2934
2935 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2936
2937 if (type != hctx_type &&
2938 !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
2939 return false;
2940 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2941 return false;
2942
2943 /*
2944 * If any qos ->throttle() ends up blocking, we will have flushed the
2945 * plug and hence killed the cached_rq list as well. Pop this entry
2946 * before we throttle.
2947 */
2948 plug->cached_rq = rq_list_next(rq);
2949 rq_qos_throttle(rq->q, bio);
2950
2951 blk_mq_rq_time_init(rq, 0);
2952 rq->cmd_flags = bio->bi_opf;
2953 INIT_LIST_HEAD(&rq->queuelist);
2954 return true;
2955 }
2956
2957 /**
2958 * blk_mq_submit_bio - Create and send a request to block device.
2959 * @bio: Bio pointer.
2960 *
2961 * Builds up a request structure from @q and @bio and sends it to the device.
2962 * The request may not be queued directly to the hardware if:
2963 * * This request can be merged with another one
2964 * * We want to place request at plug queue for possible future merging
2965 * * There is an IO scheduler active at this queue
2966 *
2967 * It will not queue the request if there is an error with the bio or at
2968 * request creation.
2969 */
2970 void blk_mq_submit_bio(struct bio *bio)
2971 {
2972 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2973 struct blk_plug *plug = blk_mq_plug(bio);
2974 const int is_sync = op_is_sync(bio->bi_opf);
2975 struct blk_mq_hw_ctx *hctx;
2976 struct request *rq = NULL;
2977 unsigned int nr_segs = 1;
2978 blk_status_t ret;
2979
2980 bio = blk_queue_bounce(bio, q);
2981
2982 if (plug) {
2983 rq = rq_list_peek(&plug->cached_rq);
2984 if (rq && rq->q != q)
2985 rq = NULL;
2986 }
2987 if (rq) {
2988 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
2989 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2990 if (!bio)
2991 return;
2992 } else if (bio->bi_vcnt == 1) {
2993 nr_segs = blk_segments(&q->limits, bio->bi_io_vec[0].bv_len);
2994 }
2995 if (!bio_integrity_prep(bio))
2996 return;
2997 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
2998 return;
2999 if (blk_mq_can_use_cached_rq(rq, plug, bio))
3000 goto done;
3001 percpu_ref_get(&q->q_usage_counter);
3002 } else {
3003 if (unlikely(bio_queue_enter(bio)))
3004 return;
3005 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
3006 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3007 if (!bio)
3008 goto fail;
3009 } else if (bio->bi_vcnt == 1) {
3010 nr_segs = blk_segments(&q->limits, bio->bi_io_vec[0].bv_len);
3011 }
3012 if (!bio_integrity_prep(bio))
3013 goto fail;
3014 }
3015
3016 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3017 if (unlikely(!rq)) {
3018 fail:
3019 blk_queue_exit(q);
3020 return;
3021 }
3022
3023 done:
3024 trace_block_getrq(bio);
3025
3026 rq_qos_track(q, rq, bio);
3027
3028 blk_mq_bio_to_request(rq, bio, nr_segs);
3029
3030 ret = blk_crypto_rq_get_keyslot(rq);
3031 if (ret != BLK_STS_OK) {
3032 bio->bi_status = ret;
3033 bio_endio(bio);
3034 blk_mq_free_request(rq);
3035 return;
3036 }
3037
3038 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3039 return;
3040
3041 if (plug) {
3042 blk_add_rq_to_plug(plug, rq);
3043 return;
3044 }
3045
3046 hctx = rq->mq_hctx;
3047 if ((rq->rq_flags & RQF_USE_SCHED) ||
3048 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3049 blk_mq_insert_request(rq, 0);
3050 blk_mq_run_hw_queue(hctx, true);
3051 } else {
3052 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3053 }
3054 }
3055
3056 #ifdef CONFIG_BLK_MQ_STACKING
3057 /**
3058 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
3059 * @rq: the request being queued
3060 */
3061 blk_status_t blk_insert_cloned_request(struct request *rq)
3062 {
3063 struct request_queue *q = rq->q;
3064 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3065 unsigned int max_segments = blk_rq_get_max_segments(rq);
3066 blk_status_t ret;
3067
3068 if (blk_rq_sectors(rq) > max_sectors) {
3069 /*
3070 * A SCSI device does not have a good way to report whether
3071 * Write Same/Zero is actually supported. If a device rejects
3072 * a non-read/write command (discard, write same, etc.) the
3073 * low-level device driver will set the relevant queue limit to
3074 * 0 to prevent blk-lib from issuing more of the offending
3075 * operations. Commands queued prior to the queue limit being
3076 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
3077 * errors being propagated to upper layers.
3078 */
3079 if (max_sectors == 0)
3080 return BLK_STS_NOTSUPP;
3081
3082 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
3083 __func__, blk_rq_sectors(rq), max_sectors);
3084 return BLK_STS_IOERR;
3085 }
3086
3087 /*
3088 * The queue settings related to segment counting may differ from the
3089 * original queue.
3090 */
3091 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3092 if (rq->nr_phys_segments > max_segments) {
3093 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
3094 __func__, rq->nr_phys_segments, max_segments);
3095 return BLK_STS_IOERR;
3096 }
3097
3098 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3099 return BLK_STS_IOERR;
3100
3101 ret = blk_crypto_rq_get_keyslot(rq);
3102 if (ret != BLK_STS_OK)
3103 return ret;
3104
3105 blk_account_io_start(rq);
3106
3107 /*
3108 * Since we have a scheduler attached on the top device,
3109 * bypass a potential scheduler on the bottom device for
3110 * insert.
3111 */
3112 blk_mq_run_dispatch_ops(q,
3113 ret = blk_mq_request_issue_directly(rq, true));
3114 if (ret)
3115 blk_account_io_done(rq, ktime_get_ns());
3116 return ret;
3117 }
3118 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3119
3120 /**
3121 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3122 * @rq: the clone request to be cleaned up
3123 *
3124 * Description:
3125 * Free all bios in @rq for a cloned request.
3126 */
3127 void blk_rq_unprep_clone(struct request *rq)
3128 {
3129 struct bio *bio;
3130
3131 while ((bio = rq->bio) != NULL) {
3132 rq->bio = bio->bi_next;
3133
3134 bio_put(bio);
3135 }
3136 }
3137 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3138
3139 /**
3140 * blk_rq_prep_clone - Helper function to setup clone request
3141 * @rq: the request to be setup
3142 * @rq_src: original request to be cloned
3143 * @bs: bio_set that bios for clone are allocated from
3144 * @gfp_mask: memory allocation mask for bio
3145 * @bio_ctr: setup function to be called for each clone bio.
3146 * Returns %0 for success, non %0 for failure.
3147 * @data: private data to be passed to @bio_ctr
3148 *
3149 * Description:
3150 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3151 * Also, the pages which the original bios point to are not copied:
3152 * the cloned bios just point at the same pages.
3153 * So the cloned bios must be completed before the original bios, which
3154 * means the caller must complete @rq before @rq_src.
3155 */
3156 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3157 struct bio_set *bs, gfp_t gfp_mask,
3158 int (*bio_ctr)(struct bio *, struct bio *, void *),
3159 void *data)
3160 {
3161 struct bio *bio, *bio_src;
3162
3163 if (!bs)
3164 bs = &fs_bio_set;
3165
3166 __rq_for_each_bio(bio_src, rq_src) {
3167 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3168 bs);
3169 if (!bio)
3170 goto free_and_out;
3171
3172 if (bio_ctr && bio_ctr(bio, bio_src, data))
3173 goto free_and_out;
3174
3175 if (rq->bio) {
3176 rq->biotail->bi_next = bio;
3177 rq->biotail = bio;
3178 } else {
3179 rq->bio = rq->biotail = bio;
3180 }
3181 bio = NULL;
3182 }
3183
3184 /* Copy attributes of the original request to the clone request. */
3185 rq->__sector = blk_rq_pos(rq_src);
3186 rq->__data_len = blk_rq_bytes(rq_src);
3187 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3188 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3189 rq->special_vec = rq_src->special_vec;
3190 }
3191 rq->nr_phys_segments = rq_src->nr_phys_segments;
3192 rq->ioprio = rq_src->ioprio;
3193 rq->write_hint = rq_src->write_hint;
3194
3195 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3196 goto free_and_out;
3197
3198 return 0;
3199
3200 free_and_out:
3201 if (bio)
3202 bio_put(bio);
3203 blk_rq_unprep_clone(rq);
3204
3205 return -ENOMEM;
3206 }
3207 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
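/*
 * Rough usage sketch for a stacking driver (editorial, hypothetical
 * names): allocate a clone on the bottom queue, prep it from the
 * original, and issue it directly:
 *
 *	clone = blk_mq_alloc_request(bottom_q, req_op(rq),
 *				     BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(clone))
 *		return PTR_ERR(clone);
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL)) {
 *		blk_mq_free_request(clone);
 *		return -ENOMEM;
 *	}
 *	ret = blk_insert_cloned_request(clone);
 *
 * On completion the driver calls blk_rq_unprep_clone() before freeing
 * the clone, and must complete the clone before the original.
 */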
3208 #endif /* CONFIG_BLK_MQ_STACKING */
3209
3210 /*
3211 * Steal bios from a request and add them to a bio list.
3212 * The request must not have been partially completed before.
3213 */
3214 void blk_steal_bios(struct bio_list *list, struct request *rq)
3215 {
3216 if (rq->bio) {
3217 if (list->tail)
3218 list->tail->bi_next = rq->bio;
3219 else
3220 list->head = rq->bio;
3221 list->tail = rq->biotail;
3222
3223 rq->bio = NULL;
3224 rq->biotail = NULL;
3225 }
3226
3227 rq->__data_len = 0;
3228 }
3229 EXPORT_SYMBOL_GPL(blk_steal_bios);
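/*
 * Editorial sketch of failover-style usage (hypothetical caller): the
 * stolen bios outlive the request and are resubmitted, typically down
 * a different path or device:
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	blk_steal_bios(&bios, rq);
 *	blk_mq_end_request(rq, BLK_STS_OK);
 *	while ((bio = bio_list_pop(&bios)))
 *		submit_bio_noacct(bio);
 */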
3230
3231 static size_t order_to_size(unsigned int order)
3232 {
3233 return (size_t)PAGE_SIZE << order;
3234 }
3235
3236 /* called before freeing request pool in @tags */
3237 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3238 struct blk_mq_tags *tags)
3239 {
3240 struct page *page;
3241 unsigned long flags;
3242
3243 /*
3244 * There is no need to clear the mapping if the driver tags are not
3245 * initialized or the mapping belongs to the driver tags.
3246 */
3247 if (!drv_tags || drv_tags == tags)
3248 return;
3249
3250 list_for_each_entry(page, &tags->page_list, lru) {
3251 unsigned long start = (unsigned long)page_address(page);
3252 unsigned long end = start + order_to_size(page->private);
3253 int i;
3254
3255 for (i = 0; i < drv_tags->nr_tags; i++) {
3256 struct request *rq = drv_tags->rqs[i];
3257 unsigned long rq_addr = (unsigned long)rq;
3258
3259 if (rq_addr >= start && rq_addr < end) {
3260 WARN_ON_ONCE(req_ref_read(rq) != 0);
3261 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3262 }
3263 }
3264 }
3265
3266 /*
3267 * Wait until all pending iterations are done.
3268 *
3269 * The request references have been cleared, and that is guaranteed
3270 * to be observed once the ->lock is released.
3271 */
3272 spin_lock_irqsave(&drv_tags->lock, flags);
3273 spin_unlock_irqrestore(&drv_tags->lock, flags);
3274 }
3275
3276 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3277 unsigned int hctx_idx)
3278 {
3279 struct blk_mq_tags *drv_tags;
3280 struct page *page;
3281
3282 if (list_empty(&tags->page_list))
3283 return;
3284
3285 if (blk_mq_is_shared_tags(set->flags))
3286 drv_tags = set->shared_tags;
3287 else
3288 drv_tags = set->tags[hctx_idx];
3289
3290 if (tags->static_rqs && set->ops->exit_request) {
3291 int i;
3292
3293 for (i = 0; i < tags->nr_tags; i++) {
3294 struct request *rq = tags->static_rqs[i];
3295
3296 if (!rq)
3297 continue;
3298 set->ops->exit_request(set, rq, hctx_idx);
3299 tags->static_rqs[i] = NULL;
3300 }
3301 }
3302
3303 blk_mq_clear_rq_mapping(drv_tags, tags);
3304
3305 while (!list_empty(&tags->page_list)) {
3306 page = list_first_entry(&tags->page_list, struct page, lru);
3307 list_del_init(&page->lru);
3308 /*
3309 * Remove kmemleak object previously allocated in
3310 * blk_mq_alloc_rqs().
3311 */
3312 kmemleak_free(page_address(page));
3313 __free_pages(page, page->private);
3314 }
3315 }
3316
3317 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3318 {
3319 kfree(tags->rqs);
3320 tags->rqs = NULL;
3321 kfree(tags->static_rqs);
3322 tags->static_rqs = NULL;
3323
3324 blk_mq_free_tags(tags);
3325 }
3326
3327 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3328 unsigned int hctx_idx)
3329 {
3330 int i;
3331
3332 for (i = 0; i < set->nr_maps; i++) {
3333 unsigned int start = set->map[i].queue_offset;
3334 unsigned int end = start + set->map[i].nr_queues;
3335
3336 if (hctx_idx >= start && hctx_idx < end)
3337 break;
3338 }
3339
3340 if (i >= set->nr_maps)
3341 i = HCTX_TYPE_DEFAULT;
3342
3343 return i;
3344 }
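/*
 * Editorial example: for a set with nr_maps == 2, where
 * map[HCTX_TYPE_DEFAULT] has queue_offset 0 and nr_queues 8 (hctx 0-7)
 * and map[HCTX_TYPE_READ] has queue_offset 8 and nr_queues 4
 * (hctx 8-11), hctx_idx 9 falls in the read range, so the lookup
 * returns HCTX_TYPE_READ; an index outside every map falls back to
 * HCTX_TYPE_DEFAULT.
 */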
3345
3346 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3347 unsigned int hctx_idx)
3348 {
3349 enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3350
3351 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3352 }
3353
3354 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3355 unsigned int hctx_idx,
3356 unsigned int nr_tags,
3357 unsigned int reserved_tags)
3358 {
3359 int node = blk_mq_get_hctx_node(set, hctx_idx);
3360 struct blk_mq_tags *tags;
3361
3362 if (node == NUMA_NO_NODE)
3363 node = set->numa_node;
3364
3365 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3366 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3367 if (!tags)
3368 return NULL;
3369
3370 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3371 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3372 node);
3373 if (!tags->rqs)
3374 goto err_free_tags;
3375
3376 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3377 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3378 node);
3379 if (!tags->static_rqs)
3380 goto err_free_rqs;
3381
3382 return tags;
3383
3384 err_free_rqs:
3385 kfree(tags->rqs);
3386 err_free_tags:
3387 blk_mq_free_tags(tags);
3388 return NULL;
3389 }
3390
3391 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3392 unsigned int hctx_idx, int node)
3393 {
3394 int ret;
3395
3396 if (set->ops->init_request) {
3397 ret = set->ops->init_request(set, rq, hctx_idx, node);
3398 if (ret)
3399 return ret;
3400 }
3401
3402 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3403 return 0;
3404 }
3405
3406 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3407 struct blk_mq_tags *tags,
3408 unsigned int hctx_idx, unsigned int depth)
3409 {
3410 unsigned int i, j, entries_per_page, max_order = 4;
3411 int node = blk_mq_get_hctx_node(set, hctx_idx);
3412 size_t rq_size, left;
3413
3414 if (node == NUMA_NO_NODE)
3415 node = set->numa_node;
3416
3417 INIT_LIST_HEAD(&tags->page_list);
3418
3419 /*
3420 * rq_size is the size of the request plus driver payload, rounded
3421 * to the cacheline size
3422 */
3423 rq_size = round_up(sizeof(struct request) + set->cmd_size,
3424 cache_line_size());
3425 left = rq_size * depth;
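	/*
	 * Worked example (editorial, assuming 4 KiB pages and 64-byte
	 * cache lines): if sizeof(struct request) + cmd_size rounds up
	 * to 384 bytes and depth is 256, left starts at 96 KiB. The
	 * first pass keeps this_order at 4 (64 KiB), which fits
	 * 65536 / 384 = 170 requests, and a second allocation covers
	 * the remaining 86.
	 */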
3426
3427 for (i = 0; i < depth; ) {
3428 int this_order = max_order;
3429 struct page *page;
3430 int to_do;
3431 void *p;
3432
3433 while (this_order && left < order_to_size(this_order - 1))
3434 this_order--;
3435
3436 do {
3437 page = alloc_pages_node(node,
3438 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3439 this_order);
3440 if (page)
3441 break;
3442 if (!this_order--)
3443 break;
3444 if (order_to_size(this_order) < rq_size)
3445 break;
3446 } while (1);
3447
3448 if (!page)
3449 goto fail;
3450
3451 page->private = this_order;
3452 list_add_tail(&page->lru, &tags->page_list);
3453
3454 p = page_address(page);
3455 /*
3456 * Allow kmemleak to scan these pages as they contain pointers
3457 * to additional allocations, e.g. those made via ops->init_request().
3458 */
3459 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3460 entries_per_page = order_to_size(this_order) / rq_size;
3461 to_do = min(entries_per_page, depth - i);
3462 left -= to_do * rq_size;
3463 for (j = 0; j < to_do; j++) {
3464 struct request *rq = p;
3465
3466 tags->static_rqs[i] = rq;
3467 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3468 tags->static_rqs[i] = NULL;
3469 goto fail;
3470 }
3471
3472 p += rq_size;
3473 i++;
3474 }
3475 }
3476 return 0;
3477
3478 fail:
3479 blk_mq_free_rqs(set, tags, hctx_idx);
3480 return -ENOMEM;
3481 }
3482
3483 struct rq_iter_data {
3484 struct blk_mq_hw_ctx *hctx;
3485 bool has_rq;
3486 };
3487
3488 static bool blk_mq_has_request(struct request *rq, void *data)
3489 {
3490 struct rq_iter_data *iter_data = data;
3491
3492 if (rq->mq_hctx != iter_data->hctx)
3493 return true;
3494 iter_data->has_rq = true;
3495 return false;
3496 }
3497
3498 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3499 {
3500 struct blk_mq_tags *tags = hctx->sched_tags ?
3501 hctx->sched_tags : hctx->tags;
3502 struct rq_iter_data data = {
3503 .hctx = hctx,
3504 };
3505
3506 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3507 return data.has_rq;
3508 }
3509
3510 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3511 struct blk_mq_hw_ctx *hctx)
3512 {
3513 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3514 return false;
3515 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3516 return false;
3517 return true;
3518 }
3519
3520 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3521 {
3522 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3523 struct blk_mq_hw_ctx, cpuhp_online);
3524
3525 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3526 !blk_mq_last_cpu_in_hctx(cpu, hctx))
3527 return 0;
3528
3529 /*
3530 * Prevent new requests from being allocated on the current hctx.
3531 *
3532 * The smp_mb__after_atomic() pairs with the implied barrier in
3533 * test_and_set_bit_lock() in sbitmap_get(), and ensures the inactive
3534 * flag is seen once we return from the tag allocator.
3535 */
3536 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3537 smp_mb__after_atomic();
3538
3539 /*
3540 * Try to grab a reference to the queue and wait for any outstanding
3541 * requests. If we could not grab a reference the queue has been
3542 * frozen and there are no requests.
3543 */
3544 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3545 while (blk_mq_hctx_has_requests(hctx))
3546 msleep(5);
3547 percpu_ref_put(&hctx->queue->q_usage_counter);
3548 }
3549
3550 return 0;
3551 }
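
/*
 * Illustrative sketch of the flag/recheck pattern used above; this is a
 * simplified rendering, not the actual tag allocator code. One side sets
 * a flag and issues a full barrier, the other performs an atomic RMW and
 * then rechecks the flag, so at least one side observes the other:
 *
 *	CPU offlining:				Tag allocator:
 *	set_bit(BLK_MQ_S_INACTIVE, ...);	tag = test_and_set_bit_lock(...);
 *	smp_mb__after_atomic();			if (test_bit(BLK_MQ_S_INACTIVE, ...))
 *	wait for requests to drain;			release tag, fail allocation;
 */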
3552
3553 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3554 {
3555 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3556 struct blk_mq_hw_ctx, cpuhp_online);
3557
3558 if (cpumask_test_cpu(cpu, hctx->cpumask))
3559 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3560 return 0;
3561 }
3562
3563 /*
3564 * 'cpu' is going away. Splice any existing rq_list entries from this
3565 * software queue to the hw queue dispatch list, and ensure that the
3566 * hardware queue gets run.
3567 */
3568 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3569 {
3570 struct blk_mq_hw_ctx *hctx;
3571 struct blk_mq_ctx *ctx;
3572 LIST_HEAD(tmp);
3573 enum hctx_type type;
3574
3575 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3576 if (!cpumask_test_cpu(cpu, hctx->cpumask))
3577 return 0;
3578
3579 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3580 type = hctx->type;
3581
3582 spin_lock(&ctx->lock);
3583 if (!list_empty(&ctx->rq_lists[type])) {
3584 list_splice_init(&ctx->rq_lists[type], &tmp);
3585 blk_mq_hctx_clear_pending(hctx, ctx);
3586 }
3587 spin_unlock(&ctx->lock);
3588
3589 if (list_empty(&tmp))
3590 return 0;
3591
3592 spin_lock(&hctx->lock);
3593 list_splice_tail_init(&tmp, &hctx->dispatch);
3594 spin_unlock(&hctx->lock);
3595
3596 blk_mq_run_hw_queue(hctx, true);
3597 return 0;
3598 }
3599
3600 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3601 {
3602 if (!(hctx->flags & BLK_MQ_F_STACKING))
3603 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3604 &hctx->cpuhp_online);
3605 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3606 &hctx->cpuhp_dead);
3607 }
3608
3609 /*
3610 * Before freeing the hw queue, clear the flush request reference in
3611 * tags->rqs[] to avoid a potential use-after-free.
3612 */
3613 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3614 unsigned int queue_depth, struct request *flush_rq)
3615 {
3616 int i;
3617 unsigned long flags;
3618
3619 /* The hw queue may not be mapped yet */
3620 if (!tags)
3621 return;
3622
3623 WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3624
3625 for (i = 0; i < queue_depth; i++)
3626 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3627
3628 /*
3629 * Wait until all pending iterations are done.
3630 *
3631 * The request reference has been cleared and is guaranteed to be
3632 * observed after the ->lock is released.
3633 */
3634 spin_lock_irqsave(&tags->lock, flags);
3635 spin_unlock_irqrestore(&tags->lock, flags);
3636 }
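
/*
 * Why the empty lock/unlock pair above is enough: tag iterators take
 * tags->lock around dereferencing tags->rqs[], roughly:
 *
 *	spin_lock_irqsave(&tags->lock, flags);
 *	rq = tags->rqs[i];		// safe only under ->lock
 *	... inspect rq ...
 *	spin_unlock_irqrestore(&tags->lock, flags);
 *
 * so acquiring and releasing the same lock here cannot complete until
 * every iterator critical section that started earlier has finished.
 * (Sketch with abbreviated names, not a verbatim iterator.)
 */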
3637
3638 /* hctx->ctxs will be freed in queue's release handler */
3639 static void blk_mq_exit_hctx(struct request_queue *q,
3640 struct blk_mq_tag_set *set,
3641 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3642 {
3643 struct request *flush_rq = hctx->fq->flush_rq;
3644
3645 if (blk_mq_hw_queue_mapped(hctx))
3646 blk_mq_tag_idle(hctx);
3647
3648 if (blk_queue_init_done(q))
3649 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3650 set->queue_depth, flush_rq);
3651 if (set->ops->exit_request)
3652 set->ops->exit_request(set, flush_rq, hctx_idx);
3653
3654 if (set->ops->exit_hctx)
3655 set->ops->exit_hctx(hctx, hctx_idx);
3656
3657 blk_mq_remove_cpuhp(hctx);
3658
3659 xa_erase(&q->hctx_table, hctx_idx);
3660
3661 spin_lock(&q->unused_hctx_lock);
3662 list_add(&hctx->hctx_list, &q->unused_hctx_list);
3663 spin_unlock(&q->unused_hctx_lock);
3664 }
3665
3666 static void blk_mq_exit_hw_queues(struct request_queue *q,
3667 struct blk_mq_tag_set *set, int nr_queue)
3668 {
3669 struct blk_mq_hw_ctx *hctx;
3670 unsigned long i;
3671
3672 queue_for_each_hw_ctx(q, hctx, i) {
3673 if (i == nr_queue)
3674 break;
3675 blk_mq_exit_hctx(q, set, hctx, i);
3676 }
3677 }
3678
3679 static int blk_mq_init_hctx(struct request_queue *q,
3680 struct blk_mq_tag_set *set,
3681 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3682 {
3683 hctx->queue_num = hctx_idx;
3684
3685 if (!(hctx->flags & BLK_MQ_F_STACKING))
3686 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3687 &hctx->cpuhp_online);
3688 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3689
3690 hctx->tags = set->tags[hctx_idx];
3691
3692 if (set->ops->init_hctx &&
3693 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3694 goto unregister_cpu_notifier;
3695
3696 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3697 hctx->numa_node))
3698 goto exit_hctx;
3699
3700 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3701 goto exit_flush_rq;
3702
3703 return 0;
3704
3705 exit_flush_rq:
3706 if (set->ops->exit_request)
3707 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3708 exit_hctx:
3709 if (set->ops->exit_hctx)
3710 set->ops->exit_hctx(hctx, hctx_idx);
3711 unregister_cpu_notifier:
3712 blk_mq_remove_cpuhp(hctx);
3713 return -1;
3714 }
3715
3716 static struct blk_mq_hw_ctx *
3717 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3718 int node)
3719 {
3720 struct blk_mq_hw_ctx *hctx;
3721 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3722
3723 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3724 if (!hctx)
3725 goto fail_alloc_hctx;
3726
3727 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3728 goto free_hctx;
3729
3730 atomic_set(&hctx->nr_active, 0);
3731 if (node == NUMA_NO_NODE)
3732 node = set->numa_node;
3733 hctx->numa_node = node;
3734
3735 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3736 spin_lock_init(&hctx->lock);
3737 INIT_LIST_HEAD(&hctx->dispatch);
3738 hctx->queue = q;
3739 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3740
3741 INIT_LIST_HEAD(&hctx->hctx_list);
3742
3743 /*
3744 * Allocate space for all possible CPUs to avoid allocation at
3745 * runtime.
3746 */
3747 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3748 gfp, node);
3749 if (!hctx->ctxs)
3750 goto free_cpumask;
3751
3752 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3753 gfp, node, false, false))
3754 goto free_ctxs;
3755 hctx->nr_ctx = 0;
3756
3757 spin_lock_init(&hctx->dispatch_wait_lock);
3758 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3759 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3760
3761 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3762 if (!hctx->fq)
3763 goto free_bitmap;
3764
3765 blk_mq_hctx_kobj_init(hctx);
3766
3767 return hctx;
3768
3769 free_bitmap:
3770 sbitmap_free(&hctx->ctx_map);
3771 free_ctxs:
3772 kfree(hctx->ctxs);
3773 free_cpumask:
3774 free_cpumask_var(hctx->cpumask);
3775 free_hctx:
3776 kfree(hctx);
3777 fail_alloc_hctx:
3778 return NULL;
3779 }
3780
3781 static void blk_mq_init_cpu_queues(struct request_queue *q,
3782 unsigned int nr_hw_queues)
3783 {
3784 struct blk_mq_tag_set *set = q->tag_set;
3785 unsigned int i, j;
3786
3787 for_each_possible_cpu(i) {
3788 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3789 struct blk_mq_hw_ctx *hctx;
3790 int k;
3791
3792 __ctx->cpu = i;
3793 spin_lock_init(&__ctx->lock);
3794 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3795 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3796
3797 __ctx->queue = q;
3798
3799 /*
3800 * Set the local node, IFF we have more than one hw queue. If
3801 * not, we remain on the home node of the device.
3802 */
3803 for (j = 0; j < set->nr_maps; j++) {
3804 hctx = blk_mq_map_queue_type(q, j, i);
3805 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3806 hctx->numa_node = cpu_to_node(i);
3807 }
3808 }
3809 }
3810
3811 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3812 unsigned int hctx_idx,
3813 unsigned int depth)
3814 {
3815 struct blk_mq_tags *tags;
3816 int ret;
3817
3818 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3819 if (!tags)
3820 return NULL;
3821
3822 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3823 if (ret) {
3824 blk_mq_free_rq_map(tags);
3825 return NULL;
3826 }
3827
3828 return tags;
3829 }
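
/*
 * Usage sketch: a map allocated here is torn down with the matching
 * blk_mq_free_map_and_rqs() below, e.g.:
 *
 *	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */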
3830
3831 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3832 int hctx_idx)
3833 {
3834 if (blk_mq_is_shared_tags(set->flags)) {
3835 set->tags[hctx_idx] = set->shared_tags;
3836
3837 return true;
3838 }
3839
3840 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3841 set->queue_depth);
3842
3843 return set->tags[hctx_idx];
3844 }
3845
3846 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3847 struct blk_mq_tags *tags,
3848 unsigned int hctx_idx)
3849 {
3850 if (tags) {
3851 blk_mq_free_rqs(set, tags, hctx_idx);
3852 blk_mq_free_rq_map(tags);
3853 }
3854 }
3855
3856 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3857 unsigned int hctx_idx)
3858 {
3859 if (!blk_mq_is_shared_tags(set->flags))
3860 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3861
3862 set->tags[hctx_idx] = NULL;
3863 }
3864
3865 static void blk_mq_map_swqueue(struct request_queue *q)
3866 {
3867 unsigned int j, hctx_idx;
3868 unsigned long i;
3869 struct blk_mq_hw_ctx *hctx;
3870 struct blk_mq_ctx *ctx;
3871 struct blk_mq_tag_set *set = q->tag_set;
3872
3873 queue_for_each_hw_ctx(q, hctx, i) {
3874 cpumask_clear(hctx->cpumask);
3875 hctx->nr_ctx = 0;
3876 hctx->dispatch_from = NULL;
3877 }
3878
3879 /*
3880 * Map software to hardware queues.
3881 *
3882 * If the cpu isn't present, the cpu is mapped to the first hctx.
3883 */
3884 for_each_possible_cpu(i) {
3885
3886 ctx = per_cpu_ptr(q->queue_ctx, i);
3887 for (j = 0; j < set->nr_maps; j++) {
3888 if (!set->map[j].nr_queues) {
3889 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3890 HCTX_TYPE_DEFAULT, i);
3891 continue;
3892 }
3893 hctx_idx = set->map[j].mq_map[i];
3894 /* an unmapped hw queue can be remapped after the CPU topology changes */
3895 if (!set->tags[hctx_idx] &&
3896 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3897 /*
3898 * If tag initialization fails for some hctx,
3899 * that hctx won't be brought online. In this
3900 * case, remap the current ctx to hctx[0], which
3901 * is guaranteed to always have tags allocated.
3902 */
3903 set->map[j].mq_map[i] = 0;
3904 }
3905
3906 hctx = blk_mq_map_queue_type(q, j, i);
3907 ctx->hctxs[j] = hctx;
3908 /*
3909 * If the CPU is already set in the mask, then we've
3910 * mapped this one already. This can happen if
3911 * devices share queues across queue maps.
3912 */
3913 if (cpumask_test_cpu(i, hctx->cpumask))
3914 continue;
3915
3916 cpumask_set_cpu(i, hctx->cpumask);
3917 hctx->type = j;
3918 ctx->index_hw[hctx->type] = hctx->nr_ctx;
3919 hctx->ctxs[hctx->nr_ctx++] = ctx;
3920
3921 /*
3922 * If the nr_ctx type overflows, we have exceeded the
3923 * number of sw queues we can support.
3924 */
3925 BUG_ON(!hctx->nr_ctx);
3926 }
3927
3928 for (; j < HCTX_MAX_TYPES; j++)
3929 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3930 HCTX_TYPE_DEFAULT, i);
3931 }
3932
3933 queue_for_each_hw_ctx(q, hctx, i) {
3934 /*
3935 * If no software queues are mapped to this hardware queue,
3936 * disable it and free the request entries.
3937 */
3938 if (!hctx->nr_ctx) {
3939 /* Never unmap queue 0. We need it as a
3940 * fallback in case a new remap fails
3941 * allocation.
3942 */
3943 if (i)
3944 __blk_mq_free_map_and_rqs(set, i);
3945
3946 hctx->tags = NULL;
3947 continue;
3948 }
3949
3950 hctx->tags = set->tags[i];
3951 WARN_ON(!hctx->tags);
3952
3953 /*
3954 * Set the map size to the number of mapped software queues.
3955 * This is more accurate and more efficient than looping
3956 * over all possibly mapped software queues.
3957 */
3958 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3959
3960 /*
3961 * Initialize batch roundrobin counts
3962 */
3963 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3964 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3965 }
3966 }
3967
3968 /*
3969 * Caller needs to ensure that we're either frozen/quiesced, or that
3970 * the queue isn't live yet.
3971 */
3972 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3973 {
3974 struct blk_mq_hw_ctx *hctx;
3975 unsigned long i;
3976
3977 queue_for_each_hw_ctx(q, hctx, i) {
3978 if (shared) {
3979 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3980 } else {
3981 blk_mq_tag_idle(hctx);
3982 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3983 }
3984 }
3985 }
3986
3987 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3988 bool shared)
3989 {
3990 struct request_queue *q;
3991
3992 lockdep_assert_held(&set->tag_list_lock);
3993
3994 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3995 blk_mq_freeze_queue(q);
3996 queue_set_hctx_shared(q, shared);
3997 blk_mq_unfreeze_queue(q);
3998 }
3999 }
4000
4001 static void blk_mq_del_queue_tag_set(struct request_queue *q)
4002 {
4003 struct blk_mq_tag_set *set = q->tag_set;
4004
4005 mutex_lock(&set->tag_list_lock);
4006 list_del(&q->tag_set_list);
4007 if (list_is_singular(&set->tag_list)) {
4008 /* just transitioned to unshared */
4009 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
4010 /* update existing queue */
4011 blk_mq_update_tag_set_shared(set, false);
4012 }
4013 mutex_unlock(&set->tag_list_lock);
4014 INIT_LIST_HEAD(&q->tag_set_list);
4015 }
4016
4017 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
4018 struct request_queue *q)
4019 {
4020 mutex_lock(&set->tag_list_lock);
4021
4022 /*
4023 * Check to see if we're transitioning to shared (from 1 to 2 queues).
4024 */
4025 if (!list_empty(&set->tag_list) &&
4026 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
4027 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
4028 /* update existing queue */
4029 blk_mq_update_tag_set_shared(set, true);
4030 }
4031 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
4032 queue_set_hctx_shared(q, true);
4033 list_add_tail(&q->tag_set_list, &set->tag_list);
4034
4035 mutex_unlock(&set->tag_list_lock);
4036 }
4037
4038 /* All allocations will be freed in release handler of q->mq_kobj */
4039 static int blk_mq_alloc_ctxs(struct request_queue *q)
4040 {
4041 struct blk_mq_ctxs *ctxs;
4042 int cpu;
4043
4044 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
4045 if (!ctxs)
4046 return -ENOMEM;
4047
4048 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
4049 if (!ctxs->queue_ctx)
4050 goto fail;
4051
4052 for_each_possible_cpu(cpu) {
4053 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
4054 ctx->ctxs = ctxs;
4055 }
4056
4057 q->mq_kobj = &ctxs->kobj;
4058 q->queue_ctx = ctxs->queue_ctx;
4059
4060 return 0;
4061 fail:
4062 kfree(ctxs);
4063 return -ENOMEM;
4064 }
4065
4066 /*
4067 * This is the actual release handler for mq, but we do it from the
4068 * request queue's release handler to avoid use-after-free headaches;
4069 * q->mq_kobj shouldn't have been introduced, but we can't group the
4070 * ctx/kctx kobjects without it.
4071 */
4072 void blk_mq_release(struct request_queue *q)
4073 {
4074 struct blk_mq_hw_ctx *hctx, *next;
4075 unsigned long i;
4076
4077 queue_for_each_hw_ctx(q, hctx, i)
4078 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
4079
4080 /* all hctxs are on the unused_hctx_list now */
4081 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4082 list_del_init(&hctx->hctx_list);
4083 kobject_put(&hctx->kobj);
4084 }
4085
4086 xa_destroy(&q->hctx_table);
4087
4088 /*
4089 * Release .mq_kobj and the sw queues' kobjects now because
4090 * both share their lifetime with the request queue.
4091 */
4092 blk_mq_sysfs_deinit(q);
4093 }
4094
4095 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4096 void *queuedata)
4097 {
4098 struct request_queue *q;
4099 int ret;
4100
4101 q = blk_alloc_queue(set->numa_node);
4102 if (!q)
4103 return ERR_PTR(-ENOMEM);
4104 q->queuedata = queuedata;
4105 ret = blk_mq_init_allocated_queue(set, q);
4106 if (ret) {
4107 blk_put_queue(q);
4108 return ERR_PTR(ret);
4109 }
4110 return q;
4111 }
4112
4113 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4114 {
4115 return blk_mq_init_queue_data(set, NULL);
4116 }
4117 EXPORT_SYMBOL(blk_mq_init_queue);
4118
4119 /**
4120 * blk_mq_destroy_queue - shutdown a request queue
4121 * @q: request queue to shutdown
4122 *
4123 * This shuts down a request queue allocated by blk_mq_init_queue(). All future
4124 * requests will be failed with -ENODEV. The caller is responsible for dropping
4125 * the reference from blk_mq_init_queue() by calling blk_put_queue().
4126 *
4127 * Context: can sleep
4128 */
4129 void blk_mq_destroy_queue(struct request_queue *q)
4130 {
4131 WARN_ON_ONCE(!queue_is_mq(q));
4132 WARN_ON_ONCE(blk_queue_registered(q));
4133
4134 might_sleep();
4135
4136 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4137 blk_queue_start_drain(q);
4138 blk_mq_freeze_queue_wait(q);
4139
4140 blk_sync_queue(q);
4141 blk_mq_cancel_work_sync(q);
4142 blk_mq_exit_queue(q);
4143 }
4144 EXPORT_SYMBOL(blk_mq_destroy_queue);
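
/*
 * Per the kernel-doc above, teardown by the queue's creator is a
 * two-step sequence, e.g.:
 *
 *	blk_mq_destroy_queue(q);
 *	blk_put_queue(q);	// drop the blk_mq_init_queue() reference
 */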
4145
4146 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4147 struct lock_class_key *lkclass)
4148 {
4149 struct request_queue *q;
4150 struct gendisk *disk;
4151
4152 q = blk_mq_init_queue_data(set, queuedata);
4153 if (IS_ERR(q))
4154 return ERR_CAST(q);
4155
4156 disk = __alloc_disk_node(q, set->numa_node, lkclass);
4157 if (!disk) {
4158 blk_mq_destroy_queue(q);
4159 blk_put_queue(q);
4160 return ERR_PTR(-ENOMEM);
4161 }
4162 set_bit(GD_OWNS_QUEUE, &disk->state);
4163 return disk;
4164 }
4165 EXPORT_SYMBOL(__blk_mq_alloc_disk);
4166
4167 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4168 struct lock_class_key *lkclass)
4169 {
4170 struct gendisk *disk;
4171
4172 if (!blk_get_queue(q))
4173 return NULL;
4174 disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4175 if (!disk)
4176 blk_put_queue(q);
4177 return disk;
4178 }
4179 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4180
4181 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4182 struct blk_mq_tag_set *set, struct request_queue *q,
4183 int hctx_idx, int node)
4184 {
4185 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4186
4187 /* reuse dead hctx first */
4188 spin_lock(&q->unused_hctx_lock);
4189 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4190 if (tmp->numa_node == node) {
4191 hctx = tmp;
4192 break;
4193 }
4194 }
4195 if (hctx)
4196 list_del_init(&hctx->hctx_list);
4197 spin_unlock(&q->unused_hctx_lock);
4198
4199 if (!hctx)
4200 hctx = blk_mq_alloc_hctx(q, set, node);
4201 if (!hctx)
4202 goto fail;
4203
4204 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4205 goto free_hctx;
4206
4207 return hctx;
4208
4209 free_hctx:
4210 kobject_put(&hctx->kobj);
4211 fail:
4212 return NULL;
4213 }
4214
4215 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4216 struct request_queue *q)
4217 {
4218 struct blk_mq_hw_ctx *hctx;
4219 unsigned long i, j;
4220
4221 /* protect against switching io scheduler */
4222 mutex_lock(&q->sysfs_lock);
4223 for (i = 0; i < set->nr_hw_queues; i++) {
4224 int old_node;
4225 int node = blk_mq_get_hctx_node(set, i);
4226 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4227
4228 if (old_hctx) {
4229 old_node = old_hctx->numa_node;
4230 blk_mq_exit_hctx(q, set, old_hctx, i);
4231 }
4232
4233 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4234 if (!old_hctx)
4235 break;
4236 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4237 node, old_node);
4238 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4239 WARN_ON_ONCE(!hctx);
4240 }
4241 }
4242 /*
4243 * Increasing nr_hw_queues failed. Free the newly allocated
4244 * hctxs and keep the previous q->nr_hw_queues.
4245 */
4246 if (i != set->nr_hw_queues) {
4247 j = q->nr_hw_queues;
4248 } else {
4249 j = i;
4250 q->nr_hw_queues = set->nr_hw_queues;
4251 }
4252
4253 xa_for_each_start(&q->hctx_table, j, hctx, j)
4254 blk_mq_exit_hctx(q, set, hctx, j);
4255 mutex_unlock(&q->sysfs_lock);
4256 }
4257
4258 static void blk_mq_update_poll_flag(struct request_queue *q)
4259 {
4260 struct blk_mq_tag_set *set = q->tag_set;
4261
4262 if (set->nr_maps > HCTX_TYPE_POLL &&
4263 set->map[HCTX_TYPE_POLL].nr_queues)
4264 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4265 else
4266 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4267 }
4268
4269 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4270 struct request_queue *q)
4271 {
4272 /* mark the queue as mq asap */
4273 q->mq_ops = set->ops;
4274
4275 if (blk_mq_alloc_ctxs(q))
4276 goto err_exit;
4277
4278 /* init q->mq_kobj and sw queues' kobjects */
4279 blk_mq_sysfs_init(q);
4280
4281 INIT_LIST_HEAD(&q->unused_hctx_list);
4282 spin_lock_init(&q->unused_hctx_lock);
4283
4284 xa_init(&q->hctx_table);
4285
4286 blk_mq_realloc_hw_ctxs(set, q);
4287 if (!q->nr_hw_queues)
4288 goto err_hctxs;
4289
4290 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4291 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4292
4293 q->tag_set = set;
4294
4295 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4296 blk_mq_update_poll_flag(q);
4297
4298 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4299 INIT_LIST_HEAD(&q->flush_list);
4300 INIT_LIST_HEAD(&q->requeue_list);
4301 spin_lock_init(&q->requeue_lock);
4302
4303 q->nr_requests = set->queue_depth;
4304
4305 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4306 blk_mq_add_queue_tag_set(set, q);
4307 blk_mq_map_swqueue(q);
4308 return 0;
4309
4310 err_hctxs:
4311 blk_mq_release(q);
4312 err_exit:
4313 q->mq_ops = NULL;
4314 return -ENOMEM;
4315 }
4316 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4317
4318 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4319 void blk_mq_exit_queue(struct request_queue *q)
4320 {
4321 struct blk_mq_tag_set *set = q->tag_set;
4322
4323 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4324 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4325 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4326 blk_mq_del_queue_tag_set(q);
4327 }
4328
4329 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4330 {
4331 int i;
4332
4333 if (blk_mq_is_shared_tags(set->flags)) {
4334 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4335 BLK_MQ_NO_HCTX_IDX,
4336 set->queue_depth);
4337 if (!set->shared_tags)
4338 return -ENOMEM;
4339 }
4340
4341 for (i = 0; i < set->nr_hw_queues; i++) {
4342 if (!__blk_mq_alloc_map_and_rqs(set, i))
4343 goto out_unwind;
4344 cond_resched();
4345 }
4346
4347 return 0;
4348
4349 out_unwind:
4350 while (--i >= 0)
4351 __blk_mq_free_map_and_rqs(set, i);
4352
4353 if (blk_mq_is_shared_tags(set->flags)) {
4354 blk_mq_free_map_and_rqs(set, set->shared_tags,
4355 BLK_MQ_NO_HCTX_IDX);
4356 }
4357
4358 return -ENOMEM;
4359 }
4360
4361 /*
4362 * Allocate the request maps associated with this tag_set. Note that this
4363 * may reduce the depth asked for, if memory is tight. set->queue_depth
4364 * will be updated to reflect the allocated depth.
4365 */
4366 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4367 {
4368 unsigned int depth;
4369 int err;
4370
4371 depth = set->queue_depth;
4372 do {
4373 err = __blk_mq_alloc_rq_maps(set);
4374 if (!err)
4375 break;
4376
4377 set->queue_depth >>= 1;
4378 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4379 err = -ENOMEM;
4380 break;
4381 }
4382 } while (set->queue_depth);
4383
4384 if (!set->queue_depth || err) {
4385 pr_err("blk-mq: failed to allocate request map\n");
4386 return -ENOMEM;
4387 }
4388
4389 if (depth != set->queue_depth)
4390 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4391 depth, set->queue_depth);
4392
4393 return 0;
4394 }
4395
4396 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4397 {
4398 /*
4399 * blk_mq_map_queues() and multiple .map_queues() implementations
4400 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4401 * number of hardware queues.
4402 */
4403 if (set->nr_maps == 1)
4404 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4405
4406 if (set->ops->map_queues && !is_kdump_kernel()) {
4407 int i;
4408
4409 /*
4410 * A transport's .map_queues implementation usually works in the
4411 * following way:
4412 *
4413 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4414 * mask = get_cpu_mask(queue)
4415 * for_each_cpu(cpu, mask)
4416 * set->map[x].mq_map[cpu] = queue;
4417 * }
4418 *
4419 * When we need to remap, the table has to be cleared to kill
4420 * stale mappings, since one CPU may not be mapped to any hw
4421 * queue.
4422 */
4423 for (i = 0; i < set->nr_maps; i++)
4424 blk_mq_clear_mq_map(&set->map[i]);
4425
4426 set->ops->map_queues(set);
4427 } else {
4428 BUG_ON(set->nr_maps > 1);
4429 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4430 }
4431 }
4432
4433 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4434 int new_nr_hw_queues)
4435 {
4436 struct blk_mq_tags **new_tags;
4437 int i;
4438
4439 if (set->nr_hw_queues >= new_nr_hw_queues)
4440 goto done;
4441
4442 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4443 GFP_KERNEL, set->numa_node);
4444 if (!new_tags)
4445 return -ENOMEM;
4446
4447 if (set->tags)
4448 memcpy(new_tags, set->tags, set->nr_hw_queues *
4449 sizeof(*set->tags));
4450 kfree(set->tags);
4451 set->tags = new_tags;
4452
4453 for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
4454 if (!__blk_mq_alloc_map_and_rqs(set, i)) {
4455 while (--i >= set->nr_hw_queues)
4456 __blk_mq_free_map_and_rqs(set, i);
4457 return -ENOMEM;
4458 }
4459 cond_resched();
4460 }
4461
4462 done:
4463 set->nr_hw_queues = new_nr_hw_queues;
4464 return 0;
4465 }
4466
4467 /*
4468 * Alloc a tag set to be associated with one or more request queues.
4469 * May fail with EINVAL for various error conditions. May adjust the
4470 * requested depth down, if it's too large. In that case, the set
4471 * value will be stored in set->queue_depth.
4472 */
4473 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4474 {
4475 int i, ret;
4476
4477 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4478
4479 if (!set->nr_hw_queues)
4480 return -EINVAL;
4481 if (!set->queue_depth)
4482 return -EINVAL;
4483 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4484 return -EINVAL;
4485
4486 if (!set->ops->queue_rq)
4487 return -EINVAL;
4488
4489 if (!set->ops->get_budget ^ !set->ops->put_budget)
4490 return -EINVAL;
4491
4492 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4493 pr_info("blk-mq: reduced tag depth to %u\n",
4494 BLK_MQ_MAX_DEPTH);
4495 set->queue_depth = BLK_MQ_MAX_DEPTH;
4496 }
4497
4498 if (!set->nr_maps)
4499 set->nr_maps = 1;
4500 else if (set->nr_maps > HCTX_MAX_TYPES)
4501 return -EINVAL;
4502
4503 /*
4504 * If a crashdump is active, then we are potentially in a very
4505 * memory-constrained environment. Limit us to 1 queue and
4506 * 64 tags to prevent using too much memory.
4507 */
4508 if (is_kdump_kernel()) {
4509 set->nr_hw_queues = 1;
4510 set->nr_maps = 1;
4511 set->queue_depth = min(64U, set->queue_depth);
4512 }
4513 /*
4514 * There is no use for more h/w queues than CPUs if we just have
4515 * a single map.
4516 */
4517 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4518 set->nr_hw_queues = nr_cpu_ids;
4519
4520 if (set->flags & BLK_MQ_F_BLOCKING) {
4521 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4522 if (!set->srcu)
4523 return -ENOMEM;
4524 ret = init_srcu_struct(set->srcu);
4525 if (ret)
4526 goto out_free_srcu;
4527 }
4528
4529 ret = -ENOMEM;
4530 set->tags = kcalloc_node(set->nr_hw_queues,
4531 sizeof(struct blk_mq_tags *), GFP_KERNEL,
4532 set->numa_node);
4533 if (!set->tags)
4534 goto out_cleanup_srcu;
4535
4536 for (i = 0; i < set->nr_maps; i++) {
4537 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4538 sizeof(set->map[i].mq_map[0]),
4539 GFP_KERNEL, set->numa_node);
4540 if (!set->map[i].mq_map)
4541 goto out_free_mq_map;
4542 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4543 }
4544
4545 blk_mq_update_queue_map(set);
4546
4547 ret = blk_mq_alloc_set_map_and_rqs(set);
4548 if (ret)
4549 goto out_free_mq_map;
4550
4551 mutex_init(&set->tag_list_lock);
4552 INIT_LIST_HEAD(&set->tag_list);
4553
4554 return 0;
4555
4556 out_free_mq_map:
4557 for (i = 0; i < set->nr_maps; i++) {
4558 kfree(set->map[i].mq_map);
4559 set->map[i].mq_map = NULL;
4560 }
4561 kfree(set->tags);
4562 set->tags = NULL;
4563 out_cleanup_srcu:
4564 if (set->flags & BLK_MQ_F_BLOCKING)
4565 cleanup_srcu_struct(set->srcu);
4566 out_free_srcu:
4567 if (set->flags & BLK_MQ_F_BLOCKING)
4568 kfree(set->srcu);
4569 return ret;
4570 }
4571 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4572
4573 /* allocate and initialize a tagset for a simple single-queue device */
4574 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4575 const struct blk_mq_ops *ops, unsigned int queue_depth,
4576 unsigned int set_flags)
4577 {
4578 memset(set, 0, sizeof(*set));
4579 set->ops = ops;
4580 set->nr_hw_queues = 1;
4581 set->nr_maps = 1;
4582 set->queue_depth = queue_depth;
4583 set->numa_node = NUMA_NO_NODE;
4584 set->flags = set_flags;
4585 return blk_mq_alloc_tag_set(set);
4586 }
4587 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
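
/*
 * Illustrative usage sketch (hypothetical driver names, abbreviated
 * error handling): a simple single-queue driver can combine the helper
 * above with disk allocation roughly like this:
 *
 *	static struct blk_mq_tag_set my_tag_set;
 *
 *	ret = blk_mq_alloc_sq_tag_set(&my_tag_set, &my_mq_ops, 128, 0);
 *	if (ret)
 *		return ret;
 *	disk = blk_mq_alloc_disk(&my_tag_set, NULL);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(disk);
 *	}
 *	// set disk->fops and capacity, then add_disk(disk)
 */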
4588
4589 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4590 {
4591 int i, j;
4592
4593 for (i = 0; i < set->nr_hw_queues; i++)
4594 __blk_mq_free_map_and_rqs(set, i);
4595
4596 if (blk_mq_is_shared_tags(set->flags)) {
4597 blk_mq_free_map_and_rqs(set, set->shared_tags,
4598 BLK_MQ_NO_HCTX_IDX);
4599 }
4600
4601 for (j = 0; j < set->nr_maps; j++) {
4602 kfree(set->map[j].mq_map);
4603 set->map[j].mq_map = NULL;
4604 }
4605
4606 kfree(set->tags);
4607 set->tags = NULL;
4608 if (set->flags & BLK_MQ_F_BLOCKING) {
4609 cleanup_srcu_struct(set->srcu);
4610 kfree(set->srcu);
4611 }
4612 }
4613 EXPORT_SYMBOL(blk_mq_free_tag_set);
4614
4615 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4616 {
4617 struct blk_mq_tag_set *set = q->tag_set;
4618 struct blk_mq_hw_ctx *hctx;
4619 int ret;
4620 unsigned long i;
4621
4622 if (!set)
4623 return -EINVAL;
4624
4625 if (q->nr_requests == nr)
4626 return 0;
4627
4628 blk_mq_freeze_queue(q);
4629 blk_mq_quiesce_queue(q);
4630
4631 ret = 0;
4632 queue_for_each_hw_ctx(q, hctx, i) {
4633 if (!hctx->tags)
4634 continue;
4635 /*
4636 * If we're using an MQ scheduler, just update the scheduler
4637 * queue depth. This is similar to what the old code would do.
4638 */
4639 if (hctx->sched_tags) {
4640 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4641 nr, true);
4642 } else {
4643 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4644 false);
4645 }
4646 if (ret)
4647 break;
4648 if (q->elevator && q->elevator->type->ops.depth_updated)
4649 q->elevator->type->ops.depth_updated(hctx);
4650 }
4651 if (!ret) {
4652 q->nr_requests = nr;
4653 if (blk_mq_is_shared_tags(set->flags)) {
4654 if (q->elevator)
4655 blk_mq_tag_update_sched_shared_tags(q);
4656 else
4657 blk_mq_tag_resize_shared_tags(set, nr);
4658 }
4659 }
4660
4661 blk_mq_unquiesce_queue(q);
4662 blk_mq_unfreeze_queue(q);
4663
4664 return ret;
4665 }
4666
4667 /*
4668 * request_queue and elevator_type pair.
4669 * It is just used by __blk_mq_update_nr_hw_queues to cache
4670 * the elevator_type associated with a request_queue.
4671 */
4672 struct blk_mq_qe_pair {
4673 struct list_head node;
4674 struct request_queue *q;
4675 struct elevator_type *type;
4676 };
4677
4678 /*
4679 * Cache the elevator_type in the qe pair list and switch the
4680 * io scheduler to 'none'.
4681 */
4682 static bool blk_mq_elv_switch_none(struct list_head *head,
4683 struct request_queue *q)
4684 {
4685 struct blk_mq_qe_pair *qe;
4686
4687 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4688 if (!qe)
4689 return false;
4690
4691 /* q->elevator needs protection from ->sysfs_lock */
4692 mutex_lock(&q->sysfs_lock);
4693
4694 /* the check has to be done with holding sysfs_lock */
4695 if (!q->elevator) {
4696 kfree(qe);
4697 goto unlock;
4698 }
4699
4700 INIT_LIST_HEAD(&qe->node);
4701 qe->q = q;
4702 qe->type = q->elevator->type;
4703 /* keep a reference to the elevator module as we'll switch back */
4704 __elevator_get(qe->type);
4705 list_add(&qe->node, head);
4706 elevator_disable(q);
4707 unlock:
4708 mutex_unlock(&q->sysfs_lock);
4709
4710 return true;
4711 }
4712
4713 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4714 struct request_queue *q)
4715 {
4716 struct blk_mq_qe_pair *qe;
4717
4718 list_for_each_entry(qe, head, node)
4719 if (qe->q == q)
4720 return qe;
4721
4722 return NULL;
4723 }
4724
4725 static void blk_mq_elv_switch_back(struct list_head *head,
4726 struct request_queue *q)
4727 {
4728 struct blk_mq_qe_pair *qe;
4729 struct elevator_type *t;
4730
4731 qe = blk_lookup_qe_pair(head, q);
4732 if (!qe)
4733 return;
4734 t = qe->type;
4735 list_del(&qe->node);
4736 kfree(qe);
4737
4738 mutex_lock(&q->sysfs_lock);
4739 elevator_switch(q, t);
4740 /* drop the reference acquired in blk_mq_elv_switch_none */
4741 elevator_put(t);
4742 mutex_unlock(&q->sysfs_lock);
4743 }
4744
4745 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4746 int nr_hw_queues)
4747 {
4748 struct request_queue *q;
4749 LIST_HEAD(head);
4750 int prev_nr_hw_queues = set->nr_hw_queues;
4751 int i;
4752
4753 lockdep_assert_held(&set->tag_list_lock);
4754
4755 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4756 nr_hw_queues = nr_cpu_ids;
4757 if (nr_hw_queues < 1)
4758 return;
4759 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4760 return;
4761
4762 list_for_each_entry(q, &set->tag_list, tag_set_list)
4763 blk_mq_freeze_queue(q);
4764 /*
4765 * Switch IO scheduler to 'none', cleaning up the data associated
4766 * with the previous scheduler. We will switch back once we are done
4767 * updating the new sw to hw queue mappings.
4768 */
4769 list_for_each_entry(q, &set->tag_list, tag_set_list)
4770 if (!blk_mq_elv_switch_none(&head, q))
4771 goto switch_back;
4772
4773 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4774 blk_mq_debugfs_unregister_hctxs(q);
4775 blk_mq_sysfs_unregister_hctxs(q);
4776 }
4777
4778 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
4779 goto reregister;
4780
4781 fallback:
4782 blk_mq_update_queue_map(set);
4783 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4784 blk_mq_realloc_hw_ctxs(set, q);
4785 blk_mq_update_poll_flag(q);
4786 if (q->nr_hw_queues != set->nr_hw_queues) {
4787 int i = prev_nr_hw_queues;
4788
4789 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
4790 nr_hw_queues, prev_nr_hw_queues);
4791 for (; i < set->nr_hw_queues; i++)
4792 __blk_mq_free_map_and_rqs(set, i);
4793
4794 set->nr_hw_queues = prev_nr_hw_queues;
4795 goto fallback;
4796 }
4797 blk_mq_map_swqueue(q);
4798 }
4799
4800 reregister:
4801 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4802 blk_mq_sysfs_register_hctxs(q);
4803 blk_mq_debugfs_register_hctxs(q);
4804 }
4805
4806 switch_back:
4807 list_for_each_entry(q, &set->tag_list, tag_set_list)
4808 blk_mq_elv_switch_back(&head, q);
4809
4810 list_for_each_entry(q, &set->tag_list, tag_set_list)
4811 blk_mq_unfreeze_queue(q);
4812
4813 /* Free the excess tags when nr_hw_queues shrinks. */
4814 for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
4815 __blk_mq_free_map_and_rqs(set, i);
4816 }
4817
4818 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4819 {
4820 mutex_lock(&set->tag_list_lock);
4821 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4822 mutex_unlock(&set->tag_list_lock);
4823 }
4824 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
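
/*
 * Typical usage: a driver whose usable hardware queue count has changed
 * (for example after a controller reset) calls this to remap every
 * queue that shares the tag set.
 */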
4825
4826 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
4827 struct io_comp_batch *iob, unsigned int flags)
4828 {
4829 long state = get_current_state();
4830 int ret;
4831
4832 do {
4833 ret = q->mq_ops->poll(hctx, iob);
4834 if (ret > 0) {
4835 __set_current_state(TASK_RUNNING);
4836 return ret;
4837 }
4838
4839 if (signal_pending_state(state, current))
4840 __set_current_state(TASK_RUNNING);
4841 if (task_is_running(current))
4842 return 1;
4843
4844 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4845 break;
4846 cpu_relax();
4847 } while (!need_resched());
4848
4849 __set_current_state(TASK_RUNNING);
4850 return 0;
4851 }
4852
4853 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
4854 struct io_comp_batch *iob, unsigned int flags)
4855 {
4856 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
4857
4858 return blk_hctx_poll(q, hctx, iob, flags);
4859 }
4860
4861 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4862 unsigned int poll_flags)
4863 {
4864 struct request_queue *q = rq->q;
4865 int ret;
4866
4867 if (!blk_rq_is_poll(rq))
4868 return 0;
4869 if (!percpu_ref_tryget(&q->q_usage_counter))
4870 return 0;
4871
4872 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4873 blk_queue_exit(q);
4874
4875 return ret;
4876 }
4877 EXPORT_SYMBOL_GPL(blk_rq_poll);
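
/*
 * Usage sketch (hypothetical "done" flag set from the caller's
 * rq_end_io handler): since blk_rq_poll() returns 0 for non-pollable
 * requests, callers check blk_rq_is_poll() first and then spin:
 *
 *	while (!READ_ONCE(done))
 *		blk_rq_poll(rq, NULL, 0);
 */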
4878
4879 unsigned int blk_mq_rq_cpu(struct request *rq)
4880 {
4881 return rq->mq_ctx->cpu;
4882 }
4883 EXPORT_SYMBOL(blk_mq_rq_cpu);
4884
4885 void blk_mq_cancel_work_sync(struct request_queue *q)
4886 {
4887 struct blk_mq_hw_ctx *hctx;
4888 unsigned long i;
4889
4890 cancel_delayed_work_sync(&q->requeue_work);
4891
4892 queue_for_each_hw_ctx(q, hctx, i)
4893 cancel_delayed_work_sync(&hctx->run_work);
4894 }
4895
4896 static int __init blk_mq_init(void)
4897 {
4898 int i;
4899
4900 for_each_possible_cpu(i)
4901 init_llist_head(&per_cpu(blk_cpu_done, i));
4902 for_each_possible_cpu(i)
4903 INIT_CSD(&per_cpu(blk_cpu_csd, i),
4904 __blk_mq_complete_request_remote, NULL);
4905 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4906
4907 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4908 "block/softirq:dead", NULL,
4909 blk_softirq_cpu_dead);
4910 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4911 blk_mq_hctx_notify_dead);
4912 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4913 blk_mq_hctx_notify_online,
4914 blk_mq_hctx_notify_offline);
4915 return 0;
4916 }
4917 subsys_initcall(blk_mq_init);
4918