1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Block multiqueue core code
4 *
5 * Copyright (C) 2013-2014 Jens Axboe
6 * Copyright (C) 2013-2014 Christoph Hellwig
7 */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/kmemleak.h>
14 #include <linux/mm.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/workqueue.h>
18 #include <linux/smp.h>
19 #include <linux/llist.h>
20 #include <linux/list_sort.h>
21 #include <linux/cpu.h>
22 #include <linux/cache.h>
23 #include <linux/sched/sysctl.h>
24 #include <linux/sched/topology.h>
25 #include <linux/sched/signal.h>
26 #include <linux/delay.h>
27 #include <linux/crash_dump.h>
28 #include <linux/prefetch.h>
29 #include <linux/blk-crypto.h>
30
31 #include <trace/events/block.h>
32
33 #include <linux/blk-mq.h>
34 #include <linux/t10-pi.h>
35 #include "blk.h"
36 #include "blk-mq.h"
37 #include "blk-mq-debugfs.h"
38 #include "blk-mq-tag.h"
39 #include "blk-pm.h"
40 #include "blk-stat.h"
41 #include "blk-mq-sched.h"
42 #include "blk-rq-qos.h"
43
44 #include <trace/hooks/block.h>
45
46 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
47
48 static void blk_mq_poll_stats_start(struct request_queue *q);
49 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
50
51 static int blk_mq_poll_stats_bkt(const struct request *rq)
52 {
53 int ddir, sectors, bucket;
54
55 ddir = rq_data_dir(rq);
56 sectors = blk_rq_stats_sectors(rq);
57
58 bucket = ddir + 2 * ilog2(sectors);
59
60 if (bucket < 0)
61 return -1;
62 else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
63 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
64
65 return bucket;
66 }
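/*
 * Worked example (editor's note, not part of the original source): for a
 * hypothetical completed read (ddir == 0) of 8 sectors, ilog2(8) == 3, so the
 * bucket is 0 + 2 * 3 == 6; a write (ddir == 1) of the same size lands in
 * bucket 7. Read and write buckets therefore interleave by size class, and
 * anything past BLK_MQ_POLL_STATS_BKTS is clamped into the last pair of
 * buckets by the return statement above.
 */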
67
68 /*
69 * Check if any of the ctx, dispatch list or elevator
70 * have pending work in this hardware queue.
71 */
72 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
73 {
74 return !list_empty_careful(&hctx->dispatch) ||
75 sbitmap_any_bit_set(&hctx->ctx_map) ||
76 blk_mq_sched_has_work(hctx);
77 }
78
79 /*
80 * Mark this ctx as having pending work in this hardware queue
81 */
82 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
83 struct blk_mq_ctx *ctx)
84 {
85 const int bit = ctx->index_hw[hctx->type];
86
87 if (!sbitmap_test_bit(&hctx->ctx_map, bit))
88 sbitmap_set_bit(&hctx->ctx_map, bit);
89 }
90
91 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
92 struct blk_mq_ctx *ctx)
93 {
94 const int bit = ctx->index_hw[hctx->type];
95
96 sbitmap_clear_bit(&hctx->ctx_map, bit);
97 }
98
99 struct mq_inflight {
100 struct block_device *part;
101 unsigned int inflight[2];
102 };
103
104 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
105 struct request *rq, void *priv,
106 bool reserved)
107 {
108 struct mq_inflight *mi = priv;
109
110 if ((!mi->part->bd_partno || rq->part == mi->part) &&
111 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
112 mi->inflight[rq_data_dir(rq)]++;
113
114 return true;
115 }
116
117 unsigned int blk_mq_in_flight(struct request_queue *q,
118 struct block_device *part)
119 {
120 struct mq_inflight mi = { .part = part };
121
122 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
123
124 return mi.inflight[0] + mi.inflight[1];
125 }
126
127 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
128 unsigned int inflight[2])
129 {
130 struct mq_inflight mi = { .part = part };
131
132 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
133 inflight[0] = mi.inflight[0];
134 inflight[1] = mi.inflight[1];
135 }
136
137 void blk_freeze_queue_start(struct request_queue *q)
138 {
139 mutex_lock(&q->mq_freeze_lock);
140 if (++q->mq_freeze_depth == 1) {
141 percpu_ref_kill(&q->q_usage_counter);
142 mutex_unlock(&q->mq_freeze_lock);
143 if (queue_is_mq(q))
144 blk_mq_run_hw_queues(q, false);
145 } else {
146 mutex_unlock(&q->mq_freeze_lock);
147 }
148 }
149 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
150
151 void blk_mq_freeze_queue_wait(struct request_queue *q)
152 {
153 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
154 }
155 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
156
157 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
158 unsigned long timeout)
159 {
160 return wait_event_timeout(q->mq_freeze_wq,
161 percpu_ref_is_zero(&q->q_usage_counter),
162 timeout);
163 }
164 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
165
166 /*
167 * Guarantee no request is in use, so we can change any data structure of
168 * the queue afterward.
169 */
170 void blk_freeze_queue(struct request_queue *q)
171 {
172 /*
173 * In the !blk_mq case we are only calling this to kill the
174 * q_usage_counter, otherwise this increases the freeze depth
175 * and waits for it to return to zero. For this reason there is
176 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
177 * exported to drivers as the only user for unfreeze is blk_mq.
178 */
179 blk_freeze_queue_start(q);
180 blk_mq_freeze_queue_wait(q);
181 }
182
183 void blk_mq_freeze_queue(struct request_queue *q)
184 {
185 /*
186 * ...just an alias to keep freeze and unfreeze actions balanced
187 * in the blk_mq_* namespace
188 */
189 blk_freeze_queue(q);
190 }
191 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
192
193 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
194 {
195 mutex_lock(&q->mq_freeze_lock);
196 if (force_atomic)
197 q->q_usage_counter.data->force_atomic = true;
198 q->mq_freeze_depth--;
199 WARN_ON_ONCE(q->mq_freeze_depth < 0);
200 if (!q->mq_freeze_depth) {
201 percpu_ref_resurrect(&q->q_usage_counter);
202 wake_up_all(&q->mq_freeze_wq);
203 }
204 mutex_unlock(&q->mq_freeze_lock);
205 }
206
207 void blk_mq_unfreeze_queue(struct request_queue *q)
208 {
209 __blk_mq_unfreeze_queue(q, false);
210 }
211 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
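/*
 * Minimal usage sketch (editor's note, not part of the original file): a
 * driver that needs to change queue state that must not race with in-flight
 * requests typically brackets the change with a freeze/unfreeze pair, e.g.:
 *
 *	blk_mq_freeze_queue(q);
 *	example_update_queue_settings(q);	// hypothetical driver helper
 *	blk_mq_unfreeze_queue(q);
 *
 * While frozen, q_usage_counter is zero, so no new requests can enter the
 * queue and all previously submitted ones have completed.
 */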
212
213 /*
214 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
215 * mpt3sas driver such that this function can be removed.
216 */
217 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
218 {
219 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
220 }
221 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
222
223 /**
224 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
225 * @q: request queue.
226 *
227 * Note: this function does not prevent the struct request end_io()
228 * callback function from being invoked. Once this function has returned,
229 * we make sure no dispatch can happen until the queue is unquiesced via
230 * blk_mq_unquiesce_queue().
231 */
232 void blk_mq_quiesce_queue(struct request_queue *q)
233 {
234 struct blk_mq_hw_ctx *hctx;
235 unsigned int i;
236 bool rcu = false;
237
238 blk_mq_quiesce_queue_nowait(q);
239
240 queue_for_each_hw_ctx(q, hctx, i) {
241 if (hctx->flags & BLK_MQ_F_BLOCKING)
242 synchronize_srcu(hctx->srcu);
243 else
244 rcu = true;
245 }
246 if (rcu)
247 synchronize_rcu();
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
250
251 /*
252 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
253 * @q: request queue.
254 *
255 * This function restores the queue to the state it was in before
256 * blk_mq_quiesce_queue() was called.
257 */
258 void blk_mq_unquiesce_queue(struct request_queue *q)
259 {
260 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
261
262 /* dispatch requests which are inserted during quiescing */
263 blk_mq_run_hw_queues(q, true);
264 }
265 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
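/*
 * Minimal usage sketch (editor's note, not part of the original file):
 * quiescing is lighter weight than freezing; it only guarantees that no new
 * ->queue_rq() dispatch starts, it does not wait for outstanding requests to
 * complete. A driver that needs to stop dispatching temporarily, e.g. while
 * resetting its hardware, might do:
 *
 *	blk_mq_quiesce_queue(q);
 *	example_reset_controller(dev);	// hypothetical driver step
 *	blk_mq_unquiesce_queue(q);
 */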
266
267 void blk_mq_wake_waiters(struct request_queue *q)
268 {
269 struct blk_mq_hw_ctx *hctx;
270 unsigned int i;
271
272 queue_for_each_hw_ctx(q, hctx, i)
273 if (blk_mq_hw_queue_mapped(hctx))
274 blk_mq_tag_wakeup_all(hctx->tags, true);
275 }
276
277 /*
278 * Only need start/end time stamping if we have iostat or
279 * blk stats enabled, or using an IO scheduler.
280 */
281 static inline bool blk_mq_need_time_stamp(struct request *rq)
282 {
283 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
284 }
285
286 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
287 unsigned int tag, u64 alloc_time_ns)
288 {
289 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
290 struct request *rq = tags->static_rqs[tag];
291
292 if (data->q->elevator) {
293 rq->tag = BLK_MQ_NO_TAG;
294 rq->internal_tag = tag;
295 } else {
296 rq->tag = tag;
297 rq->internal_tag = BLK_MQ_NO_TAG;
298 }
299
300 /* csd/requeue_work/fifo_time is initialized before use */
301 rq->q = data->q;
302 rq->mq_ctx = data->ctx;
303 rq->mq_hctx = data->hctx;
304 rq->rq_flags = 0;
305 rq->cmd_flags = data->cmd_flags;
306 if (data->flags & BLK_MQ_REQ_PM)
307 rq->rq_flags |= RQF_PM;
308 if (blk_queue_io_stat(data->q))
309 rq->rq_flags |= RQF_IO_STAT;
310 INIT_LIST_HEAD(&rq->queuelist);
311 INIT_HLIST_NODE(&rq->hash);
312 RB_CLEAR_NODE(&rq->rb_node);
313 rq->rq_disk = NULL;
314 rq->part = NULL;
315 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
316 rq->alloc_time_ns = alloc_time_ns;
317 #endif
318 if (blk_mq_need_time_stamp(rq))
319 rq->start_time_ns = ktime_get_ns();
320 else
321 rq->start_time_ns = 0;
322 rq->io_start_time_ns = 0;
323 rq->stats_sectors = 0;
324 rq->nr_phys_segments = 0;
325 #if defined(CONFIG_BLK_DEV_INTEGRITY)
326 rq->nr_integrity_segments = 0;
327 #endif
328 blk_crypto_rq_set_defaults(rq);
329 /* tag was already set */
330 WRITE_ONCE(rq->deadline, 0);
331
332 rq->timeout = 0;
333
334 rq->end_io = NULL;
335 rq->end_io_data = NULL;
336
337 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
338 refcount_set(&rq->ref, 1);
339
340 if (!op_is_flush(data->cmd_flags)) {
341 struct elevator_queue *e = data->q->elevator;
342
343 rq->elv.icq = NULL;
344 if (e && e->type->ops.prepare_request) {
345 if (e->type->icq_cache)
346 blk_mq_sched_assign_ioc(rq);
347
348 e->type->ops.prepare_request(rq);
349 rq->rq_flags |= RQF_ELVPRIV;
350 }
351 }
352
353 data->hctx->queued++;
354 trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns);
355 return rq;
356 }
357
358 static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
359 {
360 struct request_queue *q = data->q;
361 struct elevator_queue *e = q->elevator;
362 u64 alloc_time_ns = 0;
363 unsigned int tag;
364
365 /* alloc_time includes depth and tag waits */
366 if (blk_queue_rq_alloc_time(q))
367 alloc_time_ns = ktime_get_ns();
368
369 if (data->cmd_flags & REQ_NOWAIT)
370 data->flags |= BLK_MQ_REQ_NOWAIT;
371
372 if (e) {
373 /*
374 * Flush/passthrough requests are special and go directly to the
375 * dispatch list. Don't include reserved tags in the
376 * limiting, as it isn't useful.
377 */
378 if (!op_is_flush(data->cmd_flags) &&
379 !blk_op_is_passthrough(data->cmd_flags) &&
380 e->type->ops.limit_depth &&
381 !(data->flags & BLK_MQ_REQ_RESERVED))
382 e->type->ops.limit_depth(data->cmd_flags, data);
383 }
384
385 retry:
386 data->ctx = blk_mq_get_ctx(q);
387 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
388 if (!e)
389 blk_mq_tag_busy(data->hctx);
390
391 /*
392 * Waiting allocations only fail because of an inactive hctx. In that
393 * case just retry the hctx assignment and tag allocation as CPU hotplug
394 * should have migrated us to an online CPU by now.
395 */
396 tag = blk_mq_get_tag(data);
397 if (tag == BLK_MQ_NO_TAG) {
398 if (data->flags & BLK_MQ_REQ_NOWAIT)
399 return NULL;
400
401 /*
402 * Give up the CPU and sleep for a random short time to ensure
403 * that threads using a realtime scheduling class are migrated
404 * off the CPU, and thus off the hctx that is going away.
405 */
406 msleep(3);
407 goto retry;
408 }
409 return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
410 }
411
412 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
413 blk_mq_req_flags_t flags)
414 {
415 struct blk_mq_alloc_data data = {
416 .q = q,
417 .flags = flags,
418 .cmd_flags = op,
419 };
420 struct request *rq;
421 int ret;
422
423 ret = blk_queue_enter(q, flags);
424 if (ret)
425 return ERR_PTR(ret);
426
427 rq = __blk_mq_alloc_request(&data);
428 if (!rq)
429 goto out_queue_exit;
430 rq->__data_len = 0;
431 rq->__sector = (sector_t) -1;
432 rq->bio = rq->biotail = NULL;
433 return rq;
434 out_queue_exit:
435 blk_queue_exit(q);
436 return ERR_PTR(-EWOULDBLOCK);
437 }
438 EXPORT_SYMBOL(blk_mq_alloc_request);
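/*
 * Minimal usage sketch (editor's note, not part of the original file):
 * callers outside the normal bio path, e.g. drivers building passthrough
 * commands, allocate and free requests directly. Failure is reported via
 * ERR_PTR(), not NULL:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... set up and execute the command ...
 *	blk_mq_free_request(rq);
 */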
439
440 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
441 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
442 {
443 struct blk_mq_alloc_data data = {
444 .q = q,
445 .flags = flags,
446 .cmd_flags = op,
447 };
448 u64 alloc_time_ns = 0;
449 unsigned int cpu;
450 unsigned int tag;
451 int ret;
452
453 /* alloc_time includes depth and tag waits */
454 if (blk_queue_rq_alloc_time(q))
455 alloc_time_ns = ktime_get_ns();
456
457 /*
458 * If the tag allocator sleeps we could get an allocation for a
459 * different hardware context. No need to complicate the low level
460 * allocator for this for the rare use case of a command tied to
461 * a specific queue.
462 */
463 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
464 WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
465 return ERR_PTR(-EINVAL);
466
467 if (hctx_idx >= q->nr_hw_queues)
468 return ERR_PTR(-EIO);
469
470 ret = blk_queue_enter(q, flags);
471 if (ret)
472 return ERR_PTR(ret);
473
474 /*
475 * Check if the hardware context is actually mapped to anything.
476 * If not tell the caller that it should skip this queue.
477 */
478 ret = -EXDEV;
479 data.hctx = q->queue_hw_ctx[hctx_idx];
480 if (!blk_mq_hw_queue_mapped(data.hctx))
481 goto out_queue_exit;
482 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
483 if (cpu >= nr_cpu_ids)
484 goto out_queue_exit;
485 data.ctx = __blk_mq_get_ctx(q, cpu);
486
487 if (!q->elevator)
488 blk_mq_tag_busy(data.hctx);
489
490 ret = -EWOULDBLOCK;
491 tag = blk_mq_get_tag(&data);
492 if (tag == BLK_MQ_NO_TAG)
493 goto out_queue_exit;
494 return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
495
496 out_queue_exit:
497 blk_queue_exit(q);
498 return ERR_PTR(ret);
499 }
500 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
501
502 static void __blk_mq_free_request(struct request *rq)
503 {
504 struct request_queue *q = rq->q;
505 struct blk_mq_ctx *ctx = rq->mq_ctx;
506 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
507 const int sched_tag = rq->internal_tag;
508
509 blk_crypto_free_request(rq);
510 blk_pm_mark_last_busy(rq);
511 rq->mq_hctx = NULL;
512 if (rq->tag != BLK_MQ_NO_TAG)
513 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
514 if (sched_tag != BLK_MQ_NO_TAG)
515 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
516 blk_mq_sched_restart(hctx);
517 blk_queue_exit(q);
518 }
519
520 void blk_mq_free_request(struct request *rq)
521 {
522 struct request_queue *q = rq->q;
523 struct elevator_queue *e = q->elevator;
524 struct blk_mq_ctx *ctx = rq->mq_ctx;
525 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
526
527 if (rq->rq_flags & RQF_ELVPRIV) {
528 if (e && e->type->ops.finish_request)
529 e->type->ops.finish_request(rq);
530 if (rq->elv.icq) {
531 put_io_context(rq->elv.icq->ioc);
532 rq->elv.icq = NULL;
533 }
534 }
535
536 ctx->rq_completed[rq_is_sync(rq)]++;
537 if (rq->rq_flags & RQF_MQ_INFLIGHT)
538 __blk_mq_dec_active_requests(hctx);
539
540 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
541 laptop_io_completion(q->disk->bdi);
542
543 rq_qos_done(q, rq);
544
545 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
546 if (refcount_dec_and_test(&rq->ref))
547 __blk_mq_free_request(rq);
548 }
549 EXPORT_SYMBOL_GPL(blk_mq_free_request);
550
551 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
552 {
553 u64 now = 0;
554
555 if (blk_mq_need_time_stamp(rq))
556 now = ktime_get_ns();
557
558 if (rq->rq_flags & RQF_STATS) {
559 blk_mq_poll_stats_start(rq->q);
560 blk_stat_add(rq, now);
561 }
562
563 blk_mq_sched_completed_request(rq, now);
564
565 blk_account_io_done(rq, now);
566
567 if (rq->end_io) {
568 rq_qos_done(rq->q, rq);
569 rq->end_io(rq, error);
570 } else {
571 blk_mq_free_request(rq);
572 }
573 }
574 EXPORT_SYMBOL(__blk_mq_end_request);
575
576 void blk_mq_end_request(struct request *rq, blk_status_t error)
577 {
578 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
579 BUG();
580 __blk_mq_end_request(rq, error);
581 }
582 EXPORT_SYMBOL(blk_mq_end_request);
583
584 static void blk_complete_reqs(struct llist_head *list)
585 {
586 struct llist_node *entry = llist_reverse_order(llist_del_all(list));
587 struct request *rq, *next;
588
589 llist_for_each_entry_safe(rq, next, entry, ipi_list)
590 rq->q->mq_ops->complete(rq);
591 }
592
593 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
594 {
595 blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
596 }
597
598 static int blk_softirq_cpu_dead(unsigned int cpu)
599 {
600 blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
601 return 0;
602 }
603
604 static void __blk_mq_complete_request_remote(void *data)
605 {
606 __raise_softirq_irqoff(BLOCK_SOFTIRQ);
607 }
608
609 static inline bool blk_mq_complete_need_ipi(struct request *rq)
610 {
611 int cpu = raw_smp_processor_id();
612
613 if (!IS_ENABLED(CONFIG_SMP) ||
614 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
615 return false;
616 /*
617 * With force threaded interrupts enabled, raising softirq from an SMP
618 * function call will always result in waking the ksoftirqd thread.
619 * This is probably worse than completing the request on a different
620 * cache domain.
621 */
622 if (force_irqthreads())
623 return false;
624
625 /* same CPU or cache domain? Complete locally */
626 if (cpu == rq->mq_ctx->cpu ||
627 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
628 cpus_share_cache(cpu, rq->mq_ctx->cpu)))
629 return false;
630
631 /* don't try to IPI to an offline CPU */
632 return cpu_online(rq->mq_ctx->cpu);
633 }
634
635 static void blk_mq_complete_send_ipi(struct request *rq)
636 {
637 struct llist_head *list;
638 unsigned int cpu;
639
640 cpu = rq->mq_ctx->cpu;
641 list = &per_cpu(blk_cpu_done, cpu);
642 if (llist_add(&rq->ipi_list, list)) {
643 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
644 smp_call_function_single_async(cpu, &rq->csd);
645 }
646 }
647
648 static void blk_mq_raise_softirq(struct request *rq)
649 {
650 struct llist_head *list;
651
652 preempt_disable();
653 list = this_cpu_ptr(&blk_cpu_done);
654 if (llist_add(&rq->ipi_list, list))
655 raise_softirq(BLOCK_SOFTIRQ);
656 preempt_enable();
657 }
658
659 bool blk_mq_complete_request_remote(struct request *rq)
660 {
661 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
662
663 /*
664 * For a polled request, always complete locally, it's pointless
665 * to redirect the completion.
666 */
667 if (rq->cmd_flags & REQ_HIPRI)
668 return false;
669
670 if (blk_mq_complete_need_ipi(rq)) {
671 blk_mq_complete_send_ipi(rq);
672 return true;
673 }
674
675 if (rq->q->nr_hw_queues == 1) {
676 blk_mq_raise_softirq(rq);
677 return true;
678 }
679 return false;
680 }
681 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
682
683 /**
684 * blk_mq_complete_request - end I/O on a request
685 * @rq: the request being processed
686 *
687 * Description:
688 * Complete a request by scheduling the ->complete_rq operation.
689 **/
690 void blk_mq_complete_request(struct request *rq)
691 {
692 if (!blk_mq_complete_request_remote(rq))
693 rq->q->mq_ops->complete(rq);
694 }
695 EXPORT_SYMBOL(blk_mq_complete_request);
696
697 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
698 __releases(hctx->srcu)
699 {
700 if (!(hctx->flags & BLK_MQ_F_BLOCKING))
701 rcu_read_unlock();
702 else
703 srcu_read_unlock(hctx->srcu, srcu_idx);
704 }
705
706 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
707 __acquires(hctx->srcu)
708 {
709 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
710 /* shut up gcc false positive */
711 *srcu_idx = 0;
712 rcu_read_lock();
713 } else
714 *srcu_idx = srcu_read_lock(hctx->srcu);
715 }
716
717 /**
718 * blk_mq_start_request - Start processing a request
719 * @rq: Pointer to request to be started
720 *
721 * Function used by device drivers to notify the block layer that a request
722 * is going to be processed now, so blk layer can do proper initializations
723 * such as starting the timeout timer.
724 */
725 void blk_mq_start_request(struct request *rq)
726 {
727 struct request_queue *q = rq->q;
728
729 trace_block_rq_issue(rq);
730
731 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
732 rq->io_start_time_ns = ktime_get_ns();
733 rq->stats_sectors = blk_rq_sectors(rq);
734 rq->rq_flags |= RQF_STATS;
735 rq_qos_issue(q, rq);
736 }
737
738 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
739
740 blk_add_timer(rq);
741 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
742
743 #ifdef CONFIG_BLK_DEV_INTEGRITY
744 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
745 q->integrity.profile->prepare_fn(rq);
746 #endif
747 }
748 EXPORT_SYMBOL(blk_mq_start_request);
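/*
 * Minimal usage sketch (editor's note, not part of the original file): a
 * driver's ->queue_rq() handler is expected to call blk_mq_start_request()
 * before handing the request to hardware, and to signal completion later via
 * blk_mq_complete_request(). A trimmed-down handler might look like:
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!example_submit_to_hw(rq))	// hypothetical driver helper
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 *
 * The interrupt handler would then call blk_mq_complete_request(rq) once the
 * hardware finishes the command.
 */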
749
750 static void __blk_mq_requeue_request(struct request *rq)
751 {
752 struct request_queue *q = rq->q;
753
754 blk_mq_put_driver_tag(rq);
755
756 trace_block_rq_requeue(rq);
757 rq_qos_requeue(q, rq);
758
759 if (blk_mq_request_started(rq)) {
760 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
761 rq->rq_flags &= ~RQF_TIMED_OUT;
762 }
763 }
764
765 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
766 {
767 __blk_mq_requeue_request(rq);
768
769 /* this request will be re-inserted to io scheduler queue */
770 blk_mq_sched_requeue_request(rq);
771
772 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
773 }
774 EXPORT_SYMBOL(blk_mq_requeue_request);
775
776 static void blk_mq_requeue_work(struct work_struct *work)
777 {
778 struct request_queue *q =
779 container_of(work, struct request_queue, requeue_work.work);
780 LIST_HEAD(rq_list);
781 struct request *rq, *next;
782
783 spin_lock_irq(&q->requeue_lock);
784 list_splice_init(&q->requeue_list, &rq_list);
785 spin_unlock_irq(&q->requeue_lock);
786
787 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
788 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
789 continue;
790
791 rq->rq_flags &= ~RQF_SOFTBARRIER;
792 list_del_init(&rq->queuelist);
793 /*
794 * If RQF_DONTPREP is set, rq already contains some driver specific
795 * data, so insert it into the hctx dispatch list to avoid any
796 * merge.
797 */
798 if (rq->rq_flags & RQF_DONTPREP)
799 blk_mq_request_bypass_insert(rq, false, false);
800 else
801 blk_mq_sched_insert_request(rq, true, false, false);
802 }
803
804 while (!list_empty(&rq_list)) {
805 rq = list_entry(rq_list.next, struct request, queuelist);
806 list_del_init(&rq->queuelist);
807 blk_mq_sched_insert_request(rq, false, false, false);
808 }
809
810 blk_mq_run_hw_queues(q, false);
811 }
812
813 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
814 bool kick_requeue_list)
815 {
816 struct request_queue *q = rq->q;
817 unsigned long flags;
818
819 /*
820 * We abuse this flag that is otherwise used by the I/O scheduler to
821 * request head insertion from the workqueue.
822 */
823 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
824
825 spin_lock_irqsave(&q->requeue_lock, flags);
826 if (at_head) {
827 rq->rq_flags |= RQF_SOFTBARRIER;
828 list_add(&rq->queuelist, &q->requeue_list);
829 } else {
830 list_add_tail(&rq->queuelist, &q->requeue_list);
831 }
832 spin_unlock_irqrestore(&q->requeue_lock, flags);
833
834 if (kick_requeue_list)
835 blk_mq_kick_requeue_list(q);
836 }
837
838 void blk_mq_kick_requeue_list(struct request_queue *q)
839 {
840 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
841 }
842 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
843
844 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
845 unsigned long msecs)
846 {
847 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
848 msecs_to_jiffies(msecs));
849 }
850 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
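/*
 * Usage note (editor's note, not part of the original file): a driver that
 * started a request but hits a transient condition after ->queue_rq() has
 * already returned can push it back with, for instance:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);	// retry after ~100ms
 *
 * Passing kick_requeue_list == false and kicking with a delay avoids an
 * immediate retry storm while the condition persists.
 */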
851
852 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
853 {
854 if (tag < tags->nr_tags) {
855 prefetch(tags->rqs[tag]);
856 return tags->rqs[tag];
857 }
858
859 return NULL;
860 }
861 EXPORT_SYMBOL(blk_mq_tag_to_rq);
862
863 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
864 void *priv, bool reserved)
865 {
866 /*
867 * If we find a request that isn't idle and the queue matches,
868 * we know the queue is busy. Return false to stop the iteration.
869 */
870 if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
871 bool *busy = priv;
872
873 *busy = true;
874 return false;
875 }
876
877 return true;
878 }
879
880 bool blk_mq_queue_inflight(struct request_queue *q)
881 {
882 bool busy = false;
883
884 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
885 return busy;
886 }
887 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
888
889 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
890 {
891 req->rq_flags |= RQF_TIMED_OUT;
892 if (req->q->mq_ops->timeout) {
893 enum blk_eh_timer_return ret;
894
895 ret = req->q->mq_ops->timeout(req, reserved);
896 if (ret == BLK_EH_DONE)
897 return;
898 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
899 }
900
901 blk_add_timer(req);
902 }
903
904 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
905 {
906 unsigned long deadline;
907
908 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
909 return false;
910 if (rq->rq_flags & RQF_TIMED_OUT)
911 return false;
912
913 deadline = READ_ONCE(rq->deadline);
914 if (time_after_eq(jiffies, deadline))
915 return true;
916
917 if (*next == 0)
918 *next = deadline;
919 else if (time_after(*next, deadline))
920 *next = deadline;
921 return false;
922 }
923
924 void blk_mq_put_rq_ref(struct request *rq)
925 {
926 if (is_flush_rq(rq))
927 rq->end_io(rq, 0);
928 else if (refcount_dec_and_test(&rq->ref))
929 __blk_mq_free_request(rq);
930 }
931
932 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
933 struct request *rq, void *priv, bool reserved)
934 {
935 unsigned long *next = priv;
936
937 /*
938 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
939 * be reallocated underneath the timeout handler's processing, so
940 * the expire check is reliable. If the request is not expired, then
941 * it was completed and reallocated as a new request after returning
942 * from blk_mq_check_expired().
943 */
944 if (blk_mq_req_expired(rq, next))
945 blk_mq_rq_timed_out(rq, reserved);
946 return true;
947 }
948
949 static void blk_mq_timeout_work(struct work_struct *work)
950 {
951 struct request_queue *q =
952 container_of(work, struct request_queue, timeout_work);
953 unsigned long next = 0;
954 struct blk_mq_hw_ctx *hctx;
955 int i;
956
957 /* A deadlock might occur if a request is stuck requiring a
958 * timeout at the same time a queue freeze is waiting for
959 * completion, since the timeout code would not be able to
960 * acquire the queue reference here.
961 *
962 * That's why we don't use blk_queue_enter here; instead, we use
963 * percpu_ref_tryget directly, because we need to be able to
964 * obtain a reference even in the short window between the queue
965 * starting to freeze, by dropping the first reference in
966 * blk_freeze_queue_start, and the moment the last request is
967 * consumed, marked by the instant q_usage_counter reaches
968 * zero.
969 */
970 if (!percpu_ref_tryget(&q->q_usage_counter))
971 return;
972
973 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
974
975 if (next != 0) {
976 mod_timer(&q->timeout, next);
977 } else {
978 /*
979 * Request timeouts are handled as a forward rolling timer. If
980 * we end up here it means that no requests are pending and
981 * also that no request has been pending for a while. Mark
982 * each hctx as idle.
983 */
984 queue_for_each_hw_ctx(q, hctx, i) {
985 /* the hctx may be unmapped, so check it here */
986 if (blk_mq_hw_queue_mapped(hctx))
987 blk_mq_tag_idle(hctx);
988 }
989 }
990 blk_queue_exit(q);
991 }
992
993 struct flush_busy_ctx_data {
994 struct blk_mq_hw_ctx *hctx;
995 struct list_head *list;
996 };
997
998 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
999 {
1000 struct flush_busy_ctx_data *flush_data = data;
1001 struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1002 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1003 enum hctx_type type = hctx->type;
1004
1005 spin_lock(&ctx->lock);
1006 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1007 sbitmap_clear_bit(sb, bitnr);
1008 spin_unlock(&ctx->lock);
1009 return true;
1010 }
1011
1012 /*
1013 * Process software queues that have been marked busy, splicing them
1014 * to the for-dispatch list.
1015 */
1016 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1017 {
1018 struct flush_busy_ctx_data data = {
1019 .hctx = hctx,
1020 .list = list,
1021 };
1022
1023 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1024 }
1025 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1026
1027 struct dispatch_rq_data {
1028 struct blk_mq_hw_ctx *hctx;
1029 struct request *rq;
1030 };
1031
1032 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1033 void *data)
1034 {
1035 struct dispatch_rq_data *dispatch_data = data;
1036 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1037 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1038 enum hctx_type type = hctx->type;
1039
1040 spin_lock(&ctx->lock);
1041 if (!list_empty(&ctx->rq_lists[type])) {
1042 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1043 list_del_init(&dispatch_data->rq->queuelist);
1044 if (list_empty(&ctx->rq_lists[type]))
1045 sbitmap_clear_bit(sb, bitnr);
1046 }
1047 spin_unlock(&ctx->lock);
1048
1049 return !dispatch_data->rq;
1050 }
1051
1052 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1053 struct blk_mq_ctx *start)
1054 {
1055 unsigned off = start ? start->index_hw[hctx->type] : 0;
1056 struct dispatch_rq_data data = {
1057 .hctx = hctx,
1058 .rq = NULL,
1059 };
1060
1061 __sbitmap_for_each_set(&hctx->ctx_map, off,
1062 dispatch_rq_from_ctx, &data);
1063
1064 return data.rq;
1065 }
1066
1067 static inline unsigned int queued_to_index(unsigned int queued)
1068 {
1069 if (!queued)
1070 return 0;
1071
1072 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1073 }
1074
1075 static bool __blk_mq_get_driver_tag(struct request *rq)
1076 {
1077 struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
1078 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1079 int tag;
1080
1081 blk_mq_tag_busy(rq->mq_hctx);
1082
1083 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1084 bt = rq->mq_hctx->tags->breserved_tags;
1085 tag_offset = 0;
1086 } else {
1087 if (!hctx_may_queue(rq->mq_hctx, bt))
1088 return false;
1089 }
1090
1091 tag = __sbitmap_queue_get(bt);
1092 if (tag == BLK_MQ_NO_TAG)
1093 return false;
1094
1095 rq->tag = tag + tag_offset;
1096 return true;
1097 }
1098
1099 bool blk_mq_get_driver_tag(struct request *rq)
1100 {
1101 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1102
1103 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
1104 return false;
1105
1106 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1107 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1108 rq->rq_flags |= RQF_MQ_INFLIGHT;
1109 __blk_mq_inc_active_requests(hctx);
1110 }
1111 hctx->tags->rqs[rq->tag] = rq;
1112 return true;
1113 }
1114
1115 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1116 int flags, void *key)
1117 {
1118 struct blk_mq_hw_ctx *hctx;
1119
1120 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1121
1122 spin_lock(&hctx->dispatch_wait_lock);
1123 if (!list_empty(&wait->entry)) {
1124 struct sbitmap_queue *sbq;
1125
1126 list_del_init(&wait->entry);
1127 sbq = hctx->tags->bitmap_tags;
1128 atomic_dec(&sbq->ws_active);
1129 }
1130 spin_unlock(&hctx->dispatch_wait_lock);
1131
1132 blk_mq_run_hw_queue(hctx, true);
1133 return 1;
1134 }
1135
1136 /*
1137 * Mark us waiting for a tag. For shared tags, this involves hooking us into
1138 * the tag wakeups. For non-shared tags, we can simply mark us needing a
1139 * restart. For both cases, take care to check the condition again after
1140 * marking us as waiting.
1141 */
1142 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1143 struct request *rq)
1144 {
1145 struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
1146 struct wait_queue_head *wq;
1147 wait_queue_entry_t *wait;
1148 bool ret;
1149
1150 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1151 blk_mq_sched_mark_restart_hctx(hctx);
1152
1153 /*
1154 * It's possible that a tag was freed in the window between the
1155 * allocation failure and adding the hardware queue to the wait
1156 * queue.
1157 *
1158 * Don't clear RESTART here, someone else could have set it.
1159 * At most this will cost an extra queue run.
1160 */
1161 return blk_mq_get_driver_tag(rq);
1162 }
1163
1164 wait = &hctx->dispatch_wait;
1165 if (!list_empty_careful(&wait->entry))
1166 return false;
1167
1168 wq = &bt_wait_ptr(sbq, hctx)->wait;
1169
1170 spin_lock_irq(&wq->lock);
1171 spin_lock(&hctx->dispatch_wait_lock);
1172 if (!list_empty(&wait->entry)) {
1173 spin_unlock(&hctx->dispatch_wait_lock);
1174 spin_unlock_irq(&wq->lock);
1175 return false;
1176 }
1177
1178 atomic_inc(&sbq->ws_active);
1179 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1180 __add_wait_queue(wq, wait);
1181
1182 /*
1183 * Add one explicit barrier since blk_mq_get_driver_tag() may
1184 * not imply barrier in case of failure.
1185 *
1186 * Order adding us to wait queue and allocating driver tag.
1187 *
1188 * The pair is the one implied in sbitmap_queue_wake_up() which
1189 * orders clearing sbitmap tag bits and waitqueue_active() in
1190 * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
1191 *
1192 * Otherwise, re-order of adding wait queue and getting driver tag
1193 * may cause __sbitmap_queue_wake_up() to wake up nothing because
1194 * the waitqueue_active() may not observe us in wait queue.
1195 */
1196 smp_mb();
1197
1198 /*
1199 * It's possible that a tag was freed in the window between the
1200 * allocation failure and adding the hardware queue to the wait
1201 * queue.
1202 */
1203 ret = blk_mq_get_driver_tag(rq);
1204 if (!ret) {
1205 spin_unlock(&hctx->dispatch_wait_lock);
1206 spin_unlock_irq(&wq->lock);
1207 return false;
1208 }
1209
1210 /*
1211 * We got a tag, remove ourselves from the wait queue to ensure
1212 * someone else gets the wakeup.
1213 */
1214 list_del_init(&wait->entry);
1215 atomic_dec(&sbq->ws_active);
1216 spin_unlock(&hctx->dispatch_wait_lock);
1217 spin_unlock_irq(&wq->lock);
1218
1219 return true;
1220 }
1221
1222 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8
1223 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4
1224 /*
1225 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA):
1226 * - EWMA is one simple way to compute a running average value
1227 * - weights of 7/8 and 1/8 are applied so that it decays exponentially
1228 * - 4 is taken as the factor to avoid getting a too small (0) result; the
1229 * exact factor doesn't matter because EWMA decreases exponentially
1230 */
1231 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1232 {
1233 unsigned int ewma;
1234
1235 ewma = hctx->dispatch_busy;
1236
1237 if (!ewma && !busy)
1238 return;
1239
1240 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1241 if (busy)
1242 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1243 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1244
1245 hctx->dispatch_busy = ewma;
1246 }
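/*
 * Worked example (editor's note, not part of the original file): with
 * WEIGHT == 8 and FACTOR == 4 the update above is
 * ewma = (ewma * 7 + (busy ? 16 : 0)) / 8 in integer arithmetic. Starting
 * from 0, one busy update yields (0 * 7 + 16) / 8 == 2, and a following idle
 * update decays that to (2 * 7) / 8 == 1, so dispatch_busy rises quickly
 * under pressure and drains back toward zero once dispatches stop failing.
 */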
1247
1248 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1249
1250 static void blk_mq_handle_dev_resource(struct request *rq,
1251 struct list_head *list)
1252 {
1253 struct request *next =
1254 list_first_entry_or_null(list, struct request, queuelist);
1255
1256 /*
1257 * If an I/O scheduler has been configured and we got a driver tag for
1258 * the next request already, free it.
1259 */
1260 if (next)
1261 blk_mq_put_driver_tag(next);
1262
1263 list_add(&rq->queuelist, list);
1264 __blk_mq_requeue_request(rq);
1265 }
1266
1267 static void blk_mq_handle_zone_resource(struct request *rq,
1268 struct list_head *zone_list)
1269 {
1270 /*
1271 * If we end up here it is because we cannot dispatch a request to a
1272 * specific zone due to LLD level zone-write locking or other zone
1273 * related resource not being available. In this case, set the request
1274 * aside in zone_list for retrying it later.
1275 */
1276 list_add(&rq->queuelist, zone_list);
1277 __blk_mq_requeue_request(rq);
1278 }
1279
1280 enum prep_dispatch {
1281 PREP_DISPATCH_OK,
1282 PREP_DISPATCH_NO_TAG,
1283 PREP_DISPATCH_NO_BUDGET,
1284 };
1285
1286 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1287 bool need_budget)
1288 {
1289 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1290 int budget_token = -1;
1291
1292 if (need_budget) {
1293 budget_token = blk_mq_get_dispatch_budget(rq->q);
1294 if (budget_token < 0) {
1295 blk_mq_put_driver_tag(rq);
1296 return PREP_DISPATCH_NO_BUDGET;
1297 }
1298 blk_mq_set_rq_budget_token(rq, budget_token);
1299 }
1300
1301 if (!blk_mq_get_driver_tag(rq)) {
1302 /*
1303 * The initial allocation attempt failed, so we need to
1304 * rerun the hardware queue when a tag is freed. The
1305 * waitqueue takes care of that. If the queue is run
1306 * before we add this entry back on the dispatch list,
1307 * we'll re-run it below.
1308 */
1309 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1310 /*
1311 * All budgets not acquired in this function will be put
1312 * back together when handling the partial dispatch
1313 */
1314 if (need_budget)
1315 blk_mq_put_dispatch_budget(rq->q, budget_token);
1316 return PREP_DISPATCH_NO_TAG;
1317 }
1318 }
1319
1320 return PREP_DISPATCH_OK;
1321 }
1322
1323 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1324 static void blk_mq_release_budgets(struct request_queue *q,
1325 struct list_head *list)
1326 {
1327 struct request *rq;
1328
1329 list_for_each_entry(rq, list, queuelist) {
1330 int budget_token = blk_mq_get_rq_budget_token(rq);
1331
1332 if (budget_token >= 0)
1333 blk_mq_put_dispatch_budget(q, budget_token);
1334 }
1335 }
1336
1337 /*
1338 * Returns true if we did some work AND can potentially do more.
1339 */
1340 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1341 unsigned int nr_budgets)
1342 {
1343 enum prep_dispatch prep;
1344 struct request_queue *q = hctx->queue;
1345 struct request *rq, *nxt;
1346 int errors, queued;
1347 blk_status_t ret = BLK_STS_OK;
1348 LIST_HEAD(zone_list);
1349 bool needs_resource = false;
1350
1351 if (list_empty(list))
1352 return false;
1353
1354 /*
1355 * Now process all the entries, sending them to the driver.
1356 */
1357 errors = queued = 0;
1358 do {
1359 struct blk_mq_queue_data bd;
1360
1361 rq = list_first_entry(list, struct request, queuelist);
1362
1363 WARN_ON_ONCE(hctx != rq->mq_hctx);
1364 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1365 if (prep != PREP_DISPATCH_OK)
1366 break;
1367
1368 list_del_init(&rq->queuelist);
1369
1370 bd.rq = rq;
1371
1372 /*
1373 * Flag last if we have no more requests, or if we have more
1374 * but can't assign a driver tag to it.
1375 */
1376 if (list_empty(list))
1377 bd.last = true;
1378 else {
1379 nxt = list_first_entry(list, struct request, queuelist);
1380 bd.last = !blk_mq_get_driver_tag(nxt);
1381 }
1382
1383 /*
1384 * once the request is queued to the LLD, there is no need to cover
1385 * the budget any more
1386 */
1387 if (nr_budgets)
1388 nr_budgets--;
1389 ret = q->mq_ops->queue_rq(hctx, &bd);
1390 switch (ret) {
1391 case BLK_STS_OK:
1392 queued++;
1393 break;
1394 case BLK_STS_RESOURCE:
1395 needs_resource = true;
1396 fallthrough;
1397 case BLK_STS_DEV_RESOURCE:
1398 blk_mq_handle_dev_resource(rq, list);
1399 goto out;
1400 case BLK_STS_ZONE_RESOURCE:
1401 /*
1402 * Move the request to zone_list and keep going through
1403 * the dispatch list to find more requests the drive can
1404 * accept.
1405 */
1406 blk_mq_handle_zone_resource(rq, &zone_list);
1407 needs_resource = true;
1408 break;
1409 default:
1410 errors++;
1411 blk_mq_end_request(rq, ret);
1412 }
1413 } while (!list_empty(list));
1414 out:
1415 if (!list_empty(&zone_list))
1416 list_splice_tail_init(&zone_list, list);
1417
1418 hctx->dispatched[queued_to_index(queued)]++;
1419
1420 /* If we didn't flush the entire list, we could have told the driver
1421 * there was more coming, but that turned out to be a lie.
1422 */
1423 if ((!list_empty(list) || errors || needs_resource ||
1424 ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
1425 q->mq_ops->commit_rqs(hctx);
1426 /*
1427 * Any items that need requeuing? Stuff them into hctx->dispatch,
1428 * that is where we will continue on next queue run.
1429 */
1430 if (!list_empty(list)) {
1431 bool needs_restart;
1432 /* For non-shared tags, the RESTART check will suffice */
1433 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1434 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1435
1436 if (nr_budgets)
1437 blk_mq_release_budgets(q, list);
1438
1439 spin_lock(&hctx->lock);
1440 list_splice_tail_init(list, &hctx->dispatch);
1441 spin_unlock(&hctx->lock);
1442
1443 /*
1444 * Order adding requests to hctx->dispatch and checking
1445 * SCHED_RESTART flag. The pair of this smp_mb() is the one
1446 * in blk_mq_sched_restart(). This avoids the restart code path
1447 * missing the newly added requests on hctx->dispatch while
1448 * SCHED_RESTART is observed here.
1449 */
1450 smp_mb();
1451
1452 /*
1453 * If SCHED_RESTART was set by the caller of this function and
1454 * it is no longer set that means that it was cleared by another
1455 * thread and hence that a queue rerun is needed.
1456 *
1457 * If 'no_tag' is set, that means that we failed getting
1458 * a driver tag with an I/O scheduler attached. If our dispatch
1459 * waitqueue is no longer active, ensure that we run the queue
1460 * AFTER adding our entries back to the list.
1461 *
1462 * If no I/O scheduler has been configured it is possible that
1463 * the hardware queue got stopped and restarted before requests
1464 * were pushed back onto the dispatch list. Rerun the queue to
1465 * avoid starvation. Notes:
1466 * - blk_mq_run_hw_queue() checks whether or not a queue has
1467 * been stopped before rerunning a queue.
1468 * - Some but not all block drivers stop a queue before
1469 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1470 * and dm-rq.
1471 *
1472 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1473 * bit is set, run queue after a delay to avoid IO stalls
1474 * that could otherwise occur if the queue is idle. We'll do
1475 * similar if we couldn't get budget or couldn't lock a zone
1476 * and SCHED_RESTART is set.
1477 */
1478 needs_restart = blk_mq_sched_needs_restart(hctx);
1479 if (prep == PREP_DISPATCH_NO_BUDGET)
1480 needs_resource = true;
1481 if (!needs_restart ||
1482 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1483 blk_mq_run_hw_queue(hctx, true);
1484 else if (needs_restart && needs_resource)
1485 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1486
1487 blk_mq_update_dispatch_busy(hctx, true);
1488 return false;
1489 } else
1490 blk_mq_update_dispatch_busy(hctx, false);
1491
1492 return (queued + errors) != 0;
1493 }
1494
1495 /**
1496 * __blk_mq_run_hw_queue - Run a hardware queue.
1497 * @hctx: Pointer to the hardware queue to run.
1498 *
1499 * Send pending requests to the hardware.
1500 */
1501 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1502 {
1503 int srcu_idx;
1504
1505 /*
1506 * We can't run the queue inline with ints disabled. Ensure that
1507 * we catch bad users of this early.
1508 */
1509 WARN_ON_ONCE(in_interrupt());
1510
1511 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1512
1513 hctx_lock(hctx, &srcu_idx);
1514 blk_mq_sched_dispatch_requests(hctx);
1515 hctx_unlock(hctx, srcu_idx);
1516 }
1517
1518 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
1519 {
1520 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1521
1522 if (cpu >= nr_cpu_ids)
1523 cpu = cpumask_first(hctx->cpumask);
1524 return cpu;
1525 }
1526
1527 /*
1528 * It'd be great if the workqueue API had a way to pass
1529 * in a mask and had some smarts for more clever placement.
1530 * For now we just round-robin here, switching for every
1531 * BLK_MQ_CPU_WORK_BATCH queued items.
1532 */
1533 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1534 {
1535 bool tried = false;
1536 int next_cpu = hctx->next_cpu;
1537
1538 if (hctx->queue->nr_hw_queues == 1)
1539 return WORK_CPU_UNBOUND;
1540
1541 if (--hctx->next_cpu_batch <= 0) {
1542 select_cpu:
1543 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
1544 cpu_online_mask);
1545 if (next_cpu >= nr_cpu_ids)
1546 next_cpu = blk_mq_first_mapped_cpu(hctx);
1547 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1548 }
1549
1550 /*
1551 * Do an unbound schedule if we can't find an online CPU for this hctx,
1552 * and it should only happen in the path of handling CPU DEAD.
1553 */
1554 if (!cpu_online(next_cpu)) {
1555 if (!tried) {
1556 tried = true;
1557 goto select_cpu;
1558 }
1559
1560 /*
1561 * Make sure to re-select CPU next time once after CPUs
1562 * in hctx->cpumask become online again.
1563 */
1564 hctx->next_cpu = next_cpu;
1565 hctx->next_cpu_batch = 1;
1566 return WORK_CPU_UNBOUND;
1567 }
1568
1569 hctx->next_cpu = next_cpu;
1570 return next_cpu;
1571 }
1572
1573 /**
1574 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
1575 * @hctx: Pointer to the hardware queue to run.
1576 * @async: If we want to run the queue asynchronously.
1577 * @msecs: Milliseconds of delay to wait before running the queue.
1578 *
1579 * If !@async, try to run the queue now. Else, run the queue asynchronously and
1580 * with a delay of @msecs.
1581 */
1582 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1583 unsigned long msecs)
1584 {
1585 if (unlikely(blk_mq_hctx_stopped(hctx)))
1586 return;
1587
1588 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1589 int cpu = get_cpu();
1590 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1591 __blk_mq_run_hw_queue(hctx);
1592 put_cpu();
1593 return;
1594 }
1595
1596 put_cpu();
1597 }
1598
1599 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1600 msecs_to_jiffies(msecs));
1601 }
1602
1603 /**
1604 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
1605 * @hctx: Pointer to the hardware queue to run.
1606 * @msecs: Milliseconds of delay to wait before running the queue.
1607 *
1608 * Run a hardware queue asynchronously with a delay of @msecs.
1609 */
1610 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1611 {
1612 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1613 }
1614 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1615
1616 /**
1617 * blk_mq_run_hw_queue - Start to run a hardware queue.
1618 * @hctx: Pointer to the hardware queue to run.
1619 * @async: If we want to run the queue asynchronously.
1620 *
1621 * Check if the request queue is not in a quiesced state and if there are
1622 * pending requests to be sent. If this is true, run the queue to send requests
1623 * to hardware.
1624 */
1625 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1626 {
1627 int srcu_idx;
1628 bool need_run;
1629
1630 /*
1631 * When the queue is quiesced, we may be switching the io scheduler,
1632 * updating nr_hw_queues, or doing other things, and we can't run the
1633 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1634 *
1635 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
1636 * quiesced.
1637 */
1638 hctx_lock(hctx, &srcu_idx);
1639 need_run = !blk_queue_quiesced(hctx->queue) &&
1640 blk_mq_hctx_has_pending(hctx);
1641 hctx_unlock(hctx, srcu_idx);
1642
1643 if (need_run)
1644 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1645 }
1646 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1647
1648 /*
1649 * Is the request queue handled by an IO scheduler that does not respect
1650 * hardware queues when dispatching?
1651 */
1652 static bool blk_mq_has_sqsched(struct request_queue *q)
1653 {
1654 struct elevator_queue *e = q->elevator;
1655
1656 if (e && e->type->ops.dispatch_request &&
1657 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
1658 return true;
1659 return false;
1660 }
1661
1662 /*
1663 * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
1664 * scheduler.
1665 */
1666 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
1667 {
1668 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
1669 /*
1670 * If the IO scheduler does not respect hardware queues when
1671 * dispatching, we just don't bother with multiple HW queues and
1672 * dispatch from hctx for the current CPU since running multiple queues
1673 * just causes lock contention inside the scheduler and pointless cache
1674 * bouncing.
1675 */
1676 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
1677
1678 if (!blk_mq_hctx_stopped(hctx))
1679 return hctx;
1680 return NULL;
1681 }
1682
1683 /**
1684 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
1685 * @q: Pointer to the request queue to run.
1686 * @async: If we want to run the queue asynchronously.
1687 */
1688 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1689 {
1690 struct blk_mq_hw_ctx *hctx, *sq_hctx;
1691 int i;
1692
1693 sq_hctx = NULL;
1694 if (blk_mq_has_sqsched(q))
1695 sq_hctx = blk_mq_get_sq_hctx(q);
1696 queue_for_each_hw_ctx(q, hctx, i) {
1697 if (blk_mq_hctx_stopped(hctx))
1698 continue;
1699 /*
1700 * Dispatch from this hctx either if there's no hctx preferred
1701 * by IO scheduler or if it has requests that bypass the
1702 * scheduler.
1703 */
1704 if (!sq_hctx || sq_hctx == hctx ||
1705 !list_empty_careful(&hctx->dispatch))
1706 blk_mq_run_hw_queue(hctx, async);
1707 }
1708 }
1709 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1710
1711 /**
1712 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
1713 * @q: Pointer to the request queue to run.
1714 * @msecs: Milliseconds of delay to wait before running the queues.
1715 */
1716 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
1717 {
1718 struct blk_mq_hw_ctx *hctx, *sq_hctx;
1719 int i;
1720
1721 sq_hctx = NULL;
1722 if (blk_mq_has_sqsched(q))
1723 sq_hctx = blk_mq_get_sq_hctx(q);
1724 queue_for_each_hw_ctx(q, hctx, i) {
1725 if (blk_mq_hctx_stopped(hctx))
1726 continue;
1727 /*
1728 * Dispatch from this hctx either if there's no hctx preferred
1729 * by IO scheduler or if it has requests that bypass the
1730 * scheduler.
1731 */
1732 if (!sq_hctx || sq_hctx == hctx ||
1733 !list_empty_careful(&hctx->dispatch))
1734 blk_mq_delay_run_hw_queue(hctx, msecs);
1735 }
1736 }
1737 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
1738
1739 /**
1740 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1741 * @q: request queue.
1742 *
1743 * The caller is responsible for serializing this function against
1744 * blk_mq_{start,stop}_hw_queue().
1745 */
1746 bool blk_mq_queue_stopped(struct request_queue *q)
1747 {
1748 struct blk_mq_hw_ctx *hctx;
1749 int i;
1750
1751 queue_for_each_hw_ctx(q, hctx, i)
1752 if (blk_mq_hctx_stopped(hctx))
1753 return true;
1754
1755 return false;
1756 }
1757 EXPORT_SYMBOL(blk_mq_queue_stopped);
1758
1759 /*
1760 * This function is often used by a driver to pause .queue_rq() when
1761 * there aren't enough resources or some condition isn't satisfied, and
1762 * BLK_STS_RESOURCE is usually returned.
1763 *
1764 * We do not guarantee that dispatch can be drained or blocked
1765 * after blk_mq_stop_hw_queue() returns. Please use
1766 * blk_mq_quiesce_queue() for that requirement.
1767 */
1768 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1769 {
1770 cancel_delayed_work(&hctx->run_work);
1771
1772 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1773 }
1774 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1775
1776 /*
1777 * This function is often used by a driver to pause .queue_rq() when
1778 * there aren't enough resources or some condition isn't satisfied, and
1779 * BLK_STS_RESOURCE is usually returned.
1780 *
1781 * We do not guarantee that dispatch can be drained or blocked
1782 * after blk_mq_stop_hw_queues() returns. Please use
1783 * blk_mq_quiesce_queue() for that requirement.
1784 */
1785 void blk_mq_stop_hw_queues(struct request_queue *q)
1786 {
1787 struct blk_mq_hw_ctx *hctx;
1788 int i;
1789
1790 queue_for_each_hw_ctx(q, hctx, i)
1791 blk_mq_stop_hw_queue(hctx);
1792 }
1793 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1794
1795 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1796 {
1797 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1798
1799 blk_mq_run_hw_queue(hctx, false);
1800 }
1801 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1802
1803 void blk_mq_start_hw_queues(struct request_queue *q)
1804 {
1805 struct blk_mq_hw_ctx *hctx;
1806 int i;
1807
1808 queue_for_each_hw_ctx(q, hctx, i)
1809 blk_mq_start_hw_queue(hctx);
1810 }
1811 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1812
1813 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1814 {
1815 if (!blk_mq_hctx_stopped(hctx))
1816 return;
1817
1818 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1819 blk_mq_run_hw_queue(hctx, async);
1820 }
1821 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1822
1823 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1824 {
1825 struct blk_mq_hw_ctx *hctx;
1826 int i;
1827
1828 queue_for_each_hw_ctx(q, hctx, i)
1829 blk_mq_start_stopped_hw_queue(hctx, async);
1830 }
1831 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1832
1833 static void blk_mq_run_work_fn(struct work_struct *work)
1834 {
1835 struct blk_mq_hw_ctx *hctx;
1836
1837 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1838
1839 /*
1840 * If we are stopped, don't run the queue.
1841 */
1842 if (blk_mq_hctx_stopped(hctx))
1843 return;
1844
1845 __blk_mq_run_hw_queue(hctx);
1846 }
1847
1848 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1849 struct request *rq,
1850 bool at_head)
1851 {
1852 struct blk_mq_ctx *ctx = rq->mq_ctx;
1853 enum hctx_type type = hctx->type;
1854
1855 lockdep_assert_held(&ctx->lock);
1856
1857 trace_block_rq_insert(rq);
1858
1859 if (at_head)
1860 list_add(&rq->queuelist, &ctx->rq_lists[type]);
1861 else
1862 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
1863 }
1864
1865 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1866 bool at_head)
1867 {
1868 struct blk_mq_ctx *ctx = rq->mq_ctx;
1869
1870 lockdep_assert_held(&ctx->lock);
1871
1872 __blk_mq_insert_req_list(hctx, rq, at_head);
1873 blk_mq_hctx_mark_pending(hctx, ctx);
1874 }
1875
1876 /**
1877 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
1878 * @rq: Pointer to request to be inserted.
1879 * @at_head: true if the request should be inserted at the head of the list.
1880 * @run_queue: If we should run the hardware queue after inserting the request.
1881 *
1882 * Should only be used carefully, when the caller knows we want to
1883 * bypass a potential IO scheduler on the target device.
1884 */
1885 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
1886 bool run_queue)
1887 {
1888 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1889
1890 spin_lock(&hctx->lock);
1891 if (at_head)
1892 list_add(&rq->queuelist, &hctx->dispatch);
1893 else
1894 list_add_tail(&rq->queuelist, &hctx->dispatch);
1895 spin_unlock(&hctx->lock);
1896
1897 if (run_queue)
1898 blk_mq_run_hw_queue(hctx, false);
1899 }
1900
1901 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1902 struct list_head *list)
1903
1904 {
1905 struct request *rq;
1906 enum hctx_type type = hctx->type;
1907
1908 /*
1909  * Preemption doesn't flush the plug list, so it's possible that
1910  * ctx->cpu is offline now.
1911 */
1912 list_for_each_entry(rq, list, queuelist) {
1913 BUG_ON(rq->mq_ctx != ctx);
1914 trace_block_rq_insert(rq);
1915 }
1916
1917 spin_lock(&ctx->lock);
1918 list_splice_tail_init(list, &ctx->rq_lists[type]);
1919 blk_mq_hctx_mark_pending(hctx, ctx);
1920 spin_unlock(&ctx->lock);
1921 }
1922
1923 static int plug_rq_cmp(void *priv, const struct list_head *a,
1924 const struct list_head *b)
1925 {
1926 struct request *rqa = container_of(a, struct request, queuelist);
1927 struct request *rqb = container_of(b, struct request, queuelist);
1928
1929 if (rqa->mq_ctx != rqb->mq_ctx)
1930 return rqa->mq_ctx > rqb->mq_ctx;
1931 if (rqa->mq_hctx != rqb->mq_hctx)
1932 return rqa->mq_hctx > rqb->mq_hctx;
1933
1934 return blk_rq_pos(rqa) > blk_rq_pos(rqb);
1935 }
1936
1937 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1938 {
1939 LIST_HEAD(list);
1940
1941 if (list_empty(&plug->mq_list))
1942 return;
1943 list_splice_init(&plug->mq_list, &list);
1944
1945 if (plug->rq_count > 2 && plug->multiple_queues)
1946 list_sort(NULL, &list, plug_rq_cmp);
1947
1948 plug->rq_count = 0;
1949
1950 do {
1951 struct list_head rq_list;
1952 struct request *rq, *head_rq = list_entry_rq(list.next);
1953 struct list_head *pos = &head_rq->queuelist; /* skip first */
1954 struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
1955 struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
1956 unsigned int depth = 1;
1957
1958 list_for_each_continue(pos, &list) {
1959 rq = list_entry_rq(pos);
1960 BUG_ON(!rq->q);
1961 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
1962 break;
1963 depth++;
1964 }
1965
1966 list_cut_before(&rq_list, &list, pos);
1967 trace_block_unplug(head_rq->q, depth, !from_schedule);
1968 blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
1969 from_schedule);
1970 } while(!list_empty(&list));
1971 }
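/*
 * Example (sketch, not part of this file): plugging is driven by the
 * submitter. Requests issued between blk_start_plug() and blk_finish_plug()
 * accumulate on plug->mq_list and are handed to the scheduler or the
 * hardware queues by blk_mq_flush_plug_list() when the plug is finished or
 * when the task schedules out:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);	// ends up calling blk_mq_flush_plug_list()
 */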
1972
1973 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
1974 unsigned int nr_segs)
1975 {
1976 int err;
1977
1978 if (bio->bi_opf & REQ_RAHEAD)
1979 rq->cmd_flags |= REQ_FAILFAST_MASK;
1980
1981 rq->__sector = bio->bi_iter.bi_sector;
1982 rq->write_hint = bio->bi_write_hint;
1983 blk_rq_bio_prep(rq, bio, nr_segs);
1984
1985 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
1986 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
1987 WARN_ON_ONCE(err);
1988
1989 blk_account_io_start(rq);
1990 }
1991
1992 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1993 struct request *rq,
1994 blk_qc_t *cookie, bool last)
1995 {
1996 struct request_queue *q = rq->q;
1997 struct blk_mq_queue_data bd = {
1998 .rq = rq,
1999 .last = last,
2000 };
2001 blk_qc_t new_cookie;
2002 blk_status_t ret;
2003
2004 new_cookie = request_to_qc_t(hctx, rq);
2005
2006 /*
2007  * For BLK_STS_OK we are done. For a fatal error, the caller may kill
2008  * the request. For any other error (busy), just add the request back
2009  * to our list as we previously would have done.
2010 */
2011 ret = q->mq_ops->queue_rq(hctx, &bd);
2012 switch (ret) {
2013 case BLK_STS_OK:
2014 blk_mq_update_dispatch_busy(hctx, false);
2015 *cookie = new_cookie;
2016 break;
2017 case BLK_STS_RESOURCE:
2018 case BLK_STS_DEV_RESOURCE:
2019 blk_mq_update_dispatch_busy(hctx, true);
2020 __blk_mq_requeue_request(rq);
2021 break;
2022 default:
2023 blk_mq_update_dispatch_busy(hctx, false);
2024 *cookie = BLK_QC_T_NONE;
2025 break;
2026 }
2027
2028 return ret;
2029 }
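/*
 * Example (minimal sketch of a driver's ->queue_rq(); the mydev_* helpers
 * are hypothetical) showing the status codes handled above:
 *
 *	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (!mydev_have_slot(hctx->driver_data))
 *			return BLK_STS_RESOURCE;	// treated as busy above
 *
 *		blk_mq_start_request(rq);
 *		mydev_submit(rq, bd->last);	// bd->last: no further requests queued
 *		return BLK_STS_OK;
 *	}
 */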
2030
2031 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2032 struct request *rq,
2033 blk_qc_t *cookie,
2034 bool bypass_insert, bool last)
2035 {
2036 struct request_queue *q = rq->q;
2037 bool run_queue = true;
2038 int budget_token;
2039
2040 /*
2041 * RCU or SRCU read lock is needed before checking quiesced flag.
2042 *
2043  * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2044  * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
2045  * so that the driver doesn't try to dispatch again.
2046 */
2047 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2048 run_queue = false;
2049 bypass_insert = false;
2050 goto insert;
2051 }
2052
2053 if (q->elevator && !bypass_insert)
2054 goto insert;
2055
2056 budget_token = blk_mq_get_dispatch_budget(q);
2057 if (budget_token < 0)
2058 goto insert;
2059
2060 blk_mq_set_rq_budget_token(rq, budget_token);
2061
2062 if (!blk_mq_get_driver_tag(rq)) {
2063 blk_mq_put_dispatch_budget(q, budget_token);
2064 goto insert;
2065 }
2066
2067 return __blk_mq_issue_directly(hctx, rq, cookie, last);
2068 insert:
2069 if (bypass_insert)
2070 return BLK_STS_RESOURCE;
2071
2072 blk_mq_sched_insert_request(rq, false, run_queue, false);
2073
2074 return BLK_STS_OK;
2075 }
2076
2077 /**
2078 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2079 * @hctx: Pointer of the associated hardware queue.
2080 * @rq: Pointer to request to be sent.
2081 * @cookie: Request queue cookie.
2082 *
2083 * If the device has enough resources to accept a new request now, send the
2084  * request directly to the device driver. Else, insert it at the hctx->dispatch
2085  * queue, so we can try to send it again in the future. Requests inserted at
2086  * this queue have higher priority.
2087 */
2088 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2089 struct request *rq, blk_qc_t *cookie)
2090 {
2091 blk_status_t ret;
2092 int srcu_idx;
2093
2094 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
2095
2096 hctx_lock(hctx, &srcu_idx);
2097
2098 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
2099 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2100 blk_mq_request_bypass_insert(rq, false, true);
2101 else if (ret != BLK_STS_OK)
2102 blk_mq_end_request(rq, ret);
2103
2104 hctx_unlock(hctx, srcu_idx);
2105 }
2106
2107 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2108 {
2109 blk_status_t ret;
2110 int srcu_idx;
2111 blk_qc_t unused_cookie;
2112 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2113
2114 hctx_lock(hctx, &srcu_idx);
2115 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
2116 hctx_unlock(hctx, srcu_idx);
2117
2118 return ret;
2119 }
2120
2121 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2122 struct list_head *list)
2123 {
2124 int queued = 0;
2125 int errors = 0;
2126
2127 while (!list_empty(list)) {
2128 blk_status_t ret;
2129 struct request *rq = list_first_entry(list, struct request,
2130 queuelist);
2131
2132 list_del_init(&rq->queuelist);
2133 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2134 if (ret != BLK_STS_OK) {
2135 errors++;
2136 if (ret == BLK_STS_RESOURCE ||
2137 ret == BLK_STS_DEV_RESOURCE) {
2138 blk_mq_request_bypass_insert(rq, false,
2139 list_empty(list));
2140 break;
2141 }
2142 blk_mq_end_request(rq, ret);
2143 } else
2144 queued++;
2145 }
2146
2147 /*
2148 * If we didn't flush the entire list, we could have told
2149 * the driver there was more coming, but that turned out to
2150 * be a lie.
2151 */
2152 if ((!list_empty(list) || errors) &&
2153 hctx->queue->mq_ops->commit_rqs && queued)
2154 hctx->queue->mq_ops->commit_rqs(hctx);
2155 }
2156
2157 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2158 {
2159 list_add_tail(&rq->queuelist, &plug->mq_list);
2160 plug->rq_count++;
2161 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
2162 struct request *tmp;
2163
2164 tmp = list_first_entry(&plug->mq_list, struct request,
2165 queuelist);
2166 if (tmp->q != rq->q)
2167 plug->multiple_queues = true;
2168 }
2169 }
2170
2171 /*
2172 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
2173 * queues. This is important for md arrays to benefit from merging
2174 * requests.
2175 */
2176 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
2177 {
2178 if (plug->multiple_queues)
2179 return BLK_MAX_REQUEST_COUNT * 2;
2180 return BLK_MAX_REQUEST_COUNT;
2181 }
2182
2183 /**
2184 * blk_mq_submit_bio - Create and send a request to block device.
2185 * @bio: Bio pointer.
2186 *
2187  * Builds up a request structure from @q and @bio and sends it to the device.
2188  * The request may not be queued directly to hardware if:
2189  * * This request can be merged with another one
2190  * * We want to place the request on the plug queue for possible future merging
2191  * * There is an IO scheduler active at this queue
2192  *
2193  * It will not queue the request if there is an error with the bio or at
2194  * request creation.
2195 *
2196 * Returns: Request queue cookie.
2197 */
2198 blk_qc_t blk_mq_submit_bio(struct bio *bio)
2199 {
2200 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
2201 const int is_sync = op_is_sync(bio->bi_opf);
2202 const int is_flush_fua = op_is_flush(bio->bi_opf);
2203 struct blk_mq_alloc_data data = {
2204 .q = q,
2205 };
2206 struct request *rq;
2207 struct blk_plug *plug;
2208 struct request *same_queue_rq = NULL;
2209 unsigned int nr_segs;
2210 blk_qc_t cookie;
2211 blk_status_t ret;
2212 bool hipri;
2213
2214 blk_queue_bounce(q, &bio);
2215 __blk_queue_split(&bio, &nr_segs);
2216 if (!bio)
2217 goto queue_exit;
2218
2219 if (!bio_integrity_prep(bio))
2220 goto queue_exit;
2221
2222 if (!is_flush_fua && !blk_queue_nomerges(q) &&
2223 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
2224 goto queue_exit;
2225
2226 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2227 goto queue_exit;
2228
2229 rq_qos_throttle(q, bio);
2230
2231 hipri = bio->bi_opf & REQ_HIPRI;
2232
2233 data.cmd_flags = bio->bi_opf;
2234 rq = __blk_mq_alloc_request(&data);
2235 if (unlikely(!rq)) {
2236 rq_qos_cleanup(q, bio);
2237 if (bio->bi_opf & REQ_NOWAIT)
2238 bio_wouldblock_error(bio);
2239 goto queue_exit;
2240 }
2241
2242 trace_block_getrq(bio);
2243
2244 rq_qos_track(q, rq, bio);
2245
2246 cookie = request_to_qc_t(data.hctx, rq);
2247
2248 blk_mq_bio_to_request(rq, bio, nr_segs);
2249
2250 ret = blk_crypto_rq_get_keyslot(rq);
2251 if (ret != BLK_STS_OK) {
2252 bio->bi_status = ret;
2253 bio_endio(bio);
2254 blk_mq_free_request(rq);
2255 return BLK_QC_T_NONE;
2256 }
2257
2258 plug = blk_mq_plug(q, bio);
2259 if (unlikely(is_flush_fua)) {
2260 /* Bypass scheduler for flush requests */
2261 blk_insert_flush(rq);
2262 blk_mq_run_hw_queue(data.hctx, true);
2263 } else if (plug && (q->nr_hw_queues == 1 ||
2264 blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
2265 q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
2266 /*
2267 * Use plugging if we have a ->commit_rqs() hook as well, as
2268 * we know the driver uses bd->last in a smart fashion.
2269 *
2270  * Use normal plugging if this disk is a slow HDD, as sequential
2271 * IO may benefit a lot from plug merging.
2272 */
2273 unsigned int request_count = plug->rq_count;
2274 struct request *last = NULL;
2275
2276 if (!request_count)
2277 trace_block_plug(q);
2278 else
2279 last = list_entry_rq(plug->mq_list.prev);
2280
2281 if (request_count >= blk_plug_max_rq_count(plug) || (last &&
2282 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
2283 blk_flush_plug_list(plug, false);
2284 trace_block_plug(q);
2285 }
2286
2287 blk_add_rq_to_plug(plug, rq);
2288 } else if (q->elevator) {
2289 /* Insert the request at the IO scheduler queue */
2290 blk_mq_sched_insert_request(rq, false, true, true);
2291 } else if (plug && !blk_queue_nomerges(q)) {
2292 /*
2293 * We do limited plugging. If the bio can be merged, do that.
2294 * Otherwise the existing request in the plug list will be
2295  * issued. So the plug list will have at most one request.
2296 * The plug list might get flushed before this. If that happens,
2297 * the plug list is empty, and same_queue_rq is invalid.
2298 */
2299 if (list_empty(&plug->mq_list))
2300 same_queue_rq = NULL;
2301 if (same_queue_rq) {
2302 list_del_init(&same_queue_rq->queuelist);
2303 plug->rq_count--;
2304 }
2305 blk_add_rq_to_plug(plug, rq);
2306 trace_block_plug(q);
2307
2308 if (same_queue_rq) {
2309 data.hctx = same_queue_rq->mq_hctx;
2310 trace_block_unplug(q, 1, true);
2311 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
2312 &cookie);
2313 }
2314 } else if ((q->nr_hw_queues > 1 && is_sync) ||
2315 !data.hctx->dispatch_busy) {
2316 /*
2317 * There is no scheduler and we can try to send directly
2318 * to the hardware.
2319 */
2320 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
2321 } else {
2322 /* Default case. */
2323 blk_mq_sched_insert_request(rq, false, true, true);
2324 }
2325
2326 if (!hipri)
2327 return BLK_QC_T_NONE;
2328 return cookie;
2329 queue_exit:
2330 blk_queue_exit(q);
2331 return BLK_QC_T_NONE;
2332 }
2333
2334 static size_t order_to_size(unsigned int order)
2335 {
2336 return (size_t)PAGE_SIZE << order;
2337 }
2338
2339 /* called before freeing request pool in @tags */
2340 static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
2341 struct blk_mq_tags *tags, unsigned int hctx_idx)
2342 {
2343 struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
2344 struct page *page;
2345 unsigned long flags;
2346
2347 list_for_each_entry(page, &tags->page_list, lru) {
2348 unsigned long start = (unsigned long)page_address(page);
2349 unsigned long end = start + order_to_size(page->private);
2350 int i;
2351
2352 for (i = 0; i < set->queue_depth; i++) {
2353 struct request *rq = drv_tags->rqs[i];
2354 unsigned long rq_addr = (unsigned long)rq;
2355
2356 if (rq_addr >= start && rq_addr < end) {
2357 WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
2358 cmpxchg(&drv_tags->rqs[i], rq, NULL);
2359 }
2360 }
2361 }
2362
2363 /*
2364  * Wait until all pending iterations are done.
2365  *
2366  * The request references have been cleared, and clearing them is
2367  * guaranteed to be observed after ->lock is released.
2368 */
2369 spin_lock_irqsave(&drv_tags->lock, flags);
2370 spin_unlock_irqrestore(&drv_tags->lock, flags);
2371 }
2372
2373 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2374 unsigned int hctx_idx)
2375 {
2376 struct page *page;
2377
2378 if (tags->rqs && set->ops->exit_request) {
2379 int i;
2380
2381 for (i = 0; i < tags->nr_tags; i++) {
2382 struct request *rq = tags->static_rqs[i];
2383
2384 if (!rq)
2385 continue;
2386 set->ops->exit_request(set, rq, hctx_idx);
2387 tags->static_rqs[i] = NULL;
2388 }
2389 }
2390
2391 blk_mq_clear_rq_mapping(set, tags, hctx_idx);
2392
2393 while (!list_empty(&tags->page_list)) {
2394 page = list_first_entry(&tags->page_list, struct page, lru);
2395 list_del_init(&page->lru);
2396 /*
2397 * Remove kmemleak object previously allocated in
2398 * blk_mq_alloc_rqs().
2399 */
2400 kmemleak_free(page_address(page));
2401 __free_pages(page, page->private);
2402 }
2403 }
2404
2405 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
2406 {
2407 kfree(tags->rqs);
2408 tags->rqs = NULL;
2409 kfree(tags->static_rqs);
2410 tags->static_rqs = NULL;
2411
2412 blk_mq_free_tags(tags, flags);
2413 }
2414
2415 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2416 unsigned int hctx_idx,
2417 unsigned int nr_tags,
2418 unsigned int reserved_tags,
2419 unsigned int flags)
2420 {
2421 struct blk_mq_tags *tags;
2422 int node;
2423
2424 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2425 if (node == NUMA_NO_NODE)
2426 node = set->numa_node;
2427
2428 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
2429 if (!tags)
2430 return NULL;
2431
2432 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2433 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2434 node);
2435 if (!tags->rqs) {
2436 blk_mq_free_tags(tags, flags);
2437 return NULL;
2438 }
2439
2440 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
2441 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2442 node);
2443 if (!tags->static_rqs) {
2444 kfree(tags->rqs);
2445 blk_mq_free_tags(tags, flags);
2446 return NULL;
2447 }
2448
2449 return tags;
2450 }
2451
2452 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2453 unsigned int hctx_idx, int node)
2454 {
2455 int ret;
2456
2457 if (set->ops->init_request) {
2458 ret = set->ops->init_request(set, rq, hctx_idx, node);
2459 if (ret)
2460 return ret;
2461 }
2462
2463 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
2464 return 0;
2465 }
2466
2467 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2468 unsigned int hctx_idx, unsigned int depth)
2469 {
2470 unsigned int i, j, entries_per_page, max_order = 4;
2471 size_t rq_size, left;
2472 int node;
2473
2474 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
2475 if (node == NUMA_NO_NODE)
2476 node = set->numa_node;
2477
2478 INIT_LIST_HEAD(&tags->page_list);
2479
2480 /*
2481 * rq_size is the size of the request plus driver payload, rounded
2482 * to the cacheline size
2483 */
2484 rq_size = round_up(sizeof(struct request) + set->cmd_size,
2485 cache_line_size());
2486 trace_android_vh_blk_alloc_rqs(&rq_size, set, tags, hctx_idx);
2487 left = rq_size * depth;
2488
2489 for (i = 0; i < depth; ) {
2490 int this_order = max_order;
2491 struct page *page;
2492 int to_do;
2493 void *p;
2494
2495 while (this_order && left < order_to_size(this_order - 1))
2496 this_order--;
2497
2498 do {
2499 page = alloc_pages_node(node,
2500 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2501 this_order);
2502 if (page)
2503 break;
2504 if (!this_order--)
2505 break;
2506 if (order_to_size(this_order) < rq_size)
2507 break;
2508 } while (1);
2509
2510 if (!page)
2511 goto fail;
2512
2513 page->private = this_order;
2514 list_add_tail(&page->lru, &tags->page_list);
2515
2516 p = page_address(page);
2517 /*
2518 * Allow kmemleak to scan these pages as they contain pointers
2519  * to additional allocations, e.g. those made via ops->init_request().
2520 */
2521 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2522 entries_per_page = order_to_size(this_order) / rq_size;
2523 to_do = min(entries_per_page, depth - i);
2524 left -= to_do * rq_size;
2525 for (j = 0; j < to_do; j++) {
2526 struct request *rq = p;
2527
2528 tags->static_rqs[i] = rq;
2529 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2530 tags->static_rqs[i] = NULL;
2531 goto fail;
2532 }
2533
2534 p += rq_size;
2535 i++;
2536 }
2537 }
2538 return 0;
2539
2540 fail:
2541 blk_mq_free_rqs(set, tags, hctx_idx);
2542 return -ENOMEM;
2543 }
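/*
 * Worked example of the sizing above (illustrative numbers, assuming 4K
 * pages): with sizeof(struct request) + cmd_size rounded up to 384 bytes
 * and depth = 256, left = 96KB, so the loop starts at this_order = 4
 * (a 64KB chunk holding 170 requests) and only drops to smaller orders
 * when a higher-order allocation fails or a smaller chunk would suffice.
 */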
2544
2545 struct rq_iter_data {
2546 struct blk_mq_hw_ctx *hctx;
2547 bool has_rq;
2548 };
2549
2550 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
2551 {
2552 struct rq_iter_data *iter_data = data;
2553
2554 if (rq->mq_hctx != iter_data->hctx)
2555 return true;
2556 iter_data->has_rq = true;
2557 return false;
2558 }
2559
2560 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
2561 {
2562 struct blk_mq_tags *tags = hctx->sched_tags ?
2563 hctx->sched_tags : hctx->tags;
2564 struct rq_iter_data data = {
2565 .hctx = hctx,
2566 };
2567
2568 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
2569 return data.has_rq;
2570 }
2571
2572 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
2573 struct blk_mq_hw_ctx *hctx)
2574 {
2575 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
2576 return false;
2577 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
2578 return false;
2579 return true;
2580 }
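/*
 * Worked example: with hctx->cpumask = {2, 5} and cpu_online_mask = {0, 1, 2},
 * blk_mq_last_cpu_in_hctx(2, hctx) returns true. CPU 2 is the first online CPU
 * in the mask and no higher online CPU maps to this hctx, so offlining CPU 2
 * would leave the hctx without any online CPU.
 */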
2581
2582 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
2583 {
2584 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2585 struct blk_mq_hw_ctx, cpuhp_online);
2586
2587 if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
2588 !blk_mq_last_cpu_in_hctx(cpu, hctx))
2589 return 0;
2590
2591 /*
2592  * Prevent new requests from being allocated on the current hctx.
2593  *
2594  * The smp_mb__after_atomic() pairs with the implied barrier in
2595  * test_and_set_bit_lock() in sbitmap_get(), and ensures the inactive
2596  * flag is seen once we return from the tag allocator.
2597 */
2598 set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2599 smp_mb__after_atomic();
2600
2601 /*
2602 * Try to grab a reference to the queue and wait for any outstanding
2603  * requests. If we could not grab a reference, the queue has been
2604 * frozen and there are no requests.
2605 */
2606 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
2607 while (blk_mq_hctx_has_requests(hctx))
2608 msleep(5);
2609 percpu_ref_put(&hctx->queue->q_usage_counter);
2610 }
2611
2612 return 0;
2613 }
2614
2615 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
2616 {
2617 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
2618 struct blk_mq_hw_ctx, cpuhp_online);
2619
2620 if (cpumask_test_cpu(cpu, hctx->cpumask))
2621 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
2622 return 0;
2623 }
2624
2625 /*
2626  * 'cpu' is going away. Splice any existing rq_list entries from this
2627  * software queue to the hw queue dispatch list, and ensure that the
2628  * hw queue gets run.
2629 */
2630 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2631 {
2632 struct blk_mq_hw_ctx *hctx;
2633 struct blk_mq_ctx *ctx;
2634 LIST_HEAD(tmp);
2635 enum hctx_type type;
2636
2637 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2638 if (!cpumask_test_cpu(cpu, hctx->cpumask))
2639 return 0;
2640
2641 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2642 type = hctx->type;
2643
2644 spin_lock(&ctx->lock);
2645 if (!list_empty(&ctx->rq_lists[type])) {
2646 list_splice_init(&ctx->rq_lists[type], &tmp);
2647 blk_mq_hctx_clear_pending(hctx, ctx);
2648 }
2649 spin_unlock(&ctx->lock);
2650
2651 if (list_empty(&tmp))
2652 return 0;
2653
2654 spin_lock(&hctx->lock);
2655 list_splice_tail_init(&tmp, &hctx->dispatch);
2656 spin_unlock(&hctx->lock);
2657
2658 blk_mq_run_hw_queue(hctx, true);
2659 return 0;
2660 }
2661
2662 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2663 {
2664 if (!(hctx->flags & BLK_MQ_F_STACKING))
2665 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2666 &hctx->cpuhp_online);
2667 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2668 &hctx->cpuhp_dead);
2669 }
2670
2671 /*
2672  * Before freeing the hw queue, clear the flush request reference in
2673  * tags->rqs[] to avoid a potential use-after-free.
2674 */
2675 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
2676 unsigned int queue_depth, struct request *flush_rq)
2677 {
2678 int i;
2679 unsigned long flags;
2680
2681 /* The hw queue may not be mapped yet */
2682 if (!tags)
2683 return;
2684
2685 WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
2686
2687 for (i = 0; i < queue_depth; i++)
2688 cmpxchg(&tags->rqs[i], flush_rq, NULL);
2689
2690 /*
2691  * Wait until all pending iterations are done.
2692  *
2693  * The request reference has been cleared, and clearing it is
2694  * guaranteed to be observed after ->lock is released.
2695 */
2696 spin_lock_irqsave(&tags->lock, flags);
2697 spin_unlock_irqrestore(&tags->lock, flags);
2698 }
2699
2700 /* hctx->ctxs will be freed in queue's release handler */
2701 static void blk_mq_exit_hctx(struct request_queue *q,
2702 struct blk_mq_tag_set *set,
2703 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2704 {
2705 struct request *flush_rq = hctx->fq->flush_rq;
2706
2707 if (blk_mq_hw_queue_mapped(hctx))
2708 blk_mq_tag_idle(hctx);
2709
2710 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
2711 set->queue_depth, flush_rq);
2712 if (set->ops->exit_request)
2713 set->ops->exit_request(set, flush_rq, hctx_idx);
2714
2715 if (set->ops->exit_hctx)
2716 set->ops->exit_hctx(hctx, hctx_idx);
2717
2718 blk_mq_remove_cpuhp(hctx);
2719
2720 spin_lock(&q->unused_hctx_lock);
2721 list_add(&hctx->hctx_list, &q->unused_hctx_list);
2722 spin_unlock(&q->unused_hctx_lock);
2723 }
2724
2725 static void blk_mq_exit_hw_queues(struct request_queue *q,
2726 struct blk_mq_tag_set *set, int nr_queue)
2727 {
2728 struct blk_mq_hw_ctx *hctx;
2729 unsigned int i;
2730
2731 queue_for_each_hw_ctx(q, hctx, i) {
2732 if (i == nr_queue)
2733 break;
2734 blk_mq_debugfs_unregister_hctx(hctx);
2735 blk_mq_exit_hctx(q, set, hctx, i);
2736 }
2737 }
2738
2739 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2740 {
2741 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2742
2743 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2744 __alignof__(struct blk_mq_hw_ctx)) !=
2745 sizeof(struct blk_mq_hw_ctx));
2746
2747 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2748 hw_ctx_size += sizeof(struct srcu_struct);
2749
2750 return hw_ctx_size;
2751 }
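/*
 * Explanatory note (added): the BUILD_BUG_ON above verifies that 'srcu' sits
 * at the very end of struct blk_mq_hw_ctx (it is a flexible array member), so
 * for BLK_MQ_F_BLOCKING tag sets the srcu_struct can be co-allocated simply
 * by growing the hctx allocation, roughly:
 *
 *	hctx = kzalloc_node(sizeof(*hctx) + sizeof(struct srcu_struct),
 *			    gfp, node);
 *	...
 *	init_srcu_struct(hctx->srcu);
 */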
2752
2753 static int blk_mq_init_hctx(struct request_queue *q,
2754 struct blk_mq_tag_set *set,
2755 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2756 {
2757 hctx->queue_num = hctx_idx;
2758
2759 if (!(hctx->flags & BLK_MQ_F_STACKING))
2760 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
2761 &hctx->cpuhp_online);
2762 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2763
2764 hctx->tags = set->tags[hctx_idx];
2765
2766 if (set->ops->init_hctx &&
2767 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2768 goto unregister_cpu_notifier;
2769
2770 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
2771 hctx->numa_node))
2772 goto exit_hctx;
2773 return 0;
2774
2775 exit_hctx:
2776 if (set->ops->exit_hctx)
2777 set->ops->exit_hctx(hctx, hctx_idx);
2778 unregister_cpu_notifier:
2779 blk_mq_remove_cpuhp(hctx);
2780 return -1;
2781 }
2782
2783 static struct blk_mq_hw_ctx *
2784 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
2785 int node)
2786 {
2787 struct blk_mq_hw_ctx *hctx;
2788 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
2789
2790 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
2791 if (!hctx)
2792 goto fail_alloc_hctx;
2793
2794 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
2795 goto free_hctx;
2796
2797 atomic_set(&hctx->nr_active, 0);
2798 if (node == NUMA_NO_NODE)
2799 node = set->numa_node;
2800 hctx->numa_node = node;
2801
2802 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2803 spin_lock_init(&hctx->lock);
2804 INIT_LIST_HEAD(&hctx->dispatch);
2805 hctx->queue = q;
2806 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
2807
2808 INIT_LIST_HEAD(&hctx->hctx_list);
2809
2810 /*
2811 * Allocate space for all possible cpus to avoid allocation at
2812 * runtime
2813 */
2814 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2815 gfp, node);
2816 if (!hctx->ctxs)
2817 goto free_cpumask;
2818
2819 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
2820 gfp, node, false, false))
2821 goto free_ctxs;
2822 hctx->nr_ctx = 0;
2823
2824 spin_lock_init(&hctx->dispatch_wait_lock);
2825 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2826 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2827
2828 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
2829 if (!hctx->fq)
2830 goto free_bitmap;
2831
2832 if (hctx->flags & BLK_MQ_F_BLOCKING)
2833 init_srcu_struct(hctx->srcu);
2834 blk_mq_hctx_kobj_init(hctx);
2835
2836 return hctx;
2837
2838 free_bitmap:
2839 sbitmap_free(&hctx->ctx_map);
2840 free_ctxs:
2841 kfree(hctx->ctxs);
2842 free_cpumask:
2843 free_cpumask_var(hctx->cpumask);
2844 free_hctx:
2845 kfree(hctx);
2846 fail_alloc_hctx:
2847 return NULL;
2848 }
2849
2850 static void blk_mq_init_cpu_queues(struct request_queue *q,
2851 unsigned int nr_hw_queues)
2852 {
2853 struct blk_mq_tag_set *set = q->tag_set;
2854 unsigned int i, j;
2855
2856 for_each_possible_cpu(i) {
2857 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2858 struct blk_mq_hw_ctx *hctx;
2859 int k;
2860
2861 __ctx->cpu = i;
2862 spin_lock_init(&__ctx->lock);
2863 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
2864 INIT_LIST_HEAD(&__ctx->rq_lists[k]);
2865
2866 __ctx->queue = q;
2867
2868 /*
2869 * Set local node, IFF we have more than one hw queue. If
2870 * not, we remain on the home node of the device
2871 */
2872 for (j = 0; j < set->nr_maps; j++) {
2873 hctx = blk_mq_map_queue_type(q, j, i);
2874 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2875 hctx->numa_node = cpu_to_node(i);
2876 }
2877 }
2878 }
2879
2880 static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
2881 int hctx_idx)
2882 {
2883 unsigned int flags = set->flags;
2884 int ret = 0;
2885
2886 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2887 set->queue_depth, set->reserved_tags, flags);
2888 if (!set->tags[hctx_idx])
2889 return false;
2890
2891 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2892 set->queue_depth);
2893 if (!ret)
2894 return true;
2895
2896 blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2897 set->tags[hctx_idx] = NULL;
2898 return false;
2899 }
2900
2901 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2902 unsigned int hctx_idx)
2903 {
2904 unsigned int flags = set->flags;
2905
2906 if (set->tags && set->tags[hctx_idx]) {
2907 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2908 blk_mq_free_rq_map(set->tags[hctx_idx], flags);
2909 set->tags[hctx_idx] = NULL;
2910 }
2911 }
2912
2913 static void blk_mq_map_swqueue(struct request_queue *q)
2914 {
2915 unsigned int i, j, hctx_idx;
2916 struct blk_mq_hw_ctx *hctx;
2917 struct blk_mq_ctx *ctx;
2918 struct blk_mq_tag_set *set = q->tag_set;
2919
2920 queue_for_each_hw_ctx(q, hctx, i) {
2921 cpumask_clear(hctx->cpumask);
2922 hctx->nr_ctx = 0;
2923 hctx->dispatch_from = NULL;
2924 }
2925
2926 /*
2927 * Map software to hardware queues.
2928 *
2929  * If the cpu isn't present, the cpu is mapped to the first hctx.
2930 */
2931 for_each_possible_cpu(i) {
2932
2933 ctx = per_cpu_ptr(q->queue_ctx, i);
2934 for (j = 0; j < set->nr_maps; j++) {
2935 if (!set->map[j].nr_queues) {
2936 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2937 HCTX_TYPE_DEFAULT, i);
2938 continue;
2939 }
2940 hctx_idx = set->map[j].mq_map[i];
2941 /* unmapped hw queue can be remapped after CPU topo changed */
2942 if (!set->tags[hctx_idx] &&
2943 !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
2944 /*
2945  * If tags initialization fails for some hctx,
2946  * that hctx won't be brought online. In this
2947  * case, remap the current ctx to hctx[0], which
2948  * is guaranteed to always have tags allocated.
2949 */
2950 set->map[j].mq_map[i] = 0;
2951 }
2952
2953 hctx = blk_mq_map_queue_type(q, j, i);
2954 ctx->hctxs[j] = hctx;
2955 /*
2956 * If the CPU is already set in the mask, then we've
2957 * mapped this one already. This can happen if
2958 * devices share queues across queue maps.
2959 */
2960 if (cpumask_test_cpu(i, hctx->cpumask))
2961 continue;
2962
2963 cpumask_set_cpu(i, hctx->cpumask);
2964 hctx->type = j;
2965 ctx->index_hw[hctx->type] = hctx->nr_ctx;
2966 hctx->ctxs[hctx->nr_ctx++] = ctx;
2967
2968 /*
2969 * If the nr_ctx type overflows, we have exceeded the
2970 * amount of sw queues we can support.
2971 */
2972 BUG_ON(!hctx->nr_ctx);
2973 }
2974
2975 for (; j < HCTX_MAX_TYPES; j++)
2976 ctx->hctxs[j] = blk_mq_map_queue_type(q,
2977 HCTX_TYPE_DEFAULT, i);
2978 }
2979
2980 queue_for_each_hw_ctx(q, hctx, i) {
2981 /*
2982 * If no software queues are mapped to this hardware queue,
2983 * disable it and free the request entries.
2984 */
2985 if (!hctx->nr_ctx) {
2986 /* Never unmap queue 0. We need it as a
2987  * fallback in case a new remap fails to
2988  * allocate tags.
2989 */
2990 if (i && set->tags[i])
2991 blk_mq_free_map_and_requests(set, i);
2992
2993 hctx->tags = NULL;
2994 continue;
2995 }
2996
2997 hctx->tags = set->tags[i];
2998 WARN_ON(!hctx->tags);
2999
3000 /*
3001 * Set the map size to the number of mapped software queues.
3002 * This is more accurate and more efficient than looping
3003 * over all possibly mapped software queues.
3004 */
3005 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3006
3007 /*
3008 * Initialize batch roundrobin counts
3009 */
3010 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3011 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3012 }
3013 }
3014
3015 /*
3016 * Caller needs to ensure that we're either frozen/quiesced, or that
3017 * the queue isn't live yet.
3018 */
3019 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3020 {
3021 struct blk_mq_hw_ctx *hctx;
3022 int i;
3023
3024 queue_for_each_hw_ctx(q, hctx, i) {
3025 if (shared) {
3026 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3027 } else {
3028 blk_mq_tag_idle(hctx);
3029 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3030 }
3031 }
3032 }
3033
3034 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3035 bool shared)
3036 {
3037 struct request_queue *q;
3038
3039 lockdep_assert_held(&set->tag_list_lock);
3040
3041 list_for_each_entry(q, &set->tag_list, tag_set_list) {
3042 blk_mq_freeze_queue(q);
3043 queue_set_hctx_shared(q, shared);
3044 blk_mq_unfreeze_queue(q);
3045 }
3046 }
3047
3048 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3049 {
3050 struct blk_mq_tag_set *set = q->tag_set;
3051
3052 mutex_lock(&set->tag_list_lock);
3053 list_del(&q->tag_set_list);
3054 if (list_is_singular(&set->tag_list)) {
3055 /* just transitioned to unshared */
3056 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3057 /* update existing queue */
3058 blk_mq_update_tag_set_shared(set, false);
3059 }
3060 mutex_unlock(&set->tag_list_lock);
3061 INIT_LIST_HEAD(&q->tag_set_list);
3062 }
3063
3064 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3065 struct request_queue *q)
3066 {
3067 mutex_lock(&set->tag_list_lock);
3068
3069 /*
3070 * Check to see if we're transitioning to shared (from 1 to 2 queues).
3071 */
3072 if (!list_empty(&set->tag_list) &&
3073 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3074 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3075 /* update existing queue */
3076 blk_mq_update_tag_set_shared(set, true);
3077 }
3078 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3079 queue_set_hctx_shared(q, true);
3080 list_add_tail(&q->tag_set_list, &set->tag_list);
3081
3082 mutex_unlock(&set->tag_list_lock);
3083 }
3084
3085 /* All allocations will be freed in release handler of q->mq_kobj */
3086 static int blk_mq_alloc_ctxs(struct request_queue *q)
3087 {
3088 struct blk_mq_ctxs *ctxs;
3089 int cpu;
3090
3091 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3092 if (!ctxs)
3093 return -ENOMEM;
3094
3095 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3096 if (!ctxs->queue_ctx)
3097 goto fail;
3098
3099 for_each_possible_cpu(cpu) {
3100 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3101 ctx->ctxs = ctxs;
3102 }
3103
3104 q->mq_kobj = &ctxs->kobj;
3105 q->queue_ctx = ctxs->queue_ctx;
3106
3107 return 0;
3108 fail:
3109 kfree(ctxs);
3110 return -ENOMEM;
3111 }
3112
3113 /*
3114  * This is the actual release handler for mq, but we do it from the
3115  * request queue's release handler to avoid use-after-free issues and
3116  * headaches: q->mq_kobj shouldn't have been introduced, but we can't
3117  * group the ctx/hctx kobjects without it.
3118 */
3119 void blk_mq_release(struct request_queue *q)
3120 {
3121 struct blk_mq_hw_ctx *hctx, *next;
3122 int i;
3123
3124 queue_for_each_hw_ctx(q, hctx, i)
3125 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3126
3127 /* all hctx are in .unused_hctx_list now */
3128 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3129 list_del_init(&hctx->hctx_list);
3130 kobject_put(&hctx->kobj);
3131 }
3132
3133 kfree(q->queue_hw_ctx);
3134
3135 /*
3136  * Release .mq_kobj and the sw queues' kobjects now, because
3137  * both share their lifetime with the request queue.
3138 */
3139 blk_mq_sysfs_deinit(q);
3140 }
3141
3142 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3143 void *queuedata)
3144 {
3145 struct request_queue *q;
3146 int ret;
3147
3148 q = blk_alloc_queue(set->numa_node);
3149 if (!q)
3150 return ERR_PTR(-ENOMEM);
3151 q->queuedata = queuedata;
3152 ret = blk_mq_init_allocated_queue(set, q);
3153 if (ret) {
3154 blk_cleanup_queue(q);
3155 return ERR_PTR(ret);
3156 }
3157 return q;
3158 }
3159
3160 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3161 {
3162 return blk_mq_init_queue_data(set, NULL);
3163 }
3164 EXPORT_SYMBOL(blk_mq_init_queue);
3165
3166 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
3167 struct lock_class_key *lkclass)
3168 {
3169 struct request_queue *q;
3170 struct gendisk *disk;
3171
3172 q = blk_mq_init_queue_data(set, queuedata);
3173 if (IS_ERR(q))
3174 return ERR_CAST(q);
3175
3176 disk = __alloc_disk_node(q, set->numa_node, lkclass);
3177 if (!disk) {
3178 blk_cleanup_queue(q);
3179 return ERR_PTR(-ENOMEM);
3180 }
3181 return disk;
3182 }
3183 EXPORT_SYMBOL(__blk_mq_alloc_disk);
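/*
 * Example usage from a driver's probe path (sketch; the mydev_* names are
 * hypothetical). Most drivers use the blk_mq_alloc_disk() wrapper macro from
 * the blk-mq header, which supplies the lock_class_key for
 * __blk_mq_alloc_disk():
 *
 *	disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &mydev_block_ops;
 *	disk->private_data = dev;
 */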
3184
3185 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
3186 struct blk_mq_tag_set *set, struct request_queue *q,
3187 int hctx_idx, int node)
3188 {
3189 struct blk_mq_hw_ctx *hctx = NULL, *tmp;
3190
3191 /* reuse dead hctx first */
3192 spin_lock(&q->unused_hctx_lock);
3193 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
3194 if (tmp->numa_node == node) {
3195 hctx = tmp;
3196 break;
3197 }
3198 }
3199 if (hctx)
3200 list_del_init(&hctx->hctx_list);
3201 spin_unlock(&q->unused_hctx_lock);
3202
3203 if (!hctx)
3204 hctx = blk_mq_alloc_hctx(q, set, node);
3205 if (!hctx)
3206 goto fail;
3207
3208 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
3209 goto free_hctx;
3210
3211 return hctx;
3212
3213 free_hctx:
3214 kobject_put(&hctx->kobj);
3215 fail:
3216 return NULL;
3217 }
3218
3219 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
3220 struct request_queue *q)
3221 {
3222 int i, j, end;
3223 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
3224
3225 if (q->nr_hw_queues < set->nr_hw_queues) {
3226 struct blk_mq_hw_ctx **new_hctxs;
3227
3228 new_hctxs = kcalloc_node(set->nr_hw_queues,
3229 sizeof(*new_hctxs), GFP_KERNEL,
3230 set->numa_node);
3231 if (!new_hctxs)
3232 return;
3233 if (hctxs)
3234 memcpy(new_hctxs, hctxs, q->nr_hw_queues *
3235 sizeof(*hctxs));
3236 q->queue_hw_ctx = new_hctxs;
3237 kfree(hctxs);
3238 hctxs = new_hctxs;
3239 }
3240
3241 /* protect against switching io scheduler */
3242 mutex_lock(&q->sysfs_lock);
3243 for (i = 0; i < set->nr_hw_queues; i++) {
3244 int node;
3245 struct blk_mq_hw_ctx *hctx;
3246
3247 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
3248 /*
3249 * If the hw queue has been mapped to another numa node,
3250  * we need to realloc the hctx. If allocation fails, fall back
3251  * to using the previous one.
3252 */
3253 if (hctxs[i] && (hctxs[i]->numa_node == node))
3254 continue;
3255
3256 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
3257 if (hctx) {
3258 if (hctxs[i])
3259 blk_mq_exit_hctx(q, set, hctxs[i], i);
3260 hctxs[i] = hctx;
3261 } else {
3262 if (hctxs[i])
3263 				pr_warn("Allocating new hctx on node %d failed, "
3264 					"falling back to previous one on node %d\n",
3265 					node, hctxs[i]->numa_node);
3266 else
3267 break;
3268 }
3269 }
3270 /*
3271  * Increasing nr_hw_queues failed. Free the newly allocated
3272 * hctxs and keep the previous q->nr_hw_queues.
3273 */
3274 if (i != set->nr_hw_queues) {
3275 j = q->nr_hw_queues;
3276 end = i;
3277 } else {
3278 j = i;
3279 end = q->nr_hw_queues;
3280 q->nr_hw_queues = set->nr_hw_queues;
3281 }
3282
3283 for (; j < end; j++) {
3284 struct blk_mq_hw_ctx *hctx = hctxs[j];
3285
3286 if (hctx) {
3287 if (hctx->tags)
3288 blk_mq_free_map_and_requests(set, j);
3289 blk_mq_exit_hctx(q, set, hctx, j);
3290 hctxs[j] = NULL;
3291 }
3292 }
3293 mutex_unlock(&q->sysfs_lock);
3294 }
3295
3296 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
3297 struct request_queue *q)
3298 {
3299 /* mark the queue as mq asap */
3300 q->mq_ops = set->ops;
3301
3302 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
3303 blk_mq_poll_stats_bkt,
3304 BLK_MQ_POLL_STATS_BKTS, q);
3305 if (!q->poll_cb)
3306 goto err_exit;
3307
3308 if (blk_mq_alloc_ctxs(q))
3309 goto err_poll;
3310
3311 /* init q->mq_kobj and sw queues' kobjects */
3312 blk_mq_sysfs_init(q);
3313
3314 INIT_LIST_HEAD(&q->unused_hctx_list);
3315 spin_lock_init(&q->unused_hctx_lock);
3316
3317 blk_mq_realloc_hw_ctxs(set, q);
3318 if (!q->nr_hw_queues)
3319 goto err_hctxs;
3320
3321 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
3322 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
3323
3324 q->tag_set = set;
3325
3326 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
3327 if (set->nr_maps > HCTX_TYPE_POLL &&
3328 set->map[HCTX_TYPE_POLL].nr_queues)
3329 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
3330
3331 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
3332 INIT_LIST_HEAD(&q->requeue_list);
3333 spin_lock_init(&q->requeue_lock);
3334
3335 q->nr_requests = set->queue_depth;
3336
3337 /*
3338 * Default to classic polling
3339 */
3340 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
3341
3342 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
3343 blk_mq_add_queue_tag_set(set, q);
3344 blk_mq_map_swqueue(q);
3345 return 0;
3346
3347 err_hctxs:
3348 kfree(q->queue_hw_ctx);
3349 q->nr_hw_queues = 0;
3350 blk_mq_sysfs_deinit(q);
3351 err_poll:
3352 blk_stat_free_callback(q->poll_cb);
3353 q->poll_cb = NULL;
3354 err_exit:
3355 q->mq_ops = NULL;
3356 return -ENOMEM;
3357 }
3358 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
3359
3360 /* tags can _not_ be used after returning from blk_mq_exit_queue */
3361 void blk_mq_exit_queue(struct request_queue *q)
3362 {
3363 struct blk_mq_tag_set *set = q->tag_set;
3364
3365 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
3366 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
3367 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
3368 blk_mq_del_queue_tag_set(q);
3369 }
3370
3371 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
3372 {
3373 int i;
3374
3375 for (i = 0; i < set->nr_hw_queues; i++) {
3376 if (!__blk_mq_alloc_map_and_request(set, i))
3377 goto out_unwind;
3378 cond_resched();
3379 }
3380
3381 return 0;
3382
3383 out_unwind:
3384 while (--i >= 0)
3385 blk_mq_free_map_and_requests(set, i);
3386
3387 return -ENOMEM;
3388 }
3389
3390 /*
3391 * Allocate the request maps associated with this tag_set. Note that this
3392 * may reduce the depth asked for, if memory is tight. set->queue_depth
3393 * will be updated to reflect the allocated depth.
3394 */
3395 static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
3396 {
3397 unsigned int depth;
3398 int err;
3399
3400 depth = set->queue_depth;
3401 do {
3402 err = __blk_mq_alloc_rq_maps(set);
3403 if (!err)
3404 break;
3405
3406 set->queue_depth >>= 1;
3407 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
3408 err = -ENOMEM;
3409 break;
3410 }
3411 } while (set->queue_depth);
3412
3413 if (!set->queue_depth || err) {
3414 pr_err("blk-mq: failed to allocate request map\n");
3415 return -ENOMEM;
3416 }
3417
3418 if (depth != set->queue_depth)
3419 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
3420 depth, set->queue_depth);
3421
3422 return 0;
3423 }
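/*
 * Example of the fallback above: a tag set asking for queue_depth = 1024
 * that hits -ENOMEM retries with 512, 256, ... until the allocation
 * succeeds or the depth would drop below reserved_tags + BLK_MQ_TAG_MIN,
 * at which point the whole allocation fails.
 */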
3424
3425 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
3426 {
3427 /*
3428 * blk_mq_map_queues() and multiple .map_queues() implementations
3429 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
3430 * number of hardware queues.
3431 */
3432 if (set->nr_maps == 1)
3433 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
3434
3435 if (set->ops->map_queues && !is_kdump_kernel()) {
3436 int i;
3437
3438 /*
3439  * A transport's .map_queues is usually implemented in the
3440  * following way:
3441 *
3442 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
3443 * mask = get_cpu_mask(queue)
3444 * for_each_cpu(cpu, mask)
3445 * set->map[x].mq_map[cpu] = queue;
3446 * }
3447 *
3448  * When we need to remap, the table has to be cleared to
3449  * kill stale mappings, since one CPU may not be mapped
3450  * to any hw queue.
3451 */
3452 for (i = 0; i < set->nr_maps; i++)
3453 blk_mq_clear_mq_map(&set->map[i]);
3454
3455 return set->ops->map_queues(set);
3456 } else {
3457 BUG_ON(set->nr_maps > 1);
3458 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
3459 }
3460 }
3461
3462 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
3463 int cur_nr_hw_queues, int new_nr_hw_queues)
3464 {
3465 struct blk_mq_tags **new_tags;
3466
3467 if (cur_nr_hw_queues >= new_nr_hw_queues)
3468 return 0;
3469
3470 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
3471 GFP_KERNEL, set->numa_node);
3472 if (!new_tags)
3473 return -ENOMEM;
3474
3475 if (set->tags)
3476 memcpy(new_tags, set->tags, cur_nr_hw_queues *
3477 sizeof(*set->tags));
3478 kfree(set->tags);
3479 set->tags = new_tags;
3480 set->nr_hw_queues = new_nr_hw_queues;
3481
3482 return 0;
3483 }
3484
3485 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
3486 int new_nr_hw_queues)
3487 {
3488 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
3489 }
3490
3491 /*
3492 * Alloc a tag set to be associated with one or more request queues.
3493  * May fail with -EINVAL for various error conditions. May adjust the
3494  * requested depth down, if it's too large. In that case, the adjusted
3495  * value will be stored in set->queue_depth.
3496 */
3497 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
3498 {
3499 int i, ret;
3500
3501 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
3502
3503 if (!set->nr_hw_queues)
3504 return -EINVAL;
3505 if (!set->queue_depth)
3506 return -EINVAL;
3507 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
3508 return -EINVAL;
3509
3510 if (!set->ops->queue_rq)
3511 return -EINVAL;
3512
3513 if (!set->ops->get_budget ^ !set->ops->put_budget)
3514 return -EINVAL;
3515
3516 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
3517 pr_info("blk-mq: reduced tag depth to %u\n",
3518 BLK_MQ_MAX_DEPTH);
3519 set->queue_depth = BLK_MQ_MAX_DEPTH;
3520 }
3521
3522 if (!set->nr_maps)
3523 set->nr_maps = 1;
3524 else if (set->nr_maps > HCTX_MAX_TYPES)
3525 return -EINVAL;
3526
3527 /*
3528 * If a crashdump is active, then we are potentially in a very
3529  * memory-constrained environment. Limit ourselves to 1 queue and
3530 * 64 tags to prevent using too much memory.
3531 */
3532 if (is_kdump_kernel()) {
3533 set->nr_hw_queues = 1;
3534 set->nr_maps = 1;
3535 set->queue_depth = min(64U, set->queue_depth);
3536 }
3537 /*
3538  * There is no use for more h/w queues than CPUs if we just have
3539  * a single map.
3540 */
3541 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
3542 set->nr_hw_queues = nr_cpu_ids;
3543
3544 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
3545 return -ENOMEM;
3546
3547 ret = -ENOMEM;
3548 for (i = 0; i < set->nr_maps; i++) {
3549 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
3550 sizeof(set->map[i].mq_map[0]),
3551 GFP_KERNEL, set->numa_node);
3552 if (!set->map[i].mq_map)
3553 goto out_free_mq_map;
3554 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
3555 }
3556
3557 ret = blk_mq_update_queue_map(set);
3558 if (ret)
3559 goto out_free_mq_map;
3560
3561 ret = blk_mq_alloc_map_and_requests(set);
3562 if (ret)
3563 goto out_free_mq_map;
3564
3565 if (blk_mq_is_sbitmap_shared(set->flags)) {
3566 atomic_set(&set->active_queues_shared_sbitmap, 0);
3567
3568 if (blk_mq_init_shared_sbitmap(set)) {
3569 ret = -ENOMEM;
3570 goto out_free_mq_rq_maps;
3571 }
3572 }
3573
3574 mutex_init(&set->tag_list_lock);
3575 INIT_LIST_HEAD(&set->tag_list);
3576
3577 return 0;
3578
3579 out_free_mq_rq_maps:
3580 for (i = 0; i < set->nr_hw_queues; i++)
3581 blk_mq_free_map_and_requests(set, i);
3582 out_free_mq_map:
3583 for (i = 0; i < set->nr_maps; i++) {
3584 kfree(set->map[i].mq_map);
3585 set->map[i].mq_map = NULL;
3586 }
3587 kfree(set->tags);
3588 set->tags = NULL;
3589 return ret;
3590 }
3591 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
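/*
 * Example of a driver filling in a tag set (sketch; the mydev_* names are
 * hypothetical, error handling trimmed):
 *
 *	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *	dev->tag_set.ops = &mydev_mq_ops;
 *	dev->tag_set.nr_hw_queues = num_online_cpus();
 *	dev->tag_set.queue_depth = 128;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct mydev_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	dev->tag_set.driver_data = dev;
 *
 *	err = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (err)
 *		return err;
 */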
3592
3593 /* allocate and initialize a tagset for a simple single-queue device */
3594 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
3595 const struct blk_mq_ops *ops, unsigned int queue_depth,
3596 unsigned int set_flags)
3597 {
3598 memset(set, 0, sizeof(*set));
3599 set->ops = ops;
3600 set->nr_hw_queues = 1;
3601 set->nr_maps = 1;
3602 set->queue_depth = queue_depth;
3603 set->numa_node = NUMA_NO_NODE;
3604 set->flags = set_flags;
3605 return blk_mq_alloc_tag_set(set);
3606 }
3607 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
3608
3609 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
3610 {
3611 int i, j;
3612
3613 for (i = 0; i < set->nr_hw_queues; i++)
3614 blk_mq_free_map_and_requests(set, i);
3615
3616 if (blk_mq_is_sbitmap_shared(set->flags))
3617 blk_mq_exit_shared_sbitmap(set);
3618
3619 for (j = 0; j < set->nr_maps; j++) {
3620 kfree(set->map[j].mq_map);
3621 set->map[j].mq_map = NULL;
3622 }
3623
3624 kfree(set->tags);
3625 set->tags = NULL;
3626 }
3627 EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * Without a scheduler, resize the hardware tags directly.
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth instead. This is similar to what the old code
		 * would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
			if (!ret && blk_mq_is_sbitmap_shared(set->flags))
				blk_mq_tag_resize_shared_sbitmap(set, nr);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
			if (blk_mq_is_sbitmap_shared(set->flags)) {
				hctx->sched_tags->bitmap_tags =
					&q->sched_bitmap_tags;
				hctx->sched_tags->breserved_tags =
					&q->sched_breserved_tags;
			}
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
			sbitmap_queue_resize(&q->sched_bitmap_tags,
					     nr - set->reserved_tags);
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}
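
/*
 * For reference, this path is typically reached from user space through the
 * queue sysfs attribute, e.g. (device name purely illustrative):
 *
 *	echo 256 > /sys/block/nvme0n1/queue/nr_requests
 *
 * which ends up calling this function with nr == 256 once the sysfs handler
 * has validated the value.
 */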

/*
 * request_queue and elevator_type pair.
 * It is only used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in the qe pair list and switch the
 * io scheduler to 'none'.
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference to the io scheduler
	 * module taken by elevator_get will be dropped as well, so take an
	 * extra module reference here to prevent the scheduler module from
	 * being removed while we still need it.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

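/* Restore the io scheduler recorded for @q by blk_mq_elv_switch_none(). */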
static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t = NULL;

	list_for_each_entry(qe, head, node)
		if (qe->q == q) {
			t = qe->type;
			break;
		}

	if (!t)
		return;

	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
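
/*
 * Illustrative sketch (hypothetical driver code, not from any in-tree
 * driver; my_dev_queue_count() is made up): a controller reset handler
 * re-reads how many hardware queues the device exposes and propagates
 * that to blk-mq:
 *
 *	nr = min_t(unsigned int, my_dev_queue_count(dev), num_online_cpus());
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, nr);
 *
 * All queues on the tag set are frozen while the remap happens, so
 * in-flight requests drain before the hardware context count changes.
 */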

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users.
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size,
	 * if available, which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
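
/*
 * Worked example (numbers purely illustrative): if the stats bucket for
 * this request reports a mean completion time of 8000 ns, the code above
 * returns (8000 + 1) / 2 = 4000 ns, so the task sleeps for roughly half
 * the expected service time before it starts busy-polling.
 */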

static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool blk_mq_poll_hybrid(struct request_queue *q,
			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
{
	struct request *rq;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		return false;

	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return blk_mq_poll_hybrid_sleep(q, rq);
}

/**
 * blk_poll - poll for IO completions
 * @q: the queue
 * @cookie: cookie passed back at IO submission time
 * @spin: whether to spin for completions
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
 *    completed entries found. If @spin is true, then blk_poll will continue
 *    looping until at least one completion is found, unless the task is
 *    otherwise marked running (or we need to reschedule).
 */
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int state;

	if (!blk_qc_t_valid(cookie) ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug_list(current->plug, false);

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop. If specified not to spin,
	 * we also should not sleep.
	 */
	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
		return 1;

	hctx->poll_considered++;

	state = get_current_state();
	do {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx);
		if (ret > 0) {
			hctx->poll_success++;
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);

		if (task_is_running(current))
			return 1;
		if (ret < 0 || !spin)
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_poll);
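
/*
 * Illustrative caller-side sketch, loosely modelled on how synchronous
 * polled-IO paths drive this; "done" is a flag the caller's bi_end_io
 * handler sets and is made up here:
 *
 *	blk_qc_t cookie = submit_bio(bio);
 *
 *	while (!READ_ONCE(done)) {
 *		if (!blk_poll(q, cookie, true))
 *			blk_io_schedule();
 *	}
 */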

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

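/*
 * Cancel any pending requeue and hctx run work.  Typically used when a
 * queue is being torn down, so no delayed work can fire afterwards.
 */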
void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);