1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8 * - July 2000
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
10 */
11
12 /*
13 * This handles all read/write requests to block devices
14 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/backing-dev.h>
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/blk-mq.h>
21 #include <linux/blk-pm.h>
22 #include <linux/highmem.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/string.h>
27 #include <linux/init.h>
28 #include <linux/completion.h>
29 #include <linux/slab.h>
30 #include <linux/swap.h>
31 #include <linux/writeback.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/fault-inject.h>
34 #include <linux/list_sort.h>
35 #include <linux/delay.h>
36 #include <linux/ratelimit.h>
37 #include <linux/pm_runtime.h>
38 #include <linux/blk-cgroup.h>
39 #include <linux/t10-pi.h>
40 #include <linux/debugfs.h>
41 #include <linux/bpf.h>
42 #include <linux/psi.h>
43 #include <linux/sched/sysctl.h>
44 #include <linux/blk-crypto.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/block.h>
48
49 #include "blk.h"
50 #include "blk-mq.h"
51 #include "blk-mq-sched.h"
52 #include "blk-pm.h"
53 #include "blk-rq-qos.h"
54
55 struct dentry *blk_debugfs_root;
56
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
62
63 DEFINE_IDA(blk_queue_ida);
64
65 /*
66 * For queue allocation
67 */
68 struct kmem_cache *blk_requestq_cachep;
69
70 /*
71 * Controlling structure to kblockd
72 */
73 static struct workqueue_struct *kblockd_workqueue;
74
75 /**
76 * blk_queue_flag_set - atomically set a queue flag
77 * @flag: flag to be set
78 * @q: request queue
79 */
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
81 {
82 set_bit(flag, &q->queue_flags);
83 }
84 EXPORT_SYMBOL(blk_queue_flag_set);
85
86 /**
87 * blk_queue_flag_clear - atomically clear a queue flag
88 * @flag: flag to be cleared
89 * @q: request queue
90 */
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
92 {
93 clear_bit(flag, &q->queue_flags);
94 }
95 EXPORT_SYMBOL(blk_queue_flag_clear);
96
97 /**
98 * blk_queue_flag_test_and_set - atomically test and set a queue flag
99 * @flag: flag to be set
100 * @q: request queue
101 *
102 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
103 * the flag was already set.
104 */
105 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
106 {
107 return test_and_set_bit(flag, &q->queue_flags);
108 }
109 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
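/*
 * Illustrative usage sketch (editorial example, not part of this file): a
 * driver toggling merging on its own queue with the flag helpers above. The
 * wrapper function is hypothetical; QUEUE_FLAG_NOMERGES is a real flag.
 *
 *	static void example_toggle_merges(struct request_queue *q, bool off)
 *	{
 *		if (off) {
 *			if (blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
 *				pr_debug("merging was already disabled\n");
 *		} else {
 *			blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 *		}
 *	}
 */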
110
111 void blk_rq_init(struct request_queue *q, struct request *rq)
112 {
113 memset(rq, 0, sizeof(*rq));
114
115 INIT_LIST_HEAD(&rq->queuelist);
116 rq->q = q;
117 rq->__sector = (sector_t) -1;
118 INIT_HLIST_NODE(&rq->hash);
119 RB_CLEAR_NODE(&rq->rb_node);
120 rq->tag = BLK_MQ_NO_TAG;
121 rq->internal_tag = BLK_MQ_NO_TAG;
122 rq->start_time_ns = ktime_get_ns();
123 rq->part = NULL;
124 blk_crypto_rq_set_defaults(rq);
125 }
126 EXPORT_SYMBOL(blk_rq_init);
127
128 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
129 static const char *const blk_op_name[] = {
130 REQ_OP_NAME(READ),
131 REQ_OP_NAME(WRITE),
132 REQ_OP_NAME(FLUSH),
133 REQ_OP_NAME(DISCARD),
134 REQ_OP_NAME(SECURE_ERASE),
135 REQ_OP_NAME(ZONE_RESET),
136 REQ_OP_NAME(ZONE_RESET_ALL),
137 REQ_OP_NAME(ZONE_OPEN),
138 REQ_OP_NAME(ZONE_CLOSE),
139 REQ_OP_NAME(ZONE_FINISH),
140 REQ_OP_NAME(ZONE_APPEND),
141 REQ_OP_NAME(WRITE_SAME),
142 REQ_OP_NAME(WRITE_ZEROES),
143 REQ_OP_NAME(SCSI_IN),
144 REQ_OP_NAME(SCSI_OUT),
145 REQ_OP_NAME(DRV_IN),
146 REQ_OP_NAME(DRV_OUT),
147 };
148 #undef REQ_OP_NAME
149
150 /**
151 * blk_op_str - Return the string XXX for a given REQ_OP_XXX.
152 * @op: REQ_OP_XXX.
153 *
154 * Description: Centralized block layer helper to convert REQ_OP_XXX into
155 * string format. Useful when debugging and tracing a bio or request. For an
156 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
157 */
158 inline const char *blk_op_str(unsigned int op)
159 {
160 const char *op_str = "UNKNOWN";
161
162 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
163 op_str = blk_op_name[op];
164
165 return op_str;
166 }
167 EXPORT_SYMBOL_GPL(blk_op_str);
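/*
 * Illustrative usage sketch (editorial example): blk_op_str() is typically
 * used in driver debug or error messages. The pr_debug() line below is
 * hypothetical; req_op() and blk_rq_pos() are real helpers.
 *
 *	pr_debug("%s request at sector %llu\n",
 *		 blk_op_str(req_op(rq)), (unsigned long long)blk_rq_pos(rq));
 */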
168
169 static const struct {
170 int errno;
171 const char *name;
172 } blk_errors[] = {
173 [BLK_STS_OK] = { 0, "" },
174 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
175 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
176 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
177 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
178 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
179 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
180 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
181 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
182 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
183 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
184 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
185
186 /* device mapper special case, should not leak out: */
187 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
188
189 /* zone device specific errors */
190 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
191 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
192
193 /* everything else not covered above: */
194 [BLK_STS_IOERR] = { -EIO, "I/O" },
195 };
196
197 blk_status_t errno_to_blk_status(int errno)
198 {
199 int i;
200
201 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
202 if (blk_errors[i].errno == errno)
203 return (__force blk_status_t)i;
204 }
205
206 return BLK_STS_IOERR;
207 }
208 EXPORT_SYMBOL_GPL(errno_to_blk_status);
209
210 int blk_status_to_errno(blk_status_t status)
211 {
212 int idx = (__force int)status;
213
214 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
215 return -EIO;
216 return blk_errors[idx].errno;
217 }
218 EXPORT_SYMBOL_GPL(blk_status_to_errno);
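/*
 * Illustrative sketch (editorial example): converting between the errno and
 * blk_status_t error spaces at a driver boundary. "err" and "status" are
 * hypothetical local variables.
 *
 *	blk_status_t status = errno_to_blk_status(err);	// errno -> block status
 *	int err2 = blk_status_to_errno(status);		// and back again
 */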
219
220 static void print_req_error(struct request *req, blk_status_t status,
221 const char *caller)
222 {
223 int idx = (__force int)status;
224
225 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
226 return;
227
228 printk_ratelimited(KERN_ERR
229 "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
230 "phys_seg %u prio class %u\n",
231 caller, blk_errors[idx].name,
232 req->rq_disk ? req->rq_disk->disk_name : "?",
233 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
234 req->cmd_flags & ~REQ_OP_MASK,
235 req->nr_phys_segments,
236 IOPRIO_PRIO_CLASS(req->ioprio));
237 }
238
239 static void req_bio_endio(struct request *rq, struct bio *bio,
240 unsigned int nbytes, blk_status_t error)
241 {
242 if (error)
243 bio->bi_status = error;
244
245 if (unlikely(rq->rq_flags & RQF_QUIET))
246 bio_set_flag(bio, BIO_QUIET);
247
248 bio_advance(bio, nbytes);
249
250 if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
251 /*
252 * Partial zone append completions cannot be supported as the
253 * BIO fragments may end up not being written sequentially.
254 */
255 if (bio->bi_iter.bi_size)
256 bio->bi_status = BLK_STS_IOERR;
257 else
258 bio->bi_iter.bi_sector = rq->__sector;
259 }
260
261 /* don't actually finish bio if it's part of flush sequence */
262 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
263 bio_endio(bio);
264 }
265
266 void blk_dump_rq_flags(struct request *rq, char *msg)
267 {
268 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
269 rq->rq_disk ? rq->rq_disk->disk_name : "?",
270 (unsigned long long) rq->cmd_flags);
271
272 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
273 (unsigned long long)blk_rq_pos(rq),
274 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
275 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
276 rq->bio, rq->biotail, blk_rq_bytes(rq));
277 }
278 EXPORT_SYMBOL(blk_dump_rq_flags);
279
280 /**
281 * blk_sync_queue - cancel any pending callbacks on a queue
282 * @q: the queue
283 *
284 * Description:
285 * The block layer may perform asynchronous callback activity
286 * on a queue, such as calling the unplug function after a timeout.
287 * A block device may call blk_sync_queue to ensure that any
288 * such activity is cancelled, thus allowing it to release resources
289 * that the callbacks might use. The caller must already have made sure
290 * that its ->submit_bio will not re-add plugging prior to calling
291 * this function.
292 *
293 * This function does not cancel any asynchronous activity arising
294 * out of elevator or throttling code. That would require elevator_exit()
295 * and blkcg_exit_queue() to be called with queue lock initialized.
296 *
297 */
298 void blk_sync_queue(struct request_queue *q)
299 {
300 del_timer_sync(&q->timeout);
301 cancel_work_sync(&q->timeout_work);
302 }
303 EXPORT_SYMBOL(blk_sync_queue);
304
305 /**
306 * blk_set_pm_only - increment pm_only counter
307 * @q: request queue pointer
308 */
309 void blk_set_pm_only(struct request_queue *q)
310 {
311 atomic_inc(&q->pm_only);
312 }
313 EXPORT_SYMBOL_GPL(blk_set_pm_only);
314
315 void blk_clear_pm_only(struct request_queue *q)
316 {
317 int pm_only;
318
319 pm_only = atomic_dec_return(&q->pm_only);
320 WARN_ON_ONCE(pm_only < 0);
321 if (pm_only == 0)
322 wake_up_all(&q->mq_freeze_wq);
323 }
324 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
325
326 /**
327 * blk_put_queue - decrement the request_queue refcount
328 * @q: the request_queue structure to decrement the refcount for
329 *
330 * Decrements the refcount of the request_queue kobject. When this reaches 0
331 * we'll have blk_release_queue() called.
332 *
333 * Context: Any context, but the last reference must not be dropped from
334 * atomic context.
335 */
336 void blk_put_queue(struct request_queue *q)
337 {
338 kobject_put(&q->kobj);
339 }
340 EXPORT_SYMBOL(blk_put_queue);
341
342 void blk_set_queue_dying(struct request_queue *q)
343 {
344 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
345
346 /*
347 * When the queue DYING flag is set, we need to block new requests
348 * from entering the queue, so we call blk_freeze_queue_start() to
349 * prevent I/O from crossing blk_queue_enter().
350 */
351 blk_freeze_queue_start(q);
352
353 if (queue_is_mq(q))
354 blk_mq_wake_waiters(q);
355
356 /* Make blk_queue_enter() reexamine the DYING flag. */
357 wake_up_all(&q->mq_freeze_wq);
358 }
359 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
360
361 /**
362 * blk_cleanup_queue - shutdown a request queue
363 * @q: request queue to shutdown
364 *
365 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
366 * put it. All future requests will be failed immediately with -ENODEV.
367 *
368 * Context: can sleep
369 */
370 void blk_cleanup_queue(struct request_queue *q)
371 {
372 /* cannot be called from atomic context */
373 might_sleep();
374
375 WARN_ON_ONCE(blk_queue_registered(q));
376
377 /* mark @q DYING, no new request or merges will be allowed afterwards */
378 blk_set_queue_dying(q);
379
380 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
381 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
382
383 /*
384 * Drain all requests queued before the DYING marking. Set the DEAD flag to
385 * prevent blk_mq_run_hw_queues() from accessing the hardware queues
386 * after draining has finished.
387 */
388 blk_freeze_queue(q);
389
390 rq_qos_exit(q);
391
392 blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
393
394 /* for synchronous bio-based driver finish in-flight integrity i/o */
395 blk_flush_integrity();
396
397 /* @q won't process any more requests, flush async actions */
398 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
399 blk_sync_queue(q);
400 if (queue_is_mq(q)) {
401 blk_mq_cancel_work_sync(q);
402 blk_mq_exit_queue(q);
403 }
404
405 /*
406 * In theory, request pool of sched_tags belongs to request queue.
407 * However, the current implementation requires tag_set for freeing
408 * requests, so free the pool now.
409 *
410 * Queue has become frozen, there can't be any in-queue requests, so
411 * it is safe to free requests now.
412 */
413 mutex_lock(&q->sysfs_lock);
414 if (q->elevator)
415 blk_mq_sched_free_requests(q);
416 mutex_unlock(&q->sysfs_lock);
417
418 /* @q is and will stay empty, shutdown and put */
419 blk_put_queue(q);
420 }
421 EXPORT_SYMBOL(blk_cleanup_queue);
422
423 /**
424 * blk_queue_enter() - try to increase q->q_usage_counter
425 * @q: request queue pointer
426 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
427 */
428 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
429 {
430 const bool pm = flags & BLK_MQ_REQ_PM;
431
432 while (true) {
433 bool success = false;
434
435 rcu_read_lock();
436 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
437 /*
438 * The code that increments the pm_only counter is
439 * responsible for ensuring that that counter is
440 * globally visible before the queue is unfrozen.
441 */
442 if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
443 !blk_queue_pm_only(q)) {
444 success = true;
445 } else {
446 percpu_ref_put(&q->q_usage_counter);
447 }
448 }
449 rcu_read_unlock();
450
451 if (success)
452 return 0;
453
454 if (flags & BLK_MQ_REQ_NOWAIT)
455 return -EBUSY;
456
457 /*
458 * This is the read half of the barrier pair in blk_freeze_queue_start();
459 * we need to order reading __PERCPU_REF_DEAD flag of
460 * .q_usage_counter and reading .mq_freeze_depth or
461 * queue dying flag, otherwise the following wait may
462 * never return if the two reads are reordered.
463 */
464 smp_rmb();
465
466 wait_event(q->mq_freeze_wq,
467 (!q->mq_freeze_depth &&
468 blk_pm_resume_queue(pm, q)) ||
469 blk_queue_dying(q));
470 if (blk_queue_dying(q))
471 return -ENODEV;
472 }
473 }
474
475 static inline int bio_queue_enter(struct bio *bio)
476 {
477 struct request_queue *q = bio->bi_disk->queue;
478 bool nowait = bio->bi_opf & REQ_NOWAIT;
479 int ret;
480
481 ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
482 if (unlikely(ret)) {
483 if (nowait && !blk_queue_dying(q))
484 bio_wouldblock_error(bio);
485 else
486 bio_io_error(bio);
487 }
488
489 return ret;
490 }
491
492 void blk_queue_exit(struct request_queue *q)
493 {
494 percpu_ref_put(&q->q_usage_counter);
495 }
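/*
 * Illustrative pairing sketch (editorial example): every successful
 * blk_queue_enter() must be balanced by blk_queue_exit() once the caller is
 * done with the queue. The surrounding logic is hypothetical.
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;		// queue frozen, dying or pm_only
 *	... work against q ...
 *	blk_queue_exit(q);
 */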
496
497 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
498 {
499 struct request_queue *q =
500 container_of(ref, struct request_queue, q_usage_counter);
501
502 wake_up_all(&q->mq_freeze_wq);
503 }
504
505 static void blk_rq_timed_out_timer(struct timer_list *t)
506 {
507 struct request_queue *q = from_timer(q, t, timeout);
508
509 kblockd_schedule_work(&q->timeout_work);
510 }
511
512 static void blk_timeout_work(struct work_struct *work)
513 {
514 }
515
516 struct request_queue *blk_alloc_queue(int node_id)
517 {
518 struct request_queue *q;
519 int ret;
520
521 q = kmem_cache_alloc_node(blk_requestq_cachep,
522 GFP_KERNEL | __GFP_ZERO, node_id);
523 if (!q)
524 return NULL;
525
526 q->last_merge = NULL;
527
528 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
529 if (q->id < 0)
530 goto fail_q;
531
532 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
533 if (ret)
534 goto fail_id;
535
536 q->backing_dev_info = bdi_alloc(node_id);
537 if (!q->backing_dev_info)
538 goto fail_split;
539
540 q->stats = blk_alloc_queue_stats();
541 if (!q->stats)
542 goto fail_stats;
543
544 q->node = node_id;
545
546 atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
547
548 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
549 laptop_mode_timer_fn, 0);
550 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
551 INIT_WORK(&q->timeout_work, blk_timeout_work);
552 INIT_LIST_HEAD(&q->icq_list);
553 #ifdef CONFIG_BLK_CGROUP
554 INIT_LIST_HEAD(&q->blkg_list);
555 #endif
556
557 kobject_init(&q->kobj, &blk_queue_ktype);
558
559 mutex_init(&q->debugfs_mutex);
560 mutex_init(&q->sysfs_lock);
561 mutex_init(&q->sysfs_dir_lock);
562 spin_lock_init(&q->queue_lock);
563
564 init_waitqueue_head(&q->mq_freeze_wq);
565 mutex_init(&q->mq_freeze_lock);
566
567 /*
568 * Init percpu_ref in atomic mode so that it's faster to shutdown.
569 * See blk_register_queue() for details.
570 */
571 if (percpu_ref_init(&q->q_usage_counter,
572 blk_queue_usage_counter_release,
573 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
574 goto fail_bdi;
575
576 if (blkcg_init_queue(q))
577 goto fail_ref;
578
579 blk_queue_dma_alignment(q, 511);
580 blk_set_default_limits(&q->limits);
581 q->nr_requests = BLKDEV_MAX_RQ;
582
583 return q;
584
585 fail_ref:
586 percpu_ref_exit(&q->q_usage_counter);
587 fail_bdi:
588 blk_free_queue_stats(q->stats);
589 fail_stats:
590 bdi_put(q->backing_dev_info);
591 fail_split:
592 bioset_exit(&q->bio_split);
593 fail_id:
594 ida_simple_remove(&blk_queue_ida, q->id);
595 fail_q:
596 kmem_cache_free(blk_requestq_cachep, q);
597 return NULL;
598 }
599 EXPORT_SYMBOL(blk_alloc_queue);
600
601 /**
602 * blk_get_queue - increment the request_queue refcount
603 * @q: the request_queue structure to increment the refcount for
604 *
605 * Increment the refcount of the request_queue kobject.
606 *
607 * Context: Any context.
608 */
609 bool blk_get_queue(struct request_queue *q)
610 {
611 if (likely(!blk_queue_dying(q))) {
612 __blk_get_queue(q);
613 return true;
614 }
615
616 return false;
617 }
618 EXPORT_SYMBOL(blk_get_queue);
619
620 /**
621 * blk_get_request - allocate a request
622 * @q: request queue to allocate a request for
623 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
624 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
625 */
626 struct request *blk_get_request(struct request_queue *q, unsigned int op,
627 blk_mq_req_flags_t flags)
628 {
629 struct request *req;
630
631 WARN_ON_ONCE(op & REQ_NOWAIT);
632 WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
633
634 req = blk_mq_alloc_request(q, op, flags);
635 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
636 q->mq_ops->initialize_rq_fn(req);
637
638 return req;
639 }
640 EXPORT_SYMBOL(blk_get_request);
641
642 void blk_put_request(struct request *req)
643 {
644 blk_mq_free_request(req);
645 }
646 EXPORT_SYMBOL(blk_put_request);
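/*
 * Illustrative sketch (editorial example): a passthrough request allocated
 * with blk_get_request() and returned with blk_put_request(). Only the two
 * helpers and the IS_ERR() check reflect real interfaces; the rest is
 * hypothetical.
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in and execute the request ...
 *	blk_put_request(rq);
 */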
647
648 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
649 {
650 char b[BDEVNAME_SIZE];
651
652 pr_info_ratelimited("attempt to access beyond end of device\n"
653 "%s: rw=%d, want=%llu, limit=%llu\n",
654 bio_devname(bio, b), bio->bi_opf,
655 bio_end_sector(bio), maxsector);
656 }
657
658 #ifdef CONFIG_FAIL_MAKE_REQUEST
659
660 static DECLARE_FAULT_ATTR(fail_make_request);
661
662 static int __init setup_fail_make_request(char *str)
663 {
664 return setup_fault_attr(&fail_make_request, str);
665 }
666 __setup("fail_make_request=", setup_fail_make_request);
667
668 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
669 {
670 return part->make_it_fail && should_fail(&fail_make_request, bytes);
671 }
672
673 static int __init fail_make_request_debugfs(void)
674 {
675 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
676 NULL, &fail_make_request);
677
678 return PTR_ERR_OR_ZERO(dir);
679 }
680
681 late_initcall(fail_make_request_debugfs);
682
683 #else /* CONFIG_FAIL_MAKE_REQUEST */
684
685 static inline bool should_fail_request(struct hd_struct *part,
686 unsigned int bytes)
687 {
688 return false;
689 }
690
691 #endif /* CONFIG_FAIL_MAKE_REQUEST */
692
693 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
694 {
695 const int op = bio_op(bio);
696
697 if (part->policy && op_is_write(op)) {
698 char b[BDEVNAME_SIZE];
699
700 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
701 return false;
702 pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
703 bio_devname(bio, b), part->partno);
704 /* Older lvm-tools actually trigger this */
705 return false;
706 }
707
708 return false;
709 }
710
711 static noinline int should_fail_bio(struct bio *bio)
712 {
713 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
714 return -EIO;
715 return 0;
716 }
717 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
718
719 /*
720 * Check whether this bio extends beyond the end of the device or partition.
721 * This may well happen - the kernel calls bread() without checking the size of
722 * the device, e.g., when mounting a file system.
723 */
724 static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
725 {
726 unsigned int nr_sectors = bio_sectors(bio);
727
728 if (nr_sectors && maxsector &&
729 (nr_sectors > maxsector ||
730 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
731 handle_bad_sector(bio, maxsector);
732 return -EIO;
733 }
734 return 0;
735 }
736
737 /*
738 * Remap block n of partition p to block n+start(p) of the disk.
739 */
740 static inline int blk_partition_remap(struct bio *bio)
741 {
742 struct hd_struct *p;
743 int ret = -EIO;
744
745 rcu_read_lock();
746 p = __disk_get_part(bio->bi_disk, bio->bi_partno);
747 if (unlikely(!p))
748 goto out;
749 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
750 goto out;
751 if (unlikely(bio_check_ro(bio, p)))
752 goto out;
753
754 if (bio_sectors(bio)) {
755 if (bio_check_eod(bio, part_nr_sects_read(p)))
756 goto out;
757 bio->bi_iter.bi_sector += p->start_sect;
758 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
759 bio->bi_iter.bi_sector - p->start_sect);
760 }
761 bio->bi_partno = 0;
762 ret = 0;
763 out:
764 rcu_read_unlock();
765 return ret;
766 }
767
768 /*
769 * Check write append to a zoned block device.
770 */
771 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
772 struct bio *bio)
773 {
774 sector_t pos = bio->bi_iter.bi_sector;
775 int nr_sectors = bio_sectors(bio);
776
777 /* Only applicable to zoned block devices */
778 if (!blk_queue_is_zoned(q))
779 return BLK_STS_NOTSUPP;
780
781 /* The bio sector must point to the start of a sequential zone */
782 if (pos & (blk_queue_zone_sectors(q) - 1) ||
783 !blk_queue_zone_is_seq(q, pos))
784 return BLK_STS_IOERR;
785
786 /*
787 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
788 * split and could result in non-contiguous sectors being written in
789 * different zones.
790 */
791 if (nr_sectors > q->limits.chunk_sectors)
792 return BLK_STS_IOERR;
793
794 /* Make sure the BIO is small enough and will not get split */
795 if (nr_sectors > q->limits.max_zone_append_sectors)
796 return BLK_STS_IOERR;
797
798 bio->bi_opf |= REQ_NOMERGE;
799
800 return BLK_STS_OK;
801 }
802
803 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
804 {
805 struct request_queue *q = bio->bi_disk->queue;
806 blk_status_t status = BLK_STS_IOERR;
807 struct blk_plug *plug;
808
809 might_sleep();
810
811 plug = blk_mq_plug(q, bio);
812 if (plug && plug->nowait)
813 bio->bi_opf |= REQ_NOWAIT;
814
815 /*
816 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
817 * if queue does not support NOWAIT.
818 */
819 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
820 goto not_supported;
821
822 if (should_fail_bio(bio))
823 goto end_io;
824
825 if (bio->bi_partno) {
826 if (unlikely(blk_partition_remap(bio)))
827 goto end_io;
828 } else {
829 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
830 goto end_io;
831 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
832 goto end_io;
833 }
834
835 /*
836 * Filter flush bio's early so that bio based drivers without flush
837 * support don't have to worry about them.
838 */
839 if (op_is_flush(bio->bi_opf) &&
840 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
841 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
842 if (!bio_sectors(bio)) {
843 status = BLK_STS_OK;
844 goto end_io;
845 }
846 }
847
848 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
849 bio->bi_opf &= ~REQ_HIPRI;
850
851 switch (bio_op(bio)) {
852 case REQ_OP_DISCARD:
853 if (!blk_queue_discard(q))
854 goto not_supported;
855 break;
856 case REQ_OP_SECURE_ERASE:
857 if (!blk_queue_secure_erase(q))
858 goto not_supported;
859 break;
860 case REQ_OP_WRITE_SAME:
861 if (!q->limits.max_write_same_sectors)
862 goto not_supported;
863 break;
864 case REQ_OP_ZONE_APPEND:
865 status = blk_check_zone_append(q, bio);
866 if (status != BLK_STS_OK)
867 goto end_io;
868 break;
869 case REQ_OP_ZONE_RESET:
870 case REQ_OP_ZONE_OPEN:
871 case REQ_OP_ZONE_CLOSE:
872 case REQ_OP_ZONE_FINISH:
873 if (!blk_queue_is_zoned(q))
874 goto not_supported;
875 break;
876 case REQ_OP_ZONE_RESET_ALL:
877 if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
878 goto not_supported;
879 break;
880 case REQ_OP_WRITE_ZEROES:
881 if (!q->limits.max_write_zeroes_sectors)
882 goto not_supported;
883 break;
884 default:
885 break;
886 }
887
888 /*
889 * Various block parts want %current->io_context, so allocate it up
890 * front rather than dealing with lots of pain to allocate it only
891 * where needed. This may fail and the block layer knows how to live
892 * with it.
893 */
894 if (unlikely(!current->io_context))
895 create_task_io_context(current, GFP_ATOMIC, q->node);
896
897 if (blk_throtl_bio(bio))
898 return false;
899
900 blk_cgroup_bio_start(bio);
901 blkcg_bio_issue_init(bio);
902
903 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
904 trace_block_bio_queue(q, bio);
905 /* Now that enqueuing has been traced, we need to trace
906 * completion as well.
907 */
908 bio_set_flag(bio, BIO_TRACE_COMPLETION);
909 }
910 return true;
911
912 not_supported:
913 status = BLK_STS_NOTSUPP;
914 end_io:
915 bio->bi_status = status;
916 bio_endio(bio);
917 return false;
918 }
919
920 static blk_qc_t __submit_bio(struct bio *bio)
921 {
922 struct gendisk *disk = bio->bi_disk;
923 blk_qc_t ret = BLK_QC_T_NONE;
924
925 if (blk_crypto_bio_prep(&bio)) {
926 if (!disk->fops->submit_bio)
927 return blk_mq_submit_bio(bio);
928 ret = disk->fops->submit_bio(bio);
929 }
930 blk_queue_exit(disk->queue);
931 return ret;
932 }
933
934 /*
935 * The loop in this function may be a bit non-obvious, and so deserves some
936 * explanation:
937 *
938 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
939 * that), so we have a list with a single bio.
940 * - We pretend that we have just taken it off a longer list, so we assign
941 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
942 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
943 * bios through a recursive call to submit_bio_noacct. If it did, we find a
944 * non-NULL value in bio_list and re-enter the loop from the top.
945 * - In this case we really did just take the bio of the top of the list (no
946 * pretending) and so remove it from bio_list, and call into ->submit_bio()
947 * again.
948 *
949 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
950 * bio_list_on_stack[1] contains bios that were submitted before the current
951 * ->submit_bio, but that haven't been processed yet.
952 */
953 static blk_qc_t __submit_bio_noacct(struct bio *bio)
954 {
955 struct bio_list bio_list_on_stack[2];
956 blk_qc_t ret = BLK_QC_T_NONE;
957
958 BUG_ON(bio->bi_next);
959
960 bio_list_init(&bio_list_on_stack[0]);
961 current->bio_list = bio_list_on_stack;
962
963 do {
964 struct request_queue *q = bio->bi_disk->queue;
965 struct bio_list lower, same;
966
967 if (unlikely(bio_queue_enter(bio) != 0))
968 continue;
969
970 /*
971 * Create a fresh bio_list for all subordinate requests.
972 */
973 bio_list_on_stack[1] = bio_list_on_stack[0];
974 bio_list_init(&bio_list_on_stack[0]);
975
976 ret = __submit_bio(bio);
977
978 /*
979 * Sort new bios into those for a lower level and those for the
980 * same level.
981 */
982 bio_list_init(&lower);
983 bio_list_init(&same);
984 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
985 if (q == bio->bi_disk->queue)
986 bio_list_add(&same, bio);
987 else
988 bio_list_add(&lower, bio);
989
990 /*
991 * Now assemble so we handle the lowest level first.
992 */
993 bio_list_merge(&bio_list_on_stack[0], &lower);
994 bio_list_merge(&bio_list_on_stack[0], &same);
995 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
996 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
997
998 current->bio_list = NULL;
999 return ret;
1000 }
1001
1002 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1003 {
1004 struct bio_list bio_list[2] = { };
1005 blk_qc_t ret = BLK_QC_T_NONE;
1006
1007 current->bio_list = bio_list;
1008
1009 do {
1010 struct gendisk *disk = bio->bi_disk;
1011
1012 if (unlikely(bio_queue_enter(bio) != 0))
1013 continue;
1014
1015 if (!blk_crypto_bio_prep(&bio)) {
1016 blk_queue_exit(disk->queue);
1017 ret = BLK_QC_T_NONE;
1018 continue;
1019 }
1020
1021 ret = blk_mq_submit_bio(bio);
1022 } while ((bio = bio_list_pop(&bio_list[0])));
1023
1024 current->bio_list = NULL;
1025 return ret;
1026 }
1027
1028 /**
1029 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1030 * @bio: The bio describing the location in memory and on the device.
1031 *
1032 * This is a version of submit_bio() that shall only be used for I/O that is
1033 * resubmitted to lower level drivers by stacking block drivers. All file
1034 * systems and other upper level users of the block layer should use
1035 * submit_bio() instead.
1036 */
1037 blk_qc_t submit_bio_noacct(struct bio *bio)
1038 {
1039 if (!submit_bio_checks(bio))
1040 return BLK_QC_T_NONE;
1041
1042 /*
1043 * We only want one ->submit_bio to be active at a time, else stack
1044 * usage with stacked devices could be a problem. Use current->bio_list
1045 * to collect a list of requests submitted by a ->submit_bio method while
1046 * it is active, and then process them after it has returned.
1047 */
1048 if (current->bio_list) {
1049 bio_list_add(&current->bio_list[0], bio);
1050 return BLK_QC_T_NONE;
1051 }
1052
1053 if (!bio->bi_disk->fops->submit_bio)
1054 return __submit_bio_noacct_mq(bio);
1055 return __submit_bio_noacct(bio);
1056 }
1057 EXPORT_SYMBOL(submit_bio_noacct);
1058
1059 /**
1060 * submit_bio - submit a bio to the block device layer for I/O
1061 * @bio: The &struct bio which describes the I/O
1062 *
1063 * submit_bio() is used to submit I/O requests to block devices. It is passed a
1064 * fully set up &struct bio that describes the I/O that needs to be done. The
1065 * bio will be sent to the device described by the bi_disk and bi_partno fields.
1066 *
1067 * The success/failure status of the request, along with notification of
1068 * completion, is delivered asynchronously through the ->bi_end_io() callback
1069 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
1070 * been called.
1071 */
1072 blk_qc_t submit_bio(struct bio *bio)
1073 {
1074 if (blkcg_punt_bio_submit(bio))
1075 return BLK_QC_T_NONE;
1076
1077 /*
1078 * If it's a regular read/write or a barrier with data attached,
1079 * go through the normal accounting stuff before submission.
1080 */
1081 if (bio_has_data(bio)) {
1082 unsigned int count;
1083
1084 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1085 count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1086 else
1087 count = bio_sectors(bio);
1088
1089 if (op_is_write(bio_op(bio))) {
1090 count_vm_events(PGPGOUT, count);
1091 } else {
1092 task_io_account_read(bio->bi_iter.bi_size);
1093 count_vm_events(PGPGIN, count);
1094 }
1095 }
1096
1097 /*
1098 * If we're reading data that is part of the userspace workingset, count
1099 * submission time as memory stall. When the device is congested, or
1100 * the submitting cgroup IO-throttled, submission can be a significant
1101 * part of overall IO time.
1102 */
1103 if (unlikely(bio_op(bio) == REQ_OP_READ &&
1104 bio_flagged(bio, BIO_WORKINGSET))) {
1105 unsigned long pflags;
1106 blk_qc_t ret;
1107
1108 psi_memstall_enter(&pflags);
1109 ret = submit_bio_noacct(bio);
1110 psi_memstall_leave(&pflags);
1111
1112 return ret;
1113 }
1114
1115 return submit_bio_noacct(bio);
1116 }
1117 EXPORT_SYMBOL(submit_bio);
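/*
 * Illustrative sketch (editorial example): a minimal read submitted via
 * submit_bio(), assuming "bdev", "page" and an end_io callback "done" already
 * exist. Completion is reported asynchronously through ->bi_end_io().
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = 0;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = done;
 *	submit_bio(bio);
 */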
1118
1119 /**
1120 * blk_cloned_rq_check_limits - Helper function to check a cloned request
1121 * for the new queue limits
1122 * @q: the queue
1123 * @rq: the request being checked
1124 *
1125 * Description:
1126 * @rq may have been made based on weaker limitations of upper-level queues
1127 * in request stacking drivers, and it may violate the limitation of @q.
1128 * Since the block layer and the underlying device driver trust @rq
1129 * after it is inserted to @q, it should be checked against @q before
1130 * the insertion using this generic function.
1131 *
1132 * Request stacking drivers like request-based dm may change the queue
1133 * limits when retrying requests on other queues. Those requests need
1134 * to be checked against the new queue limits again during dispatch.
1135 */
1136 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1137 struct request *rq)
1138 {
1139 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1140
1141 if (blk_rq_sectors(rq) > max_sectors) {
1142 /*
1143 * SCSI device does not have a good way to return if
1144 * Write Same/Zero is actually supported. If a device rejects
1145 * a non-read/write command (discard, write same, etc.), the
1146 * low-level device driver will set the relevant queue limit to
1147 * 0 to prevent blk-lib from issuing more of the offending
1148 * operations. Commands queued prior to the queue limit being
1149 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1150 * errors being propagated to upper layers.
1151 */
1152 if (max_sectors == 0)
1153 return BLK_STS_NOTSUPP;
1154
1155 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1156 __func__, blk_rq_sectors(rq), max_sectors);
1157 return BLK_STS_IOERR;
1158 }
1159
1160 /*
1161 * The queue's settings related to segment counting, like q->bounce_pfn,
1162 * may differ from those of other stacking queues.
1163 * Recalculate it to check the request correctly against this queue's
1164 * limits.
1165 */
1166 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1167 if (rq->nr_phys_segments > queue_max_segments(q)) {
1168 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1169 __func__, rq->nr_phys_segments, queue_max_segments(q));
1170 return BLK_STS_IOERR;
1171 }
1172
1173 return BLK_STS_OK;
1174 }
1175
1176 /**
1177 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1178 * @q: the queue to submit the request
1179 * @rq: the request being queued
1180 */
1181 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1182 {
1183 blk_status_t ret;
1184
1185 ret = blk_cloned_rq_check_limits(q, rq);
1186 if (ret != BLK_STS_OK)
1187 return ret;
1188
1189 if (rq->rq_disk &&
1190 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1191 return BLK_STS_IOERR;
1192
1193 if (blk_crypto_insert_cloned_request(rq))
1194 return BLK_STS_IOERR;
1195
1196 if (blk_queue_io_stat(q))
1197 blk_account_io_start(rq);
1198
1199 /*
1200 * Since we have a scheduler attached on the top device,
1201 * bypass a potential scheduler on the bottom device for
1202 * insert.
1203 */
1204 return blk_mq_request_issue_directly(rq, true);
1205 }
1206 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
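/*
 * Illustrative sketch (editorial example): the request-based dm style flow in
 * miniature. "clone" is a request already prepared for the lower queue;
 * everything except blk_insert_cloned_request() is hypothetical.
 *
 *	blk_status_t ret = blk_insert_cloned_request(lower_q, clone);
 *
 *	if (ret != BLK_STS_OK)
 *		handle_dispatch_failure(orig_rq, ret);
 */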
1207
1208 /**
1209 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1210 * @rq: request to examine
1211 *
1212 * Description:
1213 * A request could be a merge of IOs which require different failure
1214 * handling. This function determines the number of bytes which
1215 * can be failed from the beginning of the request without
1216 * crossing into an area which needs to be retried further.
1217 *
1218 * Return:
1219 * The number of bytes to fail.
1220 */
1221 unsigned int blk_rq_err_bytes(const struct request *rq)
1222 {
1223 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1224 unsigned int bytes = 0;
1225 struct bio *bio;
1226
1227 if (!(rq->rq_flags & RQF_MIXED_MERGE))
1228 return blk_rq_bytes(rq);
1229
1230 /*
1231 * Currently the only 'mixing' which can happen is between
1232 * different failfast types. We can safely fail portions
1233 * which have all the failfast bits that the first one has -
1234 * the ones which are at least as eager to fail as the first
1235 * one.
1236 */
1237 for (bio = rq->bio; bio; bio = bio->bi_next) {
1238 if ((bio->bi_opf & ff) != ff)
1239 break;
1240 bytes += bio->bi_iter.bi_size;
1241 }
1242
1243 /* this could lead to infinite loop */
1244 BUG_ON(blk_rq_bytes(rq) && !bytes);
1245 return bytes;
1246 }
1247 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1248
1249 static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1250 {
1251 unsigned long stamp;
1252 again:
1253 stamp = READ_ONCE(part->stamp);
1254 if (unlikely(stamp != now)) {
1255 if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
1256 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1257 }
1258 if (part->partno) {
1259 part = &part_to_disk(part)->part0;
1260 goto again;
1261 }
1262 }
1263
1264 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1265 {
1266 if (req->part && blk_do_io_stat(req)) {
1267 const int sgrp = op_stat_group(req_op(req));
1268 struct hd_struct *part;
1269
1270 part_stat_lock();
1271 part = req->part;
1272 part_stat_add(part, sectors[sgrp], bytes >> 9);
1273 part_stat_unlock();
1274 }
1275 }
1276
1277 void blk_account_io_done(struct request *req, u64 now)
1278 {
1279 /*
1280 * Account IO completion. flush_rq isn't accounted as a
1281 * normal IO on queueing nor completion. Accounting the
1282 * containing request is enough.
1283 */
1284 if (req->part && blk_do_io_stat(req) &&
1285 !(req->rq_flags & RQF_FLUSH_SEQ)) {
1286 const int sgrp = op_stat_group(req_op(req));
1287 struct hd_struct *part;
1288
1289 part_stat_lock();
1290 part = req->part;
1291
1292 update_io_ticks(part, jiffies, true);
1293 part_stat_inc(part, ios[sgrp]);
1294 part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
1295 part_stat_unlock();
1296
1297 hd_struct_put(part);
1298 }
1299 }
1300
1301 void blk_account_io_start(struct request *rq)
1302 {
1303 if (!blk_do_io_stat(rq))
1304 return;
1305
1306 rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
1307
1308 part_stat_lock();
1309 update_io_ticks(rq->part, jiffies, false);
1310 part_stat_unlock();
1311 }
1312
1313 static unsigned long __part_start_io_acct(struct hd_struct *part,
1314 unsigned int sectors, unsigned int op)
1315 {
1316 const int sgrp = op_stat_group(op);
1317 unsigned long now = READ_ONCE(jiffies);
1318
1319 part_stat_lock();
1320 update_io_ticks(part, now, false);
1321 part_stat_inc(part, ios[sgrp]);
1322 part_stat_add(part, sectors[sgrp], sectors);
1323 part_stat_local_inc(part, in_flight[op_is_write(op)]);
1324 part_stat_unlock();
1325
1326 return now;
1327 }
1328
1329 unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1330 struct bio *bio)
1331 {
1332 *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
1333
1334 return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
1335 }
1336 EXPORT_SYMBOL_GPL(part_start_io_acct);
1337
1338 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1339 unsigned int op)
1340 {
1341 return __part_start_io_acct(&disk->part0, sectors, op);
1342 }
1343 EXPORT_SYMBOL(disk_start_io_acct);
1344
1345 static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
1346 unsigned long start_time)
1347 {
1348 const int sgrp = op_stat_group(op);
1349 unsigned long now = READ_ONCE(jiffies);
1350 unsigned long duration = now - start_time;
1351
1352 part_stat_lock();
1353 update_io_ticks(part, now, true);
1354 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1355 part_stat_local_dec(part, in_flight[op_is_write(op)]);
1356 part_stat_unlock();
1357 }
1358
1359 void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1360 unsigned long start_time)
1361 {
1362 __part_end_io_acct(part, bio_op(bio), start_time);
1363 hd_struct_put(part);
1364 }
1365 EXPORT_SYMBOL_GPL(part_end_io_acct);
1366
1367 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1368 unsigned long start_time)
1369 {
1370 __part_end_io_acct(&disk->part0, op, start_time);
1371 }
1372 EXPORT_SYMBOL(disk_end_io_acct);
1373
1374 /*
1375 * Steal bios from a request and add them to a bio list.
1376 * The request must not have been partially completed before.
1377 */
1378 void blk_steal_bios(struct bio_list *list, struct request *rq)
1379 {
1380 if (rq->bio) {
1381 if (list->tail)
1382 list->tail->bi_next = rq->bio;
1383 else
1384 list->head = rq->bio;
1385 list->tail = rq->biotail;
1386
1387 rq->bio = NULL;
1388 rq->biotail = NULL;
1389 }
1390
1391 rq->__data_len = 0;
1392 }
1393 EXPORT_SYMBOL_GPL(blk_steal_bios);
1394
1395 /**
1396 * blk_update_request - Special helper function for request stacking drivers
1397 * @req: the request being processed
1398 * @error: block status code
1399 * @nr_bytes: number of bytes to complete @req
1400 *
1401 * Description:
1402 * Ends I/O on a number of bytes attached to @req, but doesn't complete
1403 * the request structure even if @req doesn't have leftover.
1404 * If @req has leftover, sets it up for the next range of segments.
1405 *
1406 * This special helper function is only for request stacking drivers
1407 * (e.g. request-based dm) so that they can handle partial completion.
1408 * Actual device drivers should use blk_mq_end_request instead.
1409 *
1410 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1411 * %false return from this function.
1412 *
1413 * Note:
1414 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
1415 * blk_rq_bytes() and in blk_update_request().
1416 *
1417 * Return:
1418 * %false - this request doesn't have any more data
1419 * %true - this request has more data
1420 **/
1421 bool blk_update_request(struct request *req, blk_status_t error,
1422 unsigned int nr_bytes)
1423 {
1424 int total_bytes;
1425
1426 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1427
1428 if (!req->bio)
1429 return false;
1430
1431 #ifdef CONFIG_BLK_DEV_INTEGRITY
1432 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1433 error == BLK_STS_OK)
1434 req->q->integrity.profile->complete_fn(req, nr_bytes);
1435 #endif
1436
1437 /*
1438 * Upper layers may call blk_crypto_evict_key() anytime after the last
1439 * bio_endio(). Therefore, the keyslot must be released before that.
1440 */
1441 if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
1442 __blk_crypto_rq_put_keyslot(req);
1443
1444 if (unlikely(error && !blk_rq_is_passthrough(req) &&
1445 !(req->rq_flags & RQF_QUIET)))
1446 print_req_error(req, error, __func__);
1447
1448 blk_account_io_completion(req, nr_bytes);
1449
1450 total_bytes = 0;
1451 while (req->bio) {
1452 struct bio *bio = req->bio;
1453 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1454
1455 if (bio_bytes == bio->bi_iter.bi_size)
1456 req->bio = bio->bi_next;
1457
1458 /* Completion has already been traced */
1459 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1460 req_bio_endio(req, bio, bio_bytes, error);
1461
1462 total_bytes += bio_bytes;
1463 nr_bytes -= bio_bytes;
1464
1465 if (!nr_bytes)
1466 break;
1467 }
1468
1469 /*
1470 * completely done
1471 */
1472 if (!req->bio) {
1473 /*
1474 * Reset counters so that the request stacking driver
1475 * can find how many bytes remain in the request
1476 * later.
1477 */
1478 req->__data_len = 0;
1479 return false;
1480 }
1481
1482 req->__data_len -= total_bytes;
1483
1484 /* update sector only for requests with clear definition of sector */
1485 if (!blk_rq_is_passthrough(req))
1486 req->__sector += total_bytes >> 9;
1487
1488 /* mixed attributes always follow the first bio */
1489 if (req->rq_flags & RQF_MIXED_MERGE) {
1490 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1491 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1492 }
1493
1494 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1495 /*
1496 * If total number of sectors is less than the first segment
1497 * size, something has gone terribly wrong.
1498 */
1499 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1500 blk_dump_rq_flags(req, "request botched");
1501 req->__data_len = blk_rq_cur_bytes(req);
1502 }
1503
1504 /* recalculate the number of segments */
1505 req->nr_phys_segments = blk_recalc_rq_segments(req);
1506 }
1507
1508 return true;
1509 }
1510 EXPORT_SYMBOL_GPL(blk_update_request);
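/*
 * Illustrative sketch (editorial example): the usual partial-completion
 * pattern in a request stacking driver. If blk_update_request() reports that
 * data remains, the driver requeues or retries; otherwise it ends the request.
 * Only the two block layer calls are real interfaces here.
 *
 *	if (!blk_update_request(rq, error, bytes_done))
 *		__blk_mq_end_request(rq, error);	// nothing left to do
 *	else
 *		requeue_remaining(rq);			// driver specific
 */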
1511
1512 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1513 /**
1514 * rq_flush_dcache_pages - Helper function to flush all pages in a request
1515 * @rq: the request to be flushed
1516 *
1517 * Description:
1518 * Flush all pages in @rq.
1519 */
1520 void rq_flush_dcache_pages(struct request *rq)
1521 {
1522 struct req_iterator iter;
1523 struct bio_vec bvec;
1524
1525 rq_for_each_segment(bvec, rq, iter)
1526 flush_dcache_page(bvec.bv_page);
1527 }
1528 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1529 #endif
1530
1531 /**
1532 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1533 * @q : the queue of the device being checked
1534 *
1535 * Description:
1536 * Check if underlying low-level drivers of a device are busy.
1537 * If the drivers want to export their busy state, they must set own
1538 * exporting function using blk_queue_lld_busy() first.
1539 *
1540 * Basically, this function is used only by request stacking drivers
1541 * to stop dispatching requests to underlying devices when underlying
1542 * devices are busy. This behavior helps more I/O merging on the queue
1543 * of the request stacking driver and prevents I/O throughput regression
1544 * on burst I/O load.
1545 *
1546 * Return:
1547 * 0 - Not busy (The request stacking driver should dispatch request)
1548 * 1 - Busy (The request stacking driver should stop dispatching request)
1549 */
1550 int blk_lld_busy(struct request_queue *q)
1551 {
1552 if (queue_is_mq(q) && q->mq_ops->busy)
1553 return q->mq_ops->busy(q);
1554
1555 return 0;
1556 }
1557 EXPORT_SYMBOL_GPL(blk_lld_busy);
1558
1559 /**
1560 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1561 * @rq: the clone request to be cleaned up
1562 *
1563 * Description:
1564 * Free all bios in @rq for a cloned request.
1565 */
1566 void blk_rq_unprep_clone(struct request *rq)
1567 {
1568 struct bio *bio;
1569
1570 while ((bio = rq->bio) != NULL) {
1571 rq->bio = bio->bi_next;
1572
1573 bio_put(bio);
1574 }
1575 }
1576 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1577
1578 /**
1579 * blk_rq_prep_clone - Helper function to setup clone request
1580 * @rq: the request to be setup
1581 * @rq_src: original request to be cloned
1582 * @bs: bio_set that bios for clone are allocated from
1583 * @gfp_mask: memory allocation mask for bio
1584 * @bio_ctr: setup function to be called for each clone bio.
1585 * Returns %0 for success, non %0 for failure.
1586 * @data: private data to be passed to @bio_ctr
1587 *
1588 * Description:
1589 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1590 * Also, the pages which the original bios point to are not copied;
1591 * the cloned bios just point to the same pages.
1592 * So cloned bios must be completed before original bios, which means
1593 * the caller must complete @rq before @rq_src.
1594 */
1595 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1596 struct bio_set *bs, gfp_t gfp_mask,
1597 int (*bio_ctr)(struct bio *, struct bio *, void *),
1598 void *data)
1599 {
1600 struct bio *bio, *bio_src;
1601
1602 if (!bs)
1603 bs = &fs_bio_set;
1604
1605 __rq_for_each_bio(bio_src, rq_src) {
1606 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1607 if (!bio)
1608 goto free_and_out;
1609
1610 if (bio_ctr && bio_ctr(bio, bio_src, data))
1611 goto free_and_out;
1612
1613 if (rq->bio) {
1614 rq->biotail->bi_next = bio;
1615 rq->biotail = bio;
1616 } else {
1617 rq->bio = rq->biotail = bio;
1618 }
1619 bio = NULL;
1620 }
1621
1622 /* Copy attributes of the original request to the clone request. */
1623 rq->__sector = blk_rq_pos(rq_src);
1624 rq->__data_len = blk_rq_bytes(rq_src);
1625 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1626 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1627 rq->special_vec = rq_src->special_vec;
1628 }
1629 rq->nr_phys_segments = rq_src->nr_phys_segments;
1630 rq->ioprio = rq_src->ioprio;
1631
1632 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1633 goto free_and_out;
1634
1635 return 0;
1636
1637 free_and_out:
1638 if (bio)
1639 bio_put(bio);
1640 blk_rq_unprep_clone(rq);
1641
1642 return -ENOMEM;
1643 }
1644 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
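/*
 * Illustrative sketch (editorial example): cloning a request for dispatch to
 * another queue and tearing the clone down again, roughly as request-based dm
 * does. A NULL @bio_ctr is allowed; "clone" and "bs" are hypothetical.
 *
 *	if (blk_rq_prep_clone(clone, rq, &bs, GFP_ATOMIC, NULL, NULL))
 *		return -ENOMEM;			// bio allocation failed
 *	...
 *	blk_rq_unprep_clone(clone);		// free the cloned bios again
 */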
1645
1646 int kblockd_schedule_work(struct work_struct *work)
1647 {
1648 return queue_work(kblockd_workqueue, work);
1649 }
1650 EXPORT_SYMBOL(kblockd_schedule_work);
1651
1652 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1653 unsigned long delay)
1654 {
1655 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1656 }
1657 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1658
1659 /**
1660 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1661 * @plug: The &struct blk_plug that needs to be initialized
1662 *
1663 * Description:
1664 * blk_start_plug() indicates to the block layer an intent by the caller
1665 * to submit multiple I/O requests in a batch. The block layer may use
1666 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1667 * is called. However, the block layer may choose to submit requests
1668 * before a call to blk_finish_plug() if the number of queued I/Os
1669 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1670 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1671 * the task schedules (see below).
1672 *
1673 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1674 * pending I/O should the task end up blocking between blk_start_plug() and
1675 * blk_finish_plug(). This is important from a performance perspective, but
1676 * also ensures that we don't deadlock. For instance, if the task is blocking
1677 * for a memory allocation, memory reclaim could end up wanting to free a
1678 * page belonging to that request that is currently residing in our private
1679 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1680 * this kind of deadlock.
1681 */
1682 void blk_start_plug(struct blk_plug *plug)
1683 {
1684 struct task_struct *tsk = current;
1685
1686 /*
1687 * If this is a nested plug, don't actually assign it.
1688 */
1689 if (tsk->plug)
1690 return;
1691
1692 INIT_LIST_HEAD(&plug->mq_list);
1693 INIT_LIST_HEAD(&plug->cb_list);
1694 plug->rq_count = 0;
1695 plug->multiple_queues = false;
1696 plug->nowait = false;
1697
1698 /*
1699 * Store ordering should not be needed here, since a potential
1700 * preempt will imply a full memory barrier
1701 */
1702 tsk->plug = plug;
1703 }
1704 EXPORT_SYMBOL(blk_start_plug);
1705
1706 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1707 {
1708 LIST_HEAD(callbacks);
1709
1710 while (!list_empty(&plug->cb_list)) {
1711 list_splice_init(&plug->cb_list, &callbacks);
1712
1713 while (!list_empty(&callbacks)) {
1714 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1715 struct blk_plug_cb,
1716 list);
1717 list_del(&cb->list);
1718 cb->callback(cb, from_schedule);
1719 }
1720 }
1721 }
1722
1723 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1724 int size)
1725 {
1726 struct blk_plug *plug = current->plug;
1727 struct blk_plug_cb *cb;
1728
1729 if (!plug)
1730 return NULL;
1731
1732 list_for_each_entry(cb, &plug->cb_list, list)
1733 if (cb->callback == unplug && cb->data == data)
1734 return cb;
1735
1736 /* Not currently on the callback list */
1737 BUG_ON(size < sizeof(*cb));
1738 cb = kzalloc(size, GFP_ATOMIC);
1739 if (cb) {
1740 cb->data = data;
1741 cb->callback = unplug;
1742 list_add(&cb->list, &plug->cb_list);
1743 }
1744 return cb;
1745 }
1746 EXPORT_SYMBOL(blk_check_plugged);
1747
1748 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1749 {
1750 flush_plug_callbacks(plug, from_schedule);
1751
1752 if (!list_empty(&plug->mq_list))
1753 blk_mq_flush_plug_list(plug, from_schedule);
1754 }
1755
1756 /**
1757 * blk_finish_plug - mark the end of a batch of submitted I/O
1758 * @plug: The &struct blk_plug passed to blk_start_plug()
1759 *
1760 * Description:
1761 * Indicate that a batch of I/O submissions is complete. This function
1762 * must be paired with an initial call to blk_start_plug(). The intent
1763 * is to allow the block layer to optimize I/O submission. See the
1764 * documentation for blk_start_plug() for more information.
1765 */
1766 void blk_finish_plug(struct blk_plug *plug)
1767 {
1768 if (plug != current->plug)
1769 return;
1770 blk_flush_plug_list(plug, false);
1771
1772 current->plug = NULL;
1773 }
1774 EXPORT_SYMBOL(blk_finish_plug);
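/*
 * Illustrative sketch (editorial example): batching several submissions under
 * one on-stack plug so the block layer can merge and dispatch them together.
 * Only blk_start_plug(), submit_bio() and blk_finish_plug() are real
 * interfaces; the loop variables are hypothetical.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */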
1775
1776 void blk_io_schedule(void)
1777 {
1778 /* Prevent hang_check timer from firing at us during very long I/O */
1779 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1780
1781 if (timeout)
1782 io_schedule_timeout(timeout);
1783 else
1784 io_schedule();
1785 }
1786 EXPORT_SYMBOL_GPL(blk_io_schedule);
1787
1788 int __init blk_dev_init(void)
1789 {
1790 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1791 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1792 sizeof_field(struct request, cmd_flags));
1793 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1794 sizeof_field(struct bio, bi_opf));
1795
1796 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1797 kblockd_workqueue = alloc_workqueue("kblockd",
1798 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1799 if (!kblockd_workqueue)
1800 panic("Failed to create kblockd\n");
1801
1802 blk_requestq_cachep = kmem_cache_create("request_queue",
1803 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1804
1805 blk_debugfs_root = debugfs_create_dir("block", NULL);
1806
1807 return 0;
1808 }
1809