1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8 * - July2000
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
10 */
11
12 /*
13 * This handles all read/write requests to block devices
14 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-mq.h>
20 #include <linux/blk-pm.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/blk-cgroup.h>
38 #include <linux/t10-pi.h>
39 #include <linux/debugfs.h>
40 #include <linux/bpf.h>
41 #include <linux/psi.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
47 #undef CREATE_TRACE_POINTS
48 #include <trace/hooks/block.h>
49
50 #include "blk.h"
51 #include "blk-mq.h"
52 #include "blk-mq-sched.h"
53 #include "blk-pm.h"
54 #ifndef __GENKSYMS__
55 #include "blk-rq-qos.h"
56 #endif
57 #include "blk-ioprio.h"
58
59 struct dentry *blk_debugfs_root;
60
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
62 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
63 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
64 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
65 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
66 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
67 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
68 EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
69 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
70 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
71 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
72 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);
73
74 DEFINE_IDA(blk_queue_ida);
75
76 /*
77 * For queue allocation
78 */
79 struct kmem_cache *blk_requestq_cachep;
80
81 /*
82 * Controlling structure to kblockd
83 */
84 static struct workqueue_struct *kblockd_workqueue;
85
86 /**
87 * blk_queue_flag_set - atomically set a queue flag
88 * @flag: flag to be set
89 * @q: request queue
90 */
91 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
92 {
93 set_bit(flag, &q->queue_flags);
94 }
95 EXPORT_SYMBOL(blk_queue_flag_set);
96
97 /**
98 * blk_queue_flag_clear - atomically clear a queue flag
99 * @flag: flag to be cleared
100 * @q: request queue
101 */
102 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
103 {
104 clear_bit(flag, &q->queue_flags);
105 }
106 EXPORT_SYMBOL(blk_queue_flag_clear);
107
108 /**
109 * blk_queue_flag_test_and_set - atomically test and set a queue flag
110 * @flag: flag to be set
111 * @q: request queue
112 *
113 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
114 * the flag was already set.
115 */
116 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
117 {
118 return test_and_set_bit(flag, &q->queue_flags);
119 }
120 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
121
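/**
 * blk_rq_init - initialize a request to default values
 * @q: request queue the request belongs to
 * @rq: request to initialize
 *
 * Zeroes @rq and resets the fields every request is expected to carry
 * (queuelist, tag state, start time, crypto context) before it is handed
 * to the rest of the block layer.
 */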
122 void blk_rq_init(struct request_queue *q, struct request *rq)
123 {
124 memset(rq, 0, sizeof(*rq));
125
126 INIT_LIST_HEAD(&rq->queuelist);
127 rq->q = q;
128 rq->__sector = (sector_t) -1;
129 INIT_HLIST_NODE(&rq->hash);
130 RB_CLEAR_NODE(&rq->rb_node);
131 rq->tag = BLK_MQ_NO_TAG;
132 rq->internal_tag = BLK_MQ_NO_TAG;
133 rq->start_time_ns = ktime_get_ns();
134 rq->part = NULL;
135 blk_crypto_rq_set_defaults(rq);
136 }
137 EXPORT_SYMBOL(blk_rq_init);
138
139 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
140 static const char *const blk_op_name[] = {
141 REQ_OP_NAME(READ),
142 REQ_OP_NAME(WRITE),
143 REQ_OP_NAME(FLUSH),
144 REQ_OP_NAME(DISCARD),
145 REQ_OP_NAME(SECURE_ERASE),
146 REQ_OP_NAME(ZONE_RESET),
147 REQ_OP_NAME(ZONE_RESET_ALL),
148 REQ_OP_NAME(ZONE_OPEN),
149 REQ_OP_NAME(ZONE_CLOSE),
150 REQ_OP_NAME(ZONE_FINISH),
151 REQ_OP_NAME(ZONE_APPEND),
152 REQ_OP_NAME(WRITE_SAME),
153 REQ_OP_NAME(WRITE_ZEROES),
154 REQ_OP_NAME(DRV_IN),
155 REQ_OP_NAME(DRV_OUT),
156 };
157 #undef REQ_OP_NAME
158
159 /**
160 * blk_op_str - return the string XXX for a REQ_OP_XXX value
161 * @op: REQ_OP_XXX.
162 *
163 * Description: Centralized block layer helper to convert REQ_OP_XXX into
164 * its string form. Useful when debugging or tracing a bio or request. For an
165 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
166 */
167 inline const char *blk_op_str(unsigned int op)
168 {
169 const char *op_str = "UNKNOWN";
170
171 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
172 op_str = blk_op_name[op];
173
174 return op_str;
175 }
176 EXPORT_SYMBOL_GPL(blk_op_str);
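/*
 * A minimal usage sketch (illustrative only): debugging code typically
 * prints the operation alongside the request position, e.g.
 *
 *	pr_debug("op %s at sector %llu\n", blk_op_str(req_op(rq)),
 *		 (unsigned long long)blk_rq_pos(rq));
 */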
177
178 static const struct {
179 int errno;
180 const char *name;
181 } blk_errors[] = {
182 [BLK_STS_OK] = { 0, "" },
183 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
184 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
185 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
186 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
187 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
188 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
189 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
190 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
191 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
192 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
193 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
194
195 /* device mapper special case, should not leak out: */
196 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
197
198 /* zone device specific errors */
199 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
200 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
201
202 /* everything else not covered above: */
203 [BLK_STS_IOERR] = { -EIO, "I/O" },
204 };
205
206 blk_status_t errno_to_blk_status(int errno)
207 {
208 int i;
209
210 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
211 if (blk_errors[i].errno == errno)
212 return (__force blk_status_t)i;
213 }
214
215 return BLK_STS_IOERR;
216 }
217 EXPORT_SYMBOL_GPL(errno_to_blk_status);
218
219 int blk_status_to_errno(blk_status_t status)
220 {
221 int idx = (__force int)status;
222
223 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
224 return -EIO;
225 return blk_errors[idx].errno;
226 }
227 EXPORT_SYMBOL_GPL(blk_status_to_errno);
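/*
 * For the table entries above the two helpers are each other's inverse,
 * e.g. errno_to_blk_status(-ENOSPC) == BLK_STS_NOSPC and
 * blk_status_to_errno(BLK_STS_NOSPC) == -ENOSPC.  Any errno not listed
 * in blk_errors collapses to BLK_STS_IOERR.
 */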
228
229 static void print_req_error(struct request *req, blk_status_t status,
230 const char *caller)
231 {
232 int idx = (__force int)status;
233
234 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
235 return;
236
237 printk_ratelimited(KERN_ERR
238 "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
239 "phys_seg %u prio class %u\n",
240 caller, blk_errors[idx].name,
241 req->rq_disk ? req->rq_disk->disk_name : "?",
242 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
243 req->cmd_flags & ~REQ_OP_MASK,
244 req->nr_phys_segments,
245 IOPRIO_PRIO_CLASS(req->ioprio));
246 }
247
248 static void req_bio_endio(struct request *rq, struct bio *bio,
249 unsigned int nbytes, blk_status_t error)
250 {
251 if (error)
252 bio->bi_status = error;
253
254 if (unlikely(rq->rq_flags & RQF_QUIET))
255 bio_set_flag(bio, BIO_QUIET);
256
257 bio_advance(bio, nbytes);
258
259 if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
260 /*
261 * Partial zone append completions cannot be supported as the
262 * BIO fragments may end up not being written sequentially.
263 */
264 if (bio->bi_iter.bi_size)
265 bio->bi_status = BLK_STS_IOERR;
266 else
267 bio->bi_iter.bi_sector = rq->__sector;
268 }
269
270 /* don't actually finish bio if it's part of flush sequence */
271 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
272 bio_endio(bio);
273 }
274
275 void blk_dump_rq_flags(struct request *rq, char *msg)
276 {
277 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
278 rq->rq_disk ? rq->rq_disk->disk_name : "?",
279 (unsigned long long) rq->cmd_flags);
280
281 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
282 (unsigned long long)blk_rq_pos(rq),
283 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
284 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
285 rq->bio, rq->biotail, blk_rq_bytes(rq));
286 }
287 EXPORT_SYMBOL(blk_dump_rq_flags);
288
289 /**
290 * blk_sync_queue - cancel any pending callbacks on a queue
291 * @q: the queue
292 *
293 * Description:
294 * The block layer may perform asynchronous callback activity
295 * on a queue, such as calling the unplug function after a timeout.
296 * A block device may call blk_sync_queue to ensure that any
297 * such activity is cancelled, thus allowing it to release resources
298 * that the callbacks might use. The caller must already have made sure
299 * that its ->submit_bio will not re-add plugging prior to calling
300 * this function.
301 *
302 * This function does not cancel any asynchronous activity arising
303 * out of elevator or throttling code. That would require elevator_exit()
304 * and blkcg_exit_queue() to be called with queue lock initialized.
305 *
306 */
307 void blk_sync_queue(struct request_queue *q)
308 {
309 del_timer_sync(&q->timeout);
310 cancel_work_sync(&q->timeout_work);
311 }
312 EXPORT_SYMBOL(blk_sync_queue);
313
314 /**
315 * blk_set_pm_only - increment pm_only counter
316 * @q: request queue pointer
317 */
318 void blk_set_pm_only(struct request_queue *q)
319 {
320 atomic_inc(&q->pm_only);
321 }
322 EXPORT_SYMBOL_GPL(blk_set_pm_only);
323
324 void blk_clear_pm_only(struct request_queue *q)
325 {
326 int pm_only;
327
328 pm_only = atomic_dec_return(&q->pm_only);
329 WARN_ON_ONCE(pm_only < 0);
330 if (pm_only == 0)
331 wake_up_all(&q->mq_freeze_wq);
332 }
333 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
334
335 /**
336 * blk_put_queue - decrement the request_queue refcount
337 * @q: the request_queue structure to decrement the refcount for
338 *
339 * Decrements the refcount of the request_queue kobject. When this reaches 0
340 * we'll have blk_release_queue() called.
341 *
342 * Context: Any context, but the last reference must not be dropped from
343 * atomic context.
344 */
345 void blk_put_queue(struct request_queue *q)
346 {
347 kobject_put(&q->kobj);
348 }
349 EXPORT_SYMBOL(blk_put_queue);
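/*
 * A typical pairing with blk_get_queue() (illustrative sketch):
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;
 *	... use q, possibly sleeping ...
 *	blk_put_queue(q);
 */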
350
351 void blk_queue_start_drain(struct request_queue *q)
352 {
353 /*
354 * When the queue DYING flag is set, we need to block new requests
355 * from entering the queue, so call blk_freeze_queue_start() to
356 * prevent I/O from crossing blk_queue_enter().
357 */
358 blk_freeze_queue_start(q);
359 if (queue_is_mq(q))
360 blk_mq_wake_waiters(q);
361 /* Make blk_queue_enter() reexamine the DYING flag. */
362 wake_up_all(&q->mq_freeze_wq);
363 }
364
365 /**
366 * blk_cleanup_queue - shutdown a request queue
367 * @q: request queue to shutdown
368 *
369 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
370 * put it. All future requests will be failed immediately with -ENODEV.
371 *
372 * Context: can sleep
373 */
374 void blk_cleanup_queue(struct request_queue *q)
375 {
376 /* cannot be called from atomic context */
377 might_sleep();
378
379 WARN_ON_ONCE(blk_queue_registered(q));
380
381 /* mark @q DYING, no new request or merges will be allowed afterwards */
382 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
383 blk_queue_start_drain(q);
384
385 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
386 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
387
388 /*
389 * Drain all requests queued before the DYING marking. Set the DEAD flag
390 * to prevent blk_mq_run_hw_queues() from accessing the hardware queues
391 * after draining has finished.
392 */
393 blk_freeze_queue(q);
394
395 /* cleanup rq qos structures for queue without disk */
396 rq_qos_exit(q);
397
398 blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
399
400 blk_sync_queue(q);
401 if (queue_is_mq(q)) {
402 blk_mq_cancel_work_sync(q);
403 blk_mq_exit_queue(q);
404 }
405
406 /*
407 * In theory, the request pool of sched_tags belongs to the request queue.
408 * However, the current implementation requires the tag_set for freeing
409 * requests, so free the pool now.
410 *
411 * The queue has been frozen, so there can't be any in-queue requests and
412 * it is safe to free them now.
413 */
414 mutex_lock(&q->sysfs_lock);
415 if (q->elevator)
416 blk_mq_sched_free_requests(q);
417 mutex_unlock(&q->sysfs_lock);
418
419 /* @q is and will stay empty, shutdown and put */
420 blk_put_queue(q);
421 }
422 EXPORT_SYMBOL(blk_cleanup_queue);
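/*
 * Illustrative teardown ordering for a blk-mq driver that owns its disk
 * and tag set (a sketch, not a requirement imposed by this function):
 *
 *	del_gendisk(disk);
 *	blk_cleanup_queue(disk->queue);
 *	blk_mq_free_tag_set(&set);
 *	put_disk(disk);
 */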
423
424 static bool blk_try_enter_queue(struct request_queue *q, bool pm)
425 {
426 rcu_read_lock();
427 if (!percpu_ref_tryget_live(&q->q_usage_counter))
428 goto fail;
429
430 /*
431 * The code that increments the pm_only counter must ensure that the
432 * counter is globally visible before the queue is unfrozen.
433 */
434 if (blk_queue_pm_only(q) &&
435 (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
436 goto fail_put;
437
438 rcu_read_unlock();
439 return true;
440
441 fail_put:
442 percpu_ref_put(&q->q_usage_counter);
443 fail:
444 rcu_read_unlock();
445 return false;
446 }
447
448 /**
449 * blk_queue_enter() - try to increase q->q_usage_counter
450 * @q: request queue pointer
451 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
452 */
453 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
454 {
455 const bool pm = flags & BLK_MQ_REQ_PM;
456
457 while (!blk_try_enter_queue(q, pm)) {
458 if (flags & BLK_MQ_REQ_NOWAIT)
459 return -EAGAIN;
460
461 /*
462 * This read pairs with the barrier in blk_freeze_queue_start(): we
463 * need to order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
464 * against reading .mq_freeze_depth or the queue dying flag, otherwise
465 * the following wait may never return if the two reads are
466 * reordered.
467 */
468 smp_rmb();
469 wait_event(q->mq_freeze_wq,
470 (!q->mq_freeze_depth &&
471 blk_pm_resume_queue(pm, q)) ||
472 blk_queue_dying(q));
473 if (blk_queue_dying(q))
474 return -ENODEV;
475 }
476
477 return 0;
478 }
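/*
 * Callers pair a successful blk_queue_enter() with blk_queue_exit(),
 * e.g. (illustrative):
 *
 *	if (blk_queue_enter(q, 0))
 *		return;
 *	... dereference q safely ...
 *	blk_queue_exit(q);
 */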
479
480 static inline int bio_queue_enter(struct bio *bio)
481 {
482 struct gendisk *disk = bio->bi_bdev->bd_disk;
483 struct request_queue *q = disk->queue;
484
485 while (!blk_try_enter_queue(q, false)) {
486 if (bio->bi_opf & REQ_NOWAIT) {
487 if (test_bit(GD_DEAD, &disk->state))
488 goto dead;
489 bio_wouldblock_error(bio);
490 return -EAGAIN;
491 }
492
493 /*
494 * This read pairs with the barrier in blk_freeze_queue_start(): we
495 * need to order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
496 * against reading .mq_freeze_depth or the queue dying flag, otherwise
497 * the following wait may never return if the two reads are
498 * reordered.
499 */
500 smp_rmb();
501 wait_event(q->mq_freeze_wq,
502 (!q->mq_freeze_depth &&
503 blk_pm_resume_queue(false, q)) ||
504 test_bit(GD_DEAD, &disk->state));
505 if (test_bit(GD_DEAD, &disk->state))
506 goto dead;
507 }
508
509 return 0;
510 dead:
511 bio_io_error(bio);
512 return -ENODEV;
513 }
514
515 void blk_queue_exit(struct request_queue *q)
516 {
517 percpu_ref_put(&q->q_usage_counter);
518 }
519
520 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
521 {
522 struct request_queue *q =
523 container_of(ref, struct request_queue, q_usage_counter);
524
525 wake_up_all(&q->mq_freeze_wq);
526 }
527
528 static void blk_rq_timed_out_timer(struct timer_list *t)
529 {
530 struct request_queue *q = from_timer(q, t, timeout);
531
532 kblockd_schedule_work(&q->timeout_work);
533 }
534
535 static void blk_timeout_work(struct work_struct *work)
536 {
537 }
538
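/**
 * blk_alloc_queue - allocate and initialise a bare request_queue
 * @node_id: NUMA node to allocate the queue structure on
 *
 * Sets up the queue id, split bioset, stats, timeout machinery, locks and
 * the percpu q_usage_counter.  Callers (for example the blk-mq setup
 * path) attach a tag set and register the queue separately.  Returns
 * NULL on allocation failure.
 */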
539 struct request_queue *blk_alloc_queue(int node_id)
540 {
541 struct request_queue *q;
542 int ret;
543
544 q = kmem_cache_alloc_node(blk_requestq_cachep,
545 GFP_KERNEL | __GFP_ZERO, node_id);
546 if (!q)
547 return NULL;
548
549 q->last_merge = NULL;
550
551 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
552 if (q->id < 0)
553 goto fail_q;
554
555 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
556 if (ret)
557 goto fail_id;
558
559 q->stats = blk_alloc_queue_stats();
560 if (!q->stats)
561 goto fail_split;
562
563 q->node = node_id;
564
565 atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
566
567 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
568 INIT_WORK(&q->timeout_work, blk_timeout_work);
569 INIT_LIST_HEAD(&q->icq_list);
570 #ifdef CONFIG_BLK_CGROUP
571 INIT_LIST_HEAD(&q->blkg_list);
572 #endif
573
574 kobject_init(&q->kobj, &blk_queue_ktype);
575
576 mutex_init(&q->debugfs_mutex);
577 mutex_init(&q->sysfs_lock);
578 mutex_init(&q->sysfs_dir_lock);
579 spin_lock_init(&q->queue_lock);
580
581 init_waitqueue_head(&q->mq_freeze_wq);
582 mutex_init(&q->mq_freeze_lock);
583
584 /*
585 * Init percpu_ref in atomic mode so that it's faster to shutdown.
586 * See blk_register_queue() for details.
587 */
588 if (percpu_ref_init(&q->q_usage_counter,
589 blk_queue_usage_counter_release,
590 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
591 goto fail_stats;
592
593 if (blkcg_init_queue(q))
594 goto fail_ref;
595
596 blk_queue_dma_alignment(q, 511);
597 blk_set_default_limits(&q->limits);
598 q->nr_requests = BLKDEV_MAX_RQ;
599
600 return q;
601
602 fail_ref:
603 percpu_ref_exit(&q->q_usage_counter);
604 fail_stats:
605 blk_free_queue_stats(q->stats);
606 fail_split:
607 bioset_exit(&q->bio_split);
608 fail_id:
609 ida_simple_remove(&blk_queue_ida, q->id);
610 fail_q:
611 kmem_cache_free(blk_requestq_cachep, q);
612 return NULL;
613 }
614
615 /**
616 * blk_get_queue - increment the request_queue refcount
617 * @q: the request_queue structure to increment the refcount for
618 *
619 * Increment the refcount of the request_queue kobject.
620 *
621 * Context: Any context.
622 */
623 bool blk_get_queue(struct request_queue *q)
624 {
625 if (likely(!blk_queue_dying(q))) {
626 __blk_get_queue(q);
627 return true;
628 }
629
630 return false;
631 }
632 EXPORT_SYMBOL(blk_get_queue);
633
634 /**
635 * blk_get_request - allocate a request
636 * @q: request queue to allocate a request for
637 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
638 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
639 */
640 struct request *blk_get_request(struct request_queue *q, unsigned int op,
641 blk_mq_req_flags_t flags)
642 {
643 struct request *req;
644
645 WARN_ON_ONCE(op & REQ_NOWAIT);
646 WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
647
648 req = blk_mq_alloc_request(q, op, flags);
649 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
650 q->mq_ops->initialize_rq_fn(req);
651
652 return req;
653 }
654 EXPORT_SYMBOL(blk_get_request);
655
656 void blk_put_request(struct request *req)
657 {
658 blk_mq_free_request(req);
659 }
660 EXPORT_SYMBOL(blk_put_request);
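/*
 * Illustrative passthrough-style use of the pair above (setup and
 * execution of the command elided):
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in and execute the driver-private command ...
 *	blk_put_request(rq);
 */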
661
662 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
663 {
664 char b[BDEVNAME_SIZE];
665
666 pr_info_ratelimited("attempt to access beyond end of device\n"
667 "%s: rw=%d, want=%llu, limit=%llu\n",
668 bio_devname(bio, b), bio->bi_opf,
669 bio_end_sector(bio), maxsector);
670 }
671
672 #ifdef CONFIG_FAIL_MAKE_REQUEST
673
674 static DECLARE_FAULT_ATTR(fail_make_request);
675
676 static int __init setup_fail_make_request(char *str)
677 {
678 return setup_fault_attr(&fail_make_request, str);
679 }
680 __setup("fail_make_request=", setup_fail_make_request);
681
682 static bool should_fail_request(struct block_device *part, unsigned int bytes)
683 {
684 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
685 }
686
687 static int __init fail_make_request_debugfs(void)
688 {
689 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
690 NULL, &fail_make_request);
691
692 return PTR_ERR_OR_ZERO(dir);
693 }
694
695 late_initcall(fail_make_request_debugfs);
696
697 #else /* CONFIG_FAIL_MAKE_REQUEST */
698
699 static inline bool should_fail_request(struct block_device *part,
700 unsigned int bytes)
701 {
702 return false;
703 }
704
705 #endif /* CONFIG_FAIL_MAKE_REQUEST */
706
707 static inline void bio_check_ro(struct bio *bio)
708 {
709 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
710 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
711 return;
712 pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
713 bio->bi_bdev);
714 /* Older lvm-tools actually trigger this */
715 }
716 }
717
718 static noinline int should_fail_bio(struct bio *bio)
719 {
720 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
721 return -EIO;
722 return 0;
723 }
724 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
725
726 /*
727 * Check whether this bio extends beyond the end of the device or partition.
728 * This may well happen - the kernel calls bread() without checking the size of
729 * the device, e.g., when mounting a file system.
730 */
731 static inline int bio_check_eod(struct bio *bio)
732 {
733 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
734 unsigned int nr_sectors = bio_sectors(bio);
735
736 if (nr_sectors && maxsector &&
737 (nr_sectors > maxsector ||
738 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
739 handle_bad_sector(bio, maxsector);
740 return -EIO;
741 }
742 return 0;
743 }
744
745 /*
746 * Remap block n of partition p to block n+start(p) of the disk.
747 */
748 static int blk_partition_remap(struct bio *bio)
749 {
750 struct block_device *p = bio->bi_bdev;
751
752 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
753 return -EIO;
754 if (bio_sectors(bio)) {
755 bio->bi_iter.bi_sector += p->bd_start_sect;
756 trace_block_bio_remap(bio, p->bd_dev,
757 bio->bi_iter.bi_sector -
758 p->bd_start_sect);
759 }
760 bio_set_flag(bio, BIO_REMAPPED);
761 return 0;
762 }
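/*
 * Example: for a partition that starts at sector 2048, a bio aimed at
 * sector 100 of the partition is remapped to absolute sector 2148 of the
 * whole disk, while the remap tracepoint still reports the original
 * partition-relative sector.
 */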
763
764 /*
765 * Check write append to a zoned block device.
766 */
767 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
768 struct bio *bio)
769 {
770 sector_t pos = bio->bi_iter.bi_sector;
771 int nr_sectors = bio_sectors(bio);
772
773 /* Only applicable to zoned block devices */
774 if (!blk_queue_is_zoned(q))
775 return BLK_STS_NOTSUPP;
776
777 /* The bio sector must point to the start of a sequential zone */
778 if (pos & (blk_queue_zone_sectors(q) - 1) ||
779 !blk_queue_zone_is_seq(q, pos))
780 return BLK_STS_IOERR;
781
782 /*
783 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
784 * split and could result in non-contiguous sectors being written in
785 * different zones.
786 */
787 if (nr_sectors > q->limits.chunk_sectors)
788 return BLK_STS_IOERR;
789
790 /* Make sure the BIO is small enough and will not get split */
791 if (nr_sectors > q->limits.max_zone_append_sectors)
792 return BLK_STS_IOERR;
793
794 bio->bi_opf |= REQ_NOMERGE;
795
796 return BLK_STS_OK;
797 }
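/*
 * Zone sizes are required to be a power-of-two number of sectors, so the
 * "pos & (blk_queue_zone_sectors(q) - 1)" test above is simply the offset
 * of the write within its zone; any non-zero offset means the append does
 * not start at a zone start and is rejected.
 */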
798
799 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
800 {
801 struct block_device *bdev = bio->bi_bdev;
802 struct request_queue *q = bdev->bd_disk->queue;
803 blk_status_t status = BLK_STS_IOERR;
804 struct blk_plug *plug;
805
806 might_sleep();
807
808 plug = blk_mq_plug(q, bio);
809 if (plug && plug->nowait)
810 bio->bi_opf |= REQ_NOWAIT;
811
812 /*
813 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
814 * if queue does not support NOWAIT.
815 */
816 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
817 goto not_supported;
818
819 if (should_fail_bio(bio))
820 goto end_io;
821 bio_check_ro(bio);
822 if (!bio_flagged(bio, BIO_REMAPPED)) {
823 if (unlikely(bio_check_eod(bio)))
824 goto end_io;
825 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
826 goto end_io;
827 }
828
829 /*
830 * Filter flush bios early so that bio based drivers without flush
831 * support don't have to worry about them.
832 */
833 if (op_is_flush(bio->bi_opf) &&
834 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
835 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
836 if (!bio_sectors(bio)) {
837 status = BLK_STS_OK;
838 goto end_io;
839 }
840 }
841
842 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
843 bio_clear_hipri(bio);
844
845 switch (bio_op(bio)) {
846 case REQ_OP_DISCARD:
847 if (!blk_queue_discard(q))
848 goto not_supported;
849 break;
850 case REQ_OP_SECURE_ERASE:
851 if (!blk_queue_secure_erase(q))
852 goto not_supported;
853 break;
854 case REQ_OP_WRITE_SAME:
855 if (!q->limits.max_write_same_sectors)
856 goto not_supported;
857 break;
858 case REQ_OP_ZONE_APPEND:
859 status = blk_check_zone_append(q, bio);
860 if (status != BLK_STS_OK)
861 goto end_io;
862 break;
863 case REQ_OP_ZONE_RESET:
864 case REQ_OP_ZONE_OPEN:
865 case REQ_OP_ZONE_CLOSE:
866 case REQ_OP_ZONE_FINISH:
867 if (!blk_queue_is_zoned(q))
868 goto not_supported;
869 break;
870 case REQ_OP_ZONE_RESET_ALL:
871 if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
872 goto not_supported;
873 break;
874 case REQ_OP_WRITE_ZEROES:
875 if (!q->limits.max_write_zeroes_sectors)
876 goto not_supported;
877 break;
878 default:
879 break;
880 }
881
882 /*
883 * Various block parts want %current->io_context, so allocate it up
884 * front rather than dealing with lots of pain to allocate it only
885 * where needed. This may fail and the block layer knows how to live
886 * with it.
887 */
888 if (unlikely(!current->io_context))
889 create_task_io_context(current, GFP_ATOMIC, q->node);
890
891 if (blk_throtl_bio(bio))
892 return false;
893
894 blk_cgroup_bio_start(bio);
895 blkcg_bio_issue_init(bio);
896
897 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
898 trace_block_bio_queue(bio);
899 /* Now that enqueuing has been traced, we need to trace
900 * completion as well.
901 */
902 bio_set_flag(bio, BIO_TRACE_COMPLETION);
903 }
904 return true;
905
906 not_supported:
907 status = BLK_STS_NOTSUPP;
908 end_io:
909 bio->bi_status = status;
910 bio_endio(bio);
911 return false;
912 }
913
914 static blk_qc_t __submit_bio(struct bio *bio)
915 {
916 struct gendisk *disk = bio->bi_bdev->bd_disk;
917 blk_qc_t ret = BLK_QC_T_NONE;
918
919 if (unlikely(bio_queue_enter(bio) != 0))
920 return BLK_QC_T_NONE;
921
922 if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
923 goto queue_exit;
924 if (disk->fops->submit_bio) {
925 ret = disk->fops->submit_bio(bio);
926 goto queue_exit;
927 }
928 return blk_mq_submit_bio(bio);
929
930 queue_exit:
931 blk_queue_exit(disk->queue);
932 return ret;
933 }
934
935 /*
936 * The loop in this function may be a bit non-obvious, and so deserves some
937 * explanation:
938 *
939 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
940 * that), so we have a list with a single bio.
941 * - We pretend that we have just taken it off a longer list, so we assign
942 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
943 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
944 * bios through a recursive call to submit_bio_noacct. If it did, we find a
945 * non-NULL value in bio_list and re-enter the loop from the top.
946 * - In this case we really did just take the bio off the top of the list (no
947 * pretending) and so remove it from bio_list, and call into ->submit_bio()
948 * again.
949 *
950 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
951 * bio_list_on_stack[1] contains bios that were submitted before the current
952 * ->submit_bio, but that haven't been processed yet.
953 */
954 static blk_qc_t __submit_bio_noacct(struct bio *bio)
955 {
956 struct bio_list bio_list_on_stack[2];
957 blk_qc_t ret = BLK_QC_T_NONE;
958
959 BUG_ON(bio->bi_next);
960
961 bio_list_init(&bio_list_on_stack[0]);
962 current->bio_list = bio_list_on_stack;
963
964 do {
965 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
966 struct bio_list lower, same;
967
968 /*
969 * Create a fresh bio_list for all subordinate requests.
970 */
971 bio_list_on_stack[1] = bio_list_on_stack[0];
972 bio_list_init(&bio_list_on_stack[0]);
973
974 ret = __submit_bio(bio);
975
976 /*
977 * Sort new bios into those for a lower level and those for the
978 * same level.
979 */
980 bio_list_init(&lower);
981 bio_list_init(&same);
982 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
983 if (q == bio->bi_bdev->bd_disk->queue)
984 bio_list_add(&same, bio);
985 else
986 bio_list_add(&lower, bio);
987
988 /*
989 * Now assemble so we handle the lowest level first.
990 */
991 bio_list_merge(&bio_list_on_stack[0], &lower);
992 bio_list_merge(&bio_list_on_stack[0], &same);
993 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
994 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
995
996 current->bio_list = NULL;
997 return ret;
998 }
999
1000 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1001 {
1002 struct bio_list bio_list[2] = { };
1003 blk_qc_t ret;
1004
1005 current->bio_list = bio_list;
1006
1007 do {
1008 ret = __submit_bio(bio);
1009 } while ((bio = bio_list_pop(&bio_list[0])));
1010
1011 current->bio_list = NULL;
1012 return ret;
1013 }
1014
1015 /**
1016 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1017 * @bio: The bio describing the location in memory and on the device.
1018 *
1019 * This is a version of submit_bio() that shall only be used for I/O that is
1020 * resubmitted to lower level drivers by stacking block drivers. All file
1021 * systems and other upper level users of the block layer should use
1022 * submit_bio() instead.
1023 */
1024 blk_qc_t submit_bio_noacct(struct bio *bio)
1025 {
1026 /*
1027 * We only want one ->submit_bio to be active at a time, else stack
1028 * usage with stacked devices could be a problem. Use current->bio_list
1029 * to collect a list of requests submitted by a ->submit_bio method while
1030 * it is active, and then process them after it has returned.
1031 */
1032 if (current->bio_list) {
1033 bio_list_add(&current->bio_list[0], bio);
1034 return BLK_QC_T_NONE;
1035 }
1036
1037 if (!bio->bi_bdev->bd_disk->fops->submit_bio)
1038 return __submit_bio_noacct_mq(bio);
1039 return __submit_bio_noacct(bio);
1040 }
1041 EXPORT_SYMBOL(submit_bio_noacct);
1042
1043 static void bio_set_ioprio(struct bio *bio)
1044 {
1045 /* Nobody set ioprio so far? Initialize it based on task's nice value */
1046 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
1047 bio->bi_ioprio = get_current_ioprio();
1048 blkcg_set_ioprio(bio);
1049 }
1050
1051 /**
1052 * submit_bio - submit a bio to the block device layer for I/O
1053 * @bio: The &struct bio which describes the I/O
1054 *
1055 * submit_bio() is used to submit I/O requests to block devices. It is passed a
1056 * fully set up &struct bio that describes the I/O that needs to be done. The
1057 * bio will be sent to the device described by the bi_bdev field.
1058 *
1059 * The success/failure status of the request, along with notification of
1060 * completion, is delivered asynchronously through the ->bi_end_io() callback
1061 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
1062 * been called.
1063 */
1064 blk_qc_t submit_bio(struct bio *bio)
1065 {
1066 if (blkcg_punt_bio_submit(bio))
1067 return BLK_QC_T_NONE;
1068
1069 /*
1070 * If it's a regular read/write or a barrier with data attached,
1071 * go through the normal accounting stuff before submission.
1072 */
1073 if (bio_has_data(bio)) {
1074 unsigned int count;
1075
1076 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1077 count = queue_logical_block_size(
1078 bio->bi_bdev->bd_disk->queue) >> 9;
1079 else
1080 count = bio_sectors(bio);
1081
1082 if (op_is_write(bio_op(bio))) {
1083 count_vm_events(PGPGOUT, count);
1084 } else {
1085 task_io_account_read(bio->bi_iter.bi_size);
1086 count_vm_events(PGPGIN, count);
1087 }
1088 }
1089
1090 bio_set_ioprio(bio);
1091
1092 /*
1093 * If we're reading data that is part of the userspace workingset, count
1094 * submission time as memory stall. When the device is congested, or
1095 * the submitting cgroup is IO-throttled, submission can be a significant
1096 * part of overall IO time.
1097 */
1098 if (unlikely(bio_op(bio) == REQ_OP_READ &&
1099 bio_flagged(bio, BIO_WORKINGSET))) {
1100 unsigned long pflags;
1101 blk_qc_t ret;
1102
1103 psi_memstall_enter(&pflags);
1104 ret = submit_bio_noacct(bio);
1105 psi_memstall_leave(&pflags);
1106
1107 return ret;
1108 }
1109
1110 return submit_bio_noacct(bio);
1111 }
1112 EXPORT_SYMBOL(submit_bio);
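/*
 * A minimal submission sketch for an upper layer (illustrative only; the
 * bdev, page, sector and my_end_io handler are hypothetical):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */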
1113
1114 /**
1115 * blk_cloned_rq_check_limits - Helper function to check a cloned request
1116 * for the new queue limits
1117 * @q: the queue
1118 * @rq: the request being checked
1119 *
1120 * Description:
1121 * @rq may have been made based on weaker limitations of upper-level queues
1122 * in request stacking drivers, and it may violate the limitation of @q.
1123 * Since the block layer and the underlying device driver trust @rq
1124 * after it is inserted to @q, it should be checked against @q before
1125 * the insertion using this generic function.
1126 *
1127 * Request stacking drivers like request-based dm may change the queue
1128 * limits when retrying requests on other queues. Those requests need
1129 * to be checked against the new queue limits again during dispatch.
1130 */
1131 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1132 struct request *rq)
1133 {
1134 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1135
1136 if (blk_rq_sectors(rq) > max_sectors) {
1137 /*
1138 * A SCSI device does not have a good way to report whether
1139 * Write Same/Zero is actually supported. If a device rejects
1140 * a non-read/write command (discard, write same, etc.) the
1141 * low-level device driver will set the relevant queue limit to
1142 * 0 to prevent blk-lib from issuing more of the offending
1143 * operations. Commands queued prior to the queue limit being
1144 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1145 * errors being propagated to upper layers.
1146 */
1147 if (max_sectors == 0)
1148 return BLK_STS_NOTSUPP;
1149
1150 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1151 __func__, blk_rq_sectors(rq), max_sectors);
1152 return BLK_STS_IOERR;
1153 }
1154
1155 /*
1156 * The queue settings related to segment counting may differ from the
1157 * original queue.
1158 */
1159 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1160 if (rq->nr_phys_segments > queue_max_segments(q)) {
1161 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1162 __func__, rq->nr_phys_segments, queue_max_segments(q));
1163 return BLK_STS_IOERR;
1164 }
1165
1166 return BLK_STS_OK;
1167 }
1168
1169 /**
1170 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1171 * @q: the queue to submit the request
1172 * @rq: the request being queued
1173 */
1174 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1175 {
1176 blk_status_t ret;
1177
1178 ret = blk_cloned_rq_check_limits(q, rq);
1179 if (ret != BLK_STS_OK)
1180 return ret;
1181
1182 if (rq->rq_disk &&
1183 should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
1184 return BLK_STS_IOERR;
1185
1186 if (blk_crypto_insert_cloned_request(rq))
1187 return BLK_STS_IOERR;
1188
1189 if (blk_queue_io_stat(q))
1190 blk_account_io_start(rq);
1191
1192 /*
1193 * Since we have a scheduler attached on the top device,
1194 * bypass a potential scheduler on the bottom device for
1195 * insert.
1196 */
1197 return blk_mq_request_issue_directly(rq, true);
1198 }
1199 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1200
1201 /**
1202 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1203 * @rq: request to examine
1204 *
1205 * Description:
1206 * A request could be a merge of I/Os which require different failure
1207 * handling. This function determines the number of bytes which
1208 * can be failed from the beginning of the request without
1209 * crossing into an area which needs to be retried further.
1210 *
1211 * Return:
1212 * The number of bytes to fail.
1213 */
1214 unsigned int blk_rq_err_bytes(const struct request *rq)
1215 {
1216 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1217 unsigned int bytes = 0;
1218 struct bio *bio;
1219
1220 if (!(rq->rq_flags & RQF_MIXED_MERGE))
1221 return blk_rq_bytes(rq);
1222
1223 /*
1224 * Currently the only 'mixing' which can happen is between
1225 * different fastfail types. We can safely fail portions
1226 * which have all the failfast bits that the first one has -
1227 * the ones which are at least as eager to fail as the first
1228 * one.
1229 */
1230 for (bio = rq->bio; bio; bio = bio->bi_next) {
1231 if ((bio->bi_opf & ff) != ff)
1232 break;
1233 bytes += bio->bi_iter.bi_size;
1234 }
1235
1236 /* this could lead to infinite loop */
1237 BUG_ON(blk_rq_bytes(rq) && !bytes);
1238 return bytes;
1239 }
1240 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1241
1242 static void update_io_ticks(struct block_device *part, unsigned long now,
1243 bool end)
1244 {
1245 unsigned long stamp;
1246 again:
1247 stamp = READ_ONCE(part->bd_stamp);
1248 if (unlikely(time_after(now, stamp))) {
1249 if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
1250 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1251 }
1252 if (part->bd_partno) {
1253 part = bdev_whole(part);
1254 goto again;
1255 }
1256 }
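/*
 * The io_ticks accounting above is deliberately approximate: when an I/O
 * starts and the per-device stamp is stale, a single jiffy is added; when
 * an I/O completes, the whole "now - stamp" interval is added.  io_ticks
 * therefore approximates the time the device had I/O in flight rather
 * than the sum of per-request service times.
 */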
1257
1258 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1259 {
1260 if (req->part && blk_do_io_stat(req)) {
1261 const int sgrp = op_stat_group(req_op(req));
1262
1263 part_stat_lock();
1264 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
1265 part_stat_unlock();
1266 }
1267 }
1268
1269 void blk_account_io_done(struct request *req, u64 now)
1270 {
1271 /*
1272 * Account IO completion. flush_rq isn't accounted as a
1273 * normal IO on queueing nor completion. Accounting the
1274 * containing request is enough.
1275 */
1276 trace_android_vh_blk_account_io_done(req);
1277 if (req->part && blk_do_io_stat(req) &&
1278 !(req->rq_flags & RQF_FLUSH_SEQ)) {
1279 const int sgrp = op_stat_group(req_op(req));
1280
1281 part_stat_lock();
1282 update_io_ticks(req->part, jiffies, true);
1283 part_stat_inc(req->part, ios[sgrp]);
1284 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
1285 part_stat_unlock();
1286 }
1287 }
1288
1289 void blk_account_io_start(struct request *rq)
1290 {
1291 if (!blk_do_io_stat(rq))
1292 return;
1293
1294 /* passthrough requests can hold bios that do not have ->bi_bdev set */
1295 if (rq->bio && rq->bio->bi_bdev)
1296 rq->part = rq->bio->bi_bdev;
1297 else
1298 rq->part = rq->rq_disk->part0;
1299
1300 part_stat_lock();
1301 update_io_ticks(rq->part, jiffies, false);
1302 part_stat_unlock();
1303 }
1304
1305 static unsigned long __part_start_io_acct(struct block_device *part,
1306 unsigned int sectors, unsigned int op,
1307 unsigned long start_time)
1308 {
1309 const int sgrp = op_stat_group(op);
1310
1311 part_stat_lock();
1312 update_io_ticks(part, start_time, false);
1313 part_stat_inc(part, ios[sgrp]);
1314 part_stat_add(part, sectors[sgrp], sectors);
1315 part_stat_local_inc(part, in_flight[op_is_write(op)]);
1316 part_stat_unlock();
1317
1318 return start_time;
1319 }
1320
1321 /**
1322 * bio_start_io_acct_time - start I/O accounting for bio based drivers
1323 * @bio: bio to start account for
1324 * @start_time: start time that should be passed back to bio_end_io_acct().
1325 */
1326 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
1327 {
1328 __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1329 bio_op(bio), start_time);
1330 }
1331 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
1332
1333 /**
1334 * bio_start_io_acct - start I/O accounting for bio based drivers
1335 * @bio: bio to start account for
1336 *
1337 * Returns the start time that should be passed back to bio_end_io_acct().
1338 */
1339 unsigned long bio_start_io_acct(struct bio *bio)
1340 {
1341 return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1342 bio_op(bio), jiffies);
1343 }
1344 EXPORT_SYMBOL_GPL(bio_start_io_acct);
1345
1346 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1347 unsigned int op)
1348 {
1349 return __part_start_io_acct(disk->part0, sectors, op, jiffies);
1350 }
1351 EXPORT_SYMBOL(disk_start_io_acct);
1352
1353 static void __part_end_io_acct(struct block_device *part, unsigned int op,
1354 unsigned long start_time)
1355 {
1356 const int sgrp = op_stat_group(op);
1357 unsigned long now = READ_ONCE(jiffies);
1358 unsigned long duration = now - start_time;
1359
1360 part_stat_lock();
1361 update_io_ticks(part, now, true);
1362 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1363 part_stat_local_dec(part, in_flight[op_is_write(op)]);
1364 part_stat_unlock();
1365 }
1366
1367 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1368 struct block_device *orig_bdev)
1369 {
1370 __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
1371 }
1372 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
1373
1374 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1375 unsigned long start_time)
1376 {
1377 __part_end_io_acct(disk->part0, op, start_time);
1378 }
1379 EXPORT_SYMBOL(disk_end_io_acct);
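/*
 * Bio based drivers are expected to bracket each bio with these helpers,
 * e.g. (illustrative):
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	... perform the I/O ...
 *	bio_end_io_acct(bio, start);
 *
 * where bio_end_io_acct() is the inline wrapper around
 * bio_end_io_acct_remapped() provided by the block headers.
 */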
1380
1381 /*
1382 * Steal bios from a request and add them to a bio list.
1383 * The request must not have been partially completed before.
1384 */
1385 void blk_steal_bios(struct bio_list *list, struct request *rq)
1386 {
1387 if (rq->bio) {
1388 if (list->tail)
1389 list->tail->bi_next = rq->bio;
1390 else
1391 list->head = rq->bio;
1392 list->tail = rq->biotail;
1393
1394 rq->bio = NULL;
1395 rq->biotail = NULL;
1396 }
1397
1398 rq->__data_len = 0;
1399 }
1400 EXPORT_SYMBOL_GPL(blk_steal_bios);
1401
1402 /**
1403 * blk_update_request - Complete multiple bytes without completing the request
1404 * @req: the request being processed
1405 * @error: block status code
1406 * @nr_bytes: number of bytes to complete for @req
1407 *
1408 * Description:
1409 * Ends I/O on a number of bytes attached to @req, but doesn't complete
1410 * the request structure even if @req doesn't have leftover.
1411 * If @req has leftover, sets it up for the next range of segments.
1412 *
1413 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1414 * %false return from this function.
1415 *
1416 * Note:
1417 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
1418 * except in the consistency check at the end of this function.
1419 *
1420 * Return:
1421 * %false - this request doesn't have any more data
1422 * %true - this request has more data
1423 **/
1424 bool blk_update_request(struct request *req, blk_status_t error,
1425 unsigned int nr_bytes)
1426 {
1427 int total_bytes;
1428
1429 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1430
1431 if (!req->bio)
1432 return false;
1433
1434 #ifdef CONFIG_BLK_DEV_INTEGRITY
1435 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1436 error == BLK_STS_OK)
1437 req->q->integrity.profile->complete_fn(req, nr_bytes);
1438 #endif
1439
1440 /*
1441 * Upper layers may call blk_crypto_evict_key() anytime after the last
1442 * bio_endio(). Therefore, the keyslot must be released before that.
1443 */
1444 if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
1445 __blk_crypto_rq_put_keyslot(req);
1446
1447 if (unlikely(error && !blk_rq_is_passthrough(req) &&
1448 !(req->rq_flags & RQF_QUIET)))
1449 print_req_error(req, error, __func__);
1450
1451 blk_account_io_completion(req, nr_bytes);
1452
1453 total_bytes = 0;
1454 while (req->bio) {
1455 struct bio *bio = req->bio;
1456 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1457
1458 if (bio_bytes == bio->bi_iter.bi_size)
1459 req->bio = bio->bi_next;
1460
1461 /* Completion has already been traced */
1462 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1463 req_bio_endio(req, bio, bio_bytes, error);
1464
1465 total_bytes += bio_bytes;
1466 nr_bytes -= bio_bytes;
1467
1468 if (!nr_bytes)
1469 break;
1470 }
1471
1472 /*
1473 * completely done
1474 */
1475 if (!req->bio) {
1476 /*
1477 * Reset counters so that the request stacking driver
1478 * can find how many bytes remain in the request
1479 * later.
1480 */
1481 req->__data_len = 0;
1482 return false;
1483 }
1484
1485 req->__data_len -= total_bytes;
1486
1487 /* update sector only for requests with clear definition of sector */
1488 if (!blk_rq_is_passthrough(req))
1489 req->__sector += total_bytes >> 9;
1490
1491 /* mixed attributes always follow the first bio */
1492 if (req->rq_flags & RQF_MIXED_MERGE) {
1493 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1494 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1495 }
1496
1497 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1498 /*
1499 * If total number of sectors is less than the first segment
1500 * size, something has gone terribly wrong.
1501 */
1502 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1503 blk_dump_rq_flags(req, "request botched");
1504 req->__data_len = blk_rq_cur_bytes(req);
1505 }
1506
1507 /* recalculate the number of segments */
1508 req->nr_phys_segments = blk_recalc_rq_segments(req);
1509 }
1510
1511 return true;
1512 }
1513 EXPORT_SYMBOL_GPL(blk_update_request);
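/*
 * Drivers normally reach blk_update_request() via blk_mq_end_request(),
 * but a driver completing a request piecewise could do something like
 * (illustrative):
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, bytes))
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 *
 * i.e. only finish the request once no bios remain attached to it.
 */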
1514
1515 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1516 /**
1517 * rq_flush_dcache_pages - Helper function to flush all pages in a request
1518 * @rq: the request to be flushed
1519 *
1520 * Description:
1521 * Flush all pages in @rq.
1522 */
1523 void rq_flush_dcache_pages(struct request *rq)
1524 {
1525 struct req_iterator iter;
1526 struct bio_vec bvec;
1527
1528 rq_for_each_segment(bvec, rq, iter)
1529 flush_dcache_page(bvec.bv_page);
1530 }
1531 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1532 #endif
1533
1534 /**
1535 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1536 * @q : the queue of the device being checked
1537 *
1538 * Description:
1539 * Check if underlying low-level drivers of a device are busy.
1540 * If the drivers want to export their busy state, they must set their
1541 * own exporting function using blk_queue_lld_busy() first.
1542 *
1543 * Basically, this function is used only by request stacking drivers
1544 * to stop dispatching requests to underlying devices when underlying
1545 * devices are busy. This behavior allows more I/O merging on the queue
1546 * of the request stacking driver and prevents I/O throughput regression
1547 * on burst I/O load.
1548 *
1549 * Return:
1550 * 0 - Not busy (The request stacking driver should dispatch request)
1551 * 1 - Busy (The request stacking driver should stop dispatching request)
1552 */
1553 int blk_lld_busy(struct request_queue *q)
1554 {
1555 if (queue_is_mq(q) && q->mq_ops->busy)
1556 return q->mq_ops->busy(q);
1557
1558 return 0;
1559 }
1560 EXPORT_SYMBOL_GPL(blk_lld_busy);
1561
1562 /**
1563 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1564 * @rq: the clone request to be cleaned up
1565 *
1566 * Description:
1567 * Free all bios in @rq for a cloned request.
1568 */
1569 void blk_rq_unprep_clone(struct request *rq)
1570 {
1571 struct bio *bio;
1572
1573 while ((bio = rq->bio) != NULL) {
1574 rq->bio = bio->bi_next;
1575
1576 bio_put(bio);
1577 }
1578 }
1579 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1580
1581 /**
1582 * blk_rq_prep_clone - Helper function to setup clone request
1583 * @rq: the request to be setup
1584 * @rq_src: original request to be cloned
1585 * @bs: bio_set that bios for clone are allocated from
1586 * @gfp_mask: memory allocation mask for bio
1587 * @bio_ctr: setup function to be called for each clone bio.
1588 * Returns %0 for success, non %0 for failure.
1589 * @data: private data to be passed to @bio_ctr
1590 *
1591 * Description:
1592 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1593 * Also, pages which the original bios are pointing to are not copied
1594 * and the cloned bios just point to the same pages.
1595 * So cloned bios must be completed before original bios, which means
1596 * the caller must complete @rq before @rq_src.
1597 */
1598 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1599 struct bio_set *bs, gfp_t gfp_mask,
1600 int (*bio_ctr)(struct bio *, struct bio *, void *),
1601 void *data)
1602 {
1603 struct bio *bio, *bio_src;
1604
1605 if (!bs)
1606 bs = &fs_bio_set;
1607
1608 __rq_for_each_bio(bio_src, rq_src) {
1609 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1610 if (!bio)
1611 goto free_and_out;
1612
1613 if (bio_ctr && bio_ctr(bio, bio_src, data))
1614 goto free_and_out;
1615
1616 if (rq->bio) {
1617 rq->biotail->bi_next = bio;
1618 rq->biotail = bio;
1619 } else {
1620 rq->bio = rq->biotail = bio;
1621 }
1622 bio = NULL;
1623 }
1624
1625 /* Copy attributes of the original request to the clone request. */
1626 rq->__sector = blk_rq_pos(rq_src);
1627 rq->__data_len = blk_rq_bytes(rq_src);
1628 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1629 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1630 rq->special_vec = rq_src->special_vec;
1631 }
1632 rq->nr_phys_segments = rq_src->nr_phys_segments;
1633 rq->ioprio = rq_src->ioprio;
1634
1635 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1636 goto free_and_out;
1637
1638 return 0;
1639
1640 free_and_out:
1641 if (bio)
1642 bio_put(bio);
1643 blk_rq_unprep_clone(rq);
1644
1645 return -ENOMEM;
1646 }
1647 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
1648
1649 int kblockd_schedule_work(struct work_struct *work)
1650 {
1651 return queue_work(kblockd_workqueue, work);
1652 }
1653 EXPORT_SYMBOL(kblockd_schedule_work);
1654
1655 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1656 unsigned long delay)
1657 {
1658 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1659 }
1660 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1661
1662 /**
1663 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1664 * @plug: The &struct blk_plug that needs to be initialized
1665 *
1666 * Description:
1667 * blk_start_plug() indicates to the block layer an intent by the caller
1668 * to submit multiple I/O requests in a batch. The block layer may use
1669 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1670 * is called. However, the block layer may choose to submit requests
1671 * before a call to blk_finish_plug() if the number of queued I/Os
1672 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1673 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1674 * the task schedules (see below).
1675 *
1676 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1677 * pending I/O should the task end up blocking between blk_start_plug() and
1678 * blk_finish_plug(). This is important from a performance perspective, but
1679 * also ensures that we don't deadlock. For instance, if the task is blocking
1680 * for a memory allocation, memory reclaim could end up wanting to free a
1681 * page belonging to that request that is currently residing in our private
1682 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1683 * this kind of deadlock.
1684 */
1685 void blk_start_plug(struct blk_plug *plug)
1686 {
1687 struct task_struct *tsk = current;
1688
1689 /*
1690 * If this is a nested plug, don't actually assign it.
1691 */
1692 if (tsk->plug)
1693 return;
1694
1695 INIT_LIST_HEAD(&plug->mq_list);
1696 INIT_LIST_HEAD(&plug->cb_list);
1697 plug->rq_count = 0;
1698 plug->multiple_queues = false;
1699 plug->nowait = false;
1700
1701 /*
1702 * Store ordering should not be needed here, since a potential
1703 * preempt will imply a full memory barrier
1704 */
1705 tsk->plug = plug;
1706 }
1707 EXPORT_SYMBOL(blk_start_plug);
1708
1709 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1710 {
1711 LIST_HEAD(callbacks);
1712
1713 while (!list_empty(&plug->cb_list)) {
1714 list_splice_init(&plug->cb_list, &callbacks);
1715
1716 while (!list_empty(&callbacks)) {
1717 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1718 struct blk_plug_cb,
1719 list);
1720 list_del(&cb->list);
1721 cb->callback(cb, from_schedule);
1722 }
1723 }
1724 }
1725
1726 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1727 int size)
1728 {
1729 struct blk_plug *plug = current->plug;
1730 struct blk_plug_cb *cb;
1731
1732 if (!plug)
1733 return NULL;
1734
1735 list_for_each_entry(cb, &plug->cb_list, list)
1736 if (cb->callback == unplug && cb->data == data)
1737 return cb;
1738
1739 /* Not currently on the callback list */
1740 BUG_ON(size < sizeof(*cb));
1741 cb = kzalloc(size, GFP_ATOMIC);
1742 if (cb) {
1743 cb->data = data;
1744 cb->callback = unplug;
1745 list_add(&cb->list, &plug->cb_list);
1746 }
1747 return cb;
1748 }
1749 EXPORT_SYMBOL(blk_check_plugged);
1750
1751 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1752 {
1753 flush_plug_callbacks(plug, from_schedule);
1754
1755 if (!list_empty(&plug->mq_list))
1756 blk_mq_flush_plug_list(plug, from_schedule);
1757 }
1758
1759 /**
1760 * blk_finish_plug - mark the end of a batch of submitted I/O
1761 * @plug: The &struct blk_plug passed to blk_start_plug()
1762 *
1763 * Description:
1764 * Indicate that a batch of I/O submissions is complete. This function
1765 * must be paired with an initial call to blk_start_plug(). The intent
1766 * is to allow the block layer to optimize I/O submission. See the
1767 * documentation for blk_start_plug() for more information.
1768 */
1769 void blk_finish_plug(struct blk_plug *plug)
1770 {
1771 if (plug != current->plug)
1772 return;
1773 blk_flush_plug_list(plug, false);
1774
1775 current->plug = NULL;
1776 }
1777 EXPORT_SYMBOL(blk_finish_plug);
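/*
 * Typical plugging pattern for a submitter issuing a batch of bios
 * (illustrative):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for each bio in the batch:
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */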
1778
1779 void blk_io_schedule(void)
1780 {
1781 /* Prevent hang_check timer from firing at us during very long I/O */
1782 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1783
1784 if (timeout)
1785 io_schedule_timeout(timeout);
1786 else
1787 io_schedule();
1788 }
1789 EXPORT_SYMBOL_GPL(blk_io_schedule);
1790
1791 int __init blk_dev_init(void)
1792 {
1793 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1794 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1795 sizeof_field(struct request, cmd_flags));
1796 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1797 sizeof_field(struct bio, bi_opf));
1798
1799 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1800 kblockd_workqueue = alloc_workqueue("kblockd",
1801 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1802 if (!kblockd_workqueue)
1803 panic("Failed to create kblockd\n");
1804
1805 blk_requestq_cachep = kmem_cache_create("request_queue",
1806 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1807
1808 blk_debugfs_root = debugfs_create_dir("block", NULL);
1809
1810 return 0;
1811 }
1812