// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#ifndef __GENKSYMS__
#include "blk-mq-debugfs.h"
#endif
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

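/*
 * Example (illustrative sketch, not part of this file): drivers
 * typically set queue flags during device setup, e.g. to mark a
 * device as non-rotational:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 */
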
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX matching a REQ_OP_XXX value.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert REQ_OP_XXX into
 * string format. Useful when debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

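/*
 * Example (illustrative sketch only): drivers translate between the two
 * error domains at the boundary, e.g. when completing work tracked with
 * a driver-internal errno:
 *
 *	blk_status_t sts = errno_to_blk_status(ret);
 *	...
 *	return blk_status_to_errno(sts);
 */
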
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * Read pairs with the barrier in blk_freeze_queue_start():
		 * we need to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

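/*
 * Example (illustrative sketch only): a caller that needs the queue to
 * stay usable around a short operation pairs blk_queue_enter() with
 * blk_queue_exit():
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EAGAIN;
 *	...operate on q...
 *	blk_queue_exit(q);
 */
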
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * Read pairs with the barrier in blk_freeze_queue_start():
		 * we need to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	kobject_get(&q->kobj);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);

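/*
 * Example (illustrative sketch only): pin a queue across a sleeping
 * operation; blk_get_queue() fails once the queue is dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;
 *	...use q...
 *	blk_put_queue(q);
 */
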
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;
		pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
				    bio->bi_bdev);
		/* Older lvm-tools actually trigger this */
	}
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!disk->fops->submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it has returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

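/*
 * Example (illustrative sketch only, error handling elided): reading a
 * single page from a block device; "my_end_io" is a hypothetical
 * completion handler.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */
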
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with @bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/*
	 * As the requests that require a zone lock are not plugged in the
	 * first place, directly accessing the plug instead of using
	 * blk_mq_plug() should not have any consequences during flushing for
	 * zoned devices.
	 */
	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that. If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them. As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

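/*
 * Example (illustrative sketch only): a submitter of a REQ_POLLED bio
 * spinning for its own completion; "done" is a hypothetical flag set by
 * the bio's ->bi_end_io handler.
 *
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */
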
/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
	 *     simply do nothing in this case
	 *  2) the bio points to a device that is not poll enabled.  bio_poll
	 *     will catch this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
				 unsigned int sectors, enum req_op op,
				 unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 * @start_time:	start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			   bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				  bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

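/*
 * Example (illustrative sketch only): a bio based driver bracketing the
 * life of a bio for accounting; bio_end_io_acct() is the inline wrapper
 * around bio_end_io_acct_remapped() from <linux/blkdev.h>.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...drive the I/O...
 *	bio_end_io_acct(bio, start);
 */
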
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set their own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

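/*
 * Example (illustrative sketch only): a request stacking driver backing
 * off when the underlying device reports busy, retrying the dispatch
 * later instead of queueing more work.
 *
 *	if (blk_lld_busy(bdev_get_queue(bdev)))
 *		return BLK_STS_RESOURCE;
 */
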
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

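/*
 * Example (illustrative sketch only): batching a burst of bios under one
 * on-stack plug so the block layer can merge and dispatch them together
 * at blk_finish_plug() time.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */
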
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

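/*
 * Example (illustrative sketch, modeled on how stacking drivers such as
 * md use this): register a per-plug callback that runs at unplug time;
 * "my_unplug" is a hypothetical blk_plug_cb_fn.
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, dev, sizeof(*cb));
 *	if (cb)
 *		...I/O is held until my_unplug() runs at unplug time...
 */
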
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);
	blk_mq_debugfs_init();

	return 0;
}
1232