1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1991, 1992 Linus Torvalds
4  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
5  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
6  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8  *	-  July 2000
9  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
10  */
11 
12 /*
13  * This handles all read/write requests to block devices
14  */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/backing-dev.h>
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/blk-mq.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/blk-cgroup.h>
38 #include <linux/t10-pi.h>
39 #include <linux/debugfs.h>
40 #include <linux/bpf.h>
41 #include <linux/psi.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
44 
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
47 
48 #include "blk.h"
49 #include "blk-mq.h"
50 #include "blk-mq-sched.h"
51 #include "blk-pm.h"
52 #include "blk-rq-qos.h"
53 
54 struct dentry *blk_debugfs_root;
55 
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_queue);
62 EXPORT_TRACEPOINT_SYMBOL_GPL(block_getrq);
63 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
64 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_issue);
65 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
66 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
67 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);
68 
69 #undef CREATE_TRACE_POINTS
70 #include <trace/hooks/block.h>
71 
72 DEFINE_IDA(blk_queue_ida);
73 
74 /*
75  * For queue allocation
76  */
77 struct kmem_cache *blk_requestq_cachep;
78 
79 /*
80  * Controlling structure to kblockd
81  */
82 static struct workqueue_struct *kblockd_workqueue;
83 
84 /**
85  * blk_queue_flag_set - atomically set a queue flag
86  * @flag: flag to be set
87  * @q: request queue
88  */
89 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
90 {
91 	set_bit(flag, &q->queue_flags);
92 }
93 EXPORT_SYMBOL(blk_queue_flag_set);
94 
95 /**
96  * blk_queue_flag_clear - atomically clear a queue flag
97  * @flag: flag to be cleared
98  * @q: request queue
99  */
100 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
101 {
102 	clear_bit(flag, &q->queue_flags);
103 }
104 EXPORT_SYMBOL(blk_queue_flag_clear);
105 
106 /**
107  * blk_queue_flag_test_and_set - atomically test and set a queue flag
108  * @flag: flag to be set
109  * @q: request queue
110  *
111  * Returns the previous value of @flag - 0 if the flag was not set and 1 if
112  * the flag was already set.
113  */
114 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
115 {
116 	return test_and_set_bit(flag, &q->queue_flags);
117 }
118 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
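/*
 * Example: a driver that wants to suppress merging on a queue it owns can
 * combine these helpers roughly as below (a minimal sketch; "q" is assumed
 * to be a live request_queue belonging to the caller):
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	if (blk_queue_flag_test_and_set(QUEUE_FLAG_NOXMERGES, q))
 *		pr_debug("extended merging was already disabled\n");
 *	...
 *	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 */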
119 
120 void blk_rq_init(struct request_queue *q, struct request *rq)
121 {
122 	memset(rq, 0, sizeof(*rq));
123 
124 	INIT_LIST_HEAD(&rq->queuelist);
125 	rq->q = q;
126 	rq->__sector = (sector_t) -1;
127 	INIT_HLIST_NODE(&rq->hash);
128 	RB_CLEAR_NODE(&rq->rb_node);
129 	rq->tag = BLK_MQ_NO_TAG;
130 	rq->internal_tag = BLK_MQ_NO_TAG;
131 	rq->start_time_ns = ktime_get_ns();
132 	rq->part = NULL;
133 	blk_crypto_rq_set_defaults(rq);
134 }
135 EXPORT_SYMBOL(blk_rq_init);
136 
137 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
138 static const char *const blk_op_name[] = {
139 	REQ_OP_NAME(READ),
140 	REQ_OP_NAME(WRITE),
141 	REQ_OP_NAME(FLUSH),
142 	REQ_OP_NAME(DISCARD),
143 	REQ_OP_NAME(SECURE_ERASE),
144 	REQ_OP_NAME(ZONE_RESET),
145 	REQ_OP_NAME(ZONE_RESET_ALL),
146 	REQ_OP_NAME(ZONE_OPEN),
147 	REQ_OP_NAME(ZONE_CLOSE),
148 	REQ_OP_NAME(ZONE_FINISH),
149 	REQ_OP_NAME(ZONE_APPEND),
150 	REQ_OP_NAME(WRITE_SAME),
151 	REQ_OP_NAME(WRITE_ZEROES),
152 	REQ_OP_NAME(SCSI_IN),
153 	REQ_OP_NAME(SCSI_OUT),
154 	REQ_OP_NAME(DRV_IN),
155 	REQ_OP_NAME(DRV_OUT),
156 };
157 #undef REQ_OP_NAME
158 
159 /**
160  * blk_op_str - Return the string XXX in REQ_OP_XXX.
161  * @op: REQ_OP_XXX.
162  *
163  * Description: Centralized block layer helper to convert REQ_OP_XXX into
164  * string form. Useful when debugging and tracing a bio or request. For an
165  * invalid REQ_OP_XXX it returns the string "UNKNOWN".
166  */
167 inline const char *blk_op_str(unsigned int op)
168 {
169 	const char *op_str = "UNKNOWN";
170 
171 	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
172 		op_str = blk_op_name[op];
173 
174 	return op_str;
175 }
176 EXPORT_SYMBOL_GPL(blk_op_str);
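/*
 * Example: blk_op_str() is convenient in ad-hoc debug output, e.g. a
 * sketch like:
 *
 *	pr_debug("bio op=%s sector=%llu\n", blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */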
177 
178 static const struct {
179 	int		errno;
180 	const char	*name;
181 } blk_errors[] = {
182 	[BLK_STS_OK]		= { 0,		"" },
183 	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
184 	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
185 	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
186 	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
187 	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
188 	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
189 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
190 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
191 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
192 	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
193 	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
194 
195 	/* device mapper special case, should not leak out: */
196 	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
197 
198 	/* zone device specific errors */
199 	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
200 	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
201 
202 	/* everything else not covered above: */
203 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
204 };
205 
206 blk_status_t errno_to_blk_status(int errno)
207 {
208 	int i;
209 
210 	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
211 		if (blk_errors[i].errno == errno)
212 			return (__force blk_status_t)i;
213 	}
214 
215 	return BLK_STS_IOERR;
216 }
217 EXPORT_SYMBOL_GPL(errno_to_blk_status);
218 
219 int blk_status_to_errno(blk_status_t status)
220 {
221 	int idx = (__force int)status;
222 
223 	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
224 		return -EIO;
225 	return blk_errors[idx].errno;
226 }
227 EXPORT_SYMBOL_GPL(blk_status_to_errno);
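/*
 * Example: these two helpers are used at the boundary between the block
 * layer and code that speaks plain errnos.  A round trip looks roughly
 * like (sketch):
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);	(BLK_STS_NOSPC)
 *	int err = blk_status_to_errno(sts);			(back to -ENOSPC)
 */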
228 
229 static void print_req_error(struct request *req, blk_status_t status,
230 		const char *caller)
231 {
232 	int idx = (__force int)status;
233 
234 	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
235 		return;
236 
237 	printk_ratelimited(KERN_ERR
238 		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
239 		"phys_seg %u prio class %u\n",
240 		caller, blk_errors[idx].name,
241 		req->rq_disk ? req->rq_disk->disk_name : "?",
242 		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
243 		req->cmd_flags & ~REQ_OP_MASK,
244 		req->nr_phys_segments,
245 		IOPRIO_PRIO_CLASS(req->ioprio));
246 }
247 
248 static void req_bio_endio(struct request *rq, struct bio *bio,
249 			  unsigned int nbytes, blk_status_t error)
250 {
251 	if (error)
252 		bio->bi_status = error;
253 
254 	if (unlikely(rq->rq_flags & RQF_QUIET))
255 		bio_set_flag(bio, BIO_QUIET);
256 
257 	bio_advance(bio, nbytes);
258 
259 	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
260 		/*
261 		 * Partial zone append completions cannot be supported as the
262 		 * BIO fragments may end up not being written sequentially.
263 		 */
264 		if (bio->bi_iter.bi_size)
265 			bio->bi_status = BLK_STS_IOERR;
266 		else
267 			bio->bi_iter.bi_sector = rq->__sector;
268 	}
269 
270 	/* don't actually finish bio if it's part of flush sequence */
271 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
272 		bio_endio(bio);
273 }
274 
275 void blk_dump_rq_flags(struct request *rq, char *msg)
276 {
277 	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
278 		rq->rq_disk ? rq->rq_disk->disk_name : "?",
279 		(unsigned long long) rq->cmd_flags);
280 
281 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
282 	       (unsigned long long)blk_rq_pos(rq),
283 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
284 	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
285 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
286 }
287 EXPORT_SYMBOL(blk_dump_rq_flags);
288 
289 /**
290  * blk_sync_queue - cancel any pending callbacks on a queue
291  * @q: the queue
292  *
293  * Description:
294  *     The block layer may perform asynchronous callback activity
295  *     on a queue, such as calling the unplug function after a timeout.
296  *     A block device may call blk_sync_queue to ensure that any
297  *     such activity is cancelled, thus allowing it to release resources
298  *     that the callbacks might use. The caller must already have made sure
299  *     that its ->submit_bio will not re-add plugging prior to calling
300  *     this function.
301  *
302  *     This function does not cancel any asynchronous activity arising
303  *     out of elevator or throttling code. That would require elevator_exit()
304  *     and blkcg_exit_queue() to be called with queue lock initialized.
305  *
306  */
307 void blk_sync_queue(struct request_queue *q)
308 {
309 	del_timer_sync(&q->timeout);
310 	cancel_work_sync(&q->timeout_work);
311 }
312 EXPORT_SYMBOL(blk_sync_queue);
313 
314 /**
315  * blk_set_pm_only - increment pm_only counter
316  * @q: request queue pointer
317  */
318 void blk_set_pm_only(struct request_queue *q)
319 {
320 	atomic_inc(&q->pm_only);
321 }
322 EXPORT_SYMBOL_GPL(blk_set_pm_only);
323 
324 void blk_clear_pm_only(struct request_queue *q)
325 {
326 	int pm_only;
327 
328 	pm_only = atomic_dec_return(&q->pm_only);
329 	WARN_ON_ONCE(pm_only < 0);
330 	if (pm_only == 0)
331 		wake_up_all(&q->mq_freeze_wq);
332 }
333 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
334 
335 /**
336  * blk_put_queue - decrement the request_queue refcount
337  * @q: the request_queue structure to decrement the refcount for
338  *
339  * Decrements the refcount of the request_queue kobject. When this reaches 0
340  * we'll have blk_release_queue() called.
341  *
342  * Context: Any context, but the last reference must not be dropped from
343  *          atomic context.
344  */
345 void blk_put_queue(struct request_queue *q)
346 {
347 	kobject_put(&q->kobj);
348 }
349 EXPORT_SYMBOL(blk_put_queue);
350 
351 void blk_set_queue_dying(struct request_queue *q)
352 {
353 	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
354 
355 	/*
356 	 * When queue DYING flag is set, we need to block new req
357 	 * entering queue, so we call blk_freeze_queue_start() to
358 	 * prevent I/O from crossing blk_queue_enter().
359 	 */
360 	blk_freeze_queue_start(q);
361 
362 	if (queue_is_mq(q))
363 		blk_mq_wake_waiters(q);
364 
365 	/* Make blk_queue_enter() reexamine the DYING flag. */
366 	wake_up_all(&q->mq_freeze_wq);
367 }
368 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
369 
370 /**
371  * blk_cleanup_queue - shutdown a request queue
372  * @q: request queue to shutdown
373  *
374  * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
375  * put it.  All future requests will be failed immediately with -ENODEV.
376  *
377  * Context: can sleep
378  */
379 void blk_cleanup_queue(struct request_queue *q)
380 {
381 	/* cannot be called from atomic context */
382 	might_sleep();
383 
384 	WARN_ON_ONCE(blk_queue_registered(q));
385 
386 	/* mark @q DYING, no new requests or merges will be allowed afterwards */
387 	blk_set_queue_dying(q);
388 
389 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
390 	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
391 
392 	/*
393 	 * Drain all requests queued before DYING marking. Set DEAD flag to
394 	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
395 	 * after draining finished.
396 	 */
397 	blk_freeze_queue(q);
398 
399 	rq_qos_exit(q);
400 
401 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
402 
403 	/* for synchronous bio-based driver finish in-flight integrity i/o */
404 	blk_flush_integrity();
405 
406 	/* @q won't process any more requests, flush async actions */
407 	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
408 	blk_sync_queue(q);
409 
410 	if (queue_is_mq(q))
411 		blk_mq_exit_queue(q);
412 
413 	/*
414 	 * In theory, request pool of sched_tags belongs to request queue.
415 	 * However, the current implementation requires tag_set for freeing
416 	 * requests, so free the pool now.
417 	 *
418 	 * Queue has become frozen, there can't be any in-queue requests, so
419 	 * it is safe to free requests now.
420 	 */
421 	mutex_lock(&q->sysfs_lock);
422 	if (q->elevator)
423 		blk_mq_sched_free_requests(q);
424 	mutex_unlock(&q->sysfs_lock);
425 
426 	/* @q is and will stay empty, shutdown and put */
427 	blk_put_queue(q);
428 }
429 EXPORT_SYMBOL(blk_cleanup_queue);
430 
431 /**
432  * blk_queue_enter() - try to increase q->q_usage_counter
433  * @q: request queue pointer
434  * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
435  */
436 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
437 {
438 	const bool pm = flags & BLK_MQ_REQ_PM;
439 
440 	while (true) {
441 		bool success = false;
442 
443 		rcu_read_lock();
444 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
445 			/*
446 			 * The code that increments the pm_only counter is
447 			 * responsible for ensuring that that counter is
448 			 * globally visible before the queue is unfrozen.
449 			 */
450 			if (pm || !blk_queue_pm_only(q)) {
451 				success = true;
452 			} else {
453 				percpu_ref_put(&q->q_usage_counter);
454 			}
455 		}
456 		rcu_read_unlock();
457 
458 		if (success)
459 			return 0;
460 
461 		if (flags & BLK_MQ_REQ_NOWAIT)
462 			return -EBUSY;
463 
464 		/*
465 		 * This is the read pair of the barrier in blk_freeze_queue_start():
466 		 * we need to order reading the __PERCPU_REF_DEAD flag of
467 		 * .q_usage_counter against reading .mq_freeze_depth or the
468 		 * queue dying flag, otherwise the following wait may never
469 		 * return if the two reads are reordered.
470 		 */
471 		smp_rmb();
472 
473 		wait_event(q->mq_freeze_wq,
474 			   (!q->mq_freeze_depth &&
475 			    (pm || (blk_pm_request_resume(q),
476 				    !blk_queue_pm_only(q)))) ||
477 			   blk_queue_dying(q));
478 		if (blk_queue_dying(q))
479 			return -ENODEV;
480 	}
481 }
482 
483 static inline int bio_queue_enter(struct bio *bio)
484 {
485 	struct request_queue *q = bio->bi_disk->queue;
486 	bool nowait = bio->bi_opf & REQ_NOWAIT;
487 	int ret;
488 
489 	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
490 	if (unlikely(ret)) {
491 		if (nowait && !blk_queue_dying(q))
492 			bio_wouldblock_error(bio);
493 		else
494 			bio_io_error(bio);
495 	}
496 
497 	return ret;
498 }
499 
500 void blk_queue_exit(struct request_queue *q)
501 {
502 	percpu_ref_put(&q->q_usage_counter);
503 }
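/*
 * Example: blk_queue_enter()/blk_queue_exit() bracket work that must not
 * race with queue freezing (a minimal sketch; error handling beyond the
 * enter failure is omitted):
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;			(queue is frozen or going away)
 *	...issue work against q...
 *	blk_queue_exit(q);
 */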
504 
505 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
506 {
507 	struct request_queue *q =
508 		container_of(ref, struct request_queue, q_usage_counter);
509 
510 	wake_up_all(&q->mq_freeze_wq);
511 }
512 
513 static void blk_rq_timed_out_timer(struct timer_list *t)
514 {
515 	struct request_queue *q = from_timer(q, t, timeout);
516 
517 	kblockd_schedule_work(&q->timeout_work);
518 }
519 
520 static void blk_timeout_work(struct work_struct *work)
521 {
522 }
523 
524 struct request_queue *blk_alloc_queue(int node_id)
525 {
526 	struct request_queue *q;
527 	int ret;
528 	bool skip = false;
529 
530 	q = kmem_cache_alloc_node(blk_requestq_cachep,
531 				GFP_KERNEL | __GFP_ZERO, node_id);
532 	if (!q)
533 		return NULL;
534 
535 	q->last_merge = NULL;
536 
537 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
538 	if (q->id < 0)
539 		goto fail_q;
540 
541 	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
542 	if (ret)
543 		goto fail_id;
544 
545 	q->backing_dev_info = bdi_alloc(node_id);
546 	if (!q->backing_dev_info)
547 		goto fail_split;
548 
549 	q->stats = blk_alloc_queue_stats();
550 	if (!q->stats)
551 		goto fail_stats;
552 
553 	q->node = node_id;
554 
555 	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
556 
557 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
558 		    laptop_mode_timer_fn, 0);
559 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
560 	INIT_WORK(&q->timeout_work, blk_timeout_work);
561 	INIT_LIST_HEAD(&q->icq_list);
562 #ifdef CONFIG_BLK_CGROUP
563 	INIT_LIST_HEAD(&q->blkg_list);
564 #endif
565 
566 	kobject_init(&q->kobj, &blk_queue_ktype);
567 
568 	mutex_init(&q->debugfs_mutex);
569 	mutex_init(&q->sysfs_lock);
570 	mutex_init(&q->sysfs_dir_lock);
571 	spin_lock_init(&q->queue_lock);
572 
573 	init_waitqueue_head(&q->mq_freeze_wq);
574 	mutex_init(&q->mq_freeze_lock);
575 
576 	/*
577 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
578 	 * See blk_register_queue() for details.
579 	 */
580 	if (percpu_ref_init(&q->q_usage_counter,
581 				blk_queue_usage_counter_release,
582 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
583 		goto fail_bdi;
584 
585 	if (blkcg_init_queue(q))
586 		goto fail_ref;
587 
588 	blk_queue_dma_alignment(q, 511);
589 	blk_set_default_limits(&q->limits);
590 	q->nr_requests = BLKDEV_MAX_RQ;
591 
592 	trace_android_rvh_blk_allocated_queue_init(&skip, q);
593 	if (skip)
594 		goto fail_ref;
595 
596 	return q;
597 
598 fail_ref:
599 	percpu_ref_exit(&q->q_usage_counter);
600 fail_bdi:
601 	blk_free_queue_stats(q->stats);
602 fail_stats:
603 	bdi_put(q->backing_dev_info);
604 fail_split:
605 	bioset_exit(&q->bio_split);
606 fail_id:
607 	ida_simple_remove(&blk_queue_ida, q->id);
608 fail_q:
609 	kmem_cache_free(blk_requestq_cachep, q);
610 	return NULL;
611 }
612 EXPORT_SYMBOL(blk_alloc_queue);
613 
614 /**
615  * blk_get_queue - increment the request_queue refcount
616  * @q: the request_queue structure to increment the refcount for
617  *
618  * Increment the refcount of the request_queue kobject.
619  *
620  * Context: Any context.
621  */
622 bool blk_get_queue(struct request_queue *q)
623 {
624 	if (likely(!blk_queue_dying(q))) {
625 		__blk_get_queue(q);
626 		return true;
627 	}
628 
629 	return false;
630 }
631 EXPORT_SYMBOL(blk_get_queue);
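/*
 * Example: code that stores a request_queue pointer for later use should
 * pair blk_get_queue() with blk_put_queue() (sketch):
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;			(queue is dying, do not use it)
 *	...
 *	blk_put_queue(q);
 */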
632 
633 /**
634  * blk_get_request - allocate a request
635  * @q: request queue to allocate a request for
636  * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
637  * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
638  */
639 struct request *blk_get_request(struct request_queue *q, unsigned int op,
640 				blk_mq_req_flags_t flags)
641 {
642 	struct request *req;
643 
644 	WARN_ON_ONCE(op & REQ_NOWAIT);
645 	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
646 
647 	req = blk_mq_alloc_request(q, op, flags);
648 	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
649 		q->mq_ops->initialize_rq_fn(req);
650 
651 	return req;
652 }
653 EXPORT_SYMBOL(blk_get_request);
654 
655 void blk_put_request(struct request *req)
656 {
657 	blk_mq_free_request(req);
658 }
659 EXPORT_SYMBOL(blk_put_request);
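/*
 * Example: passthrough-style users allocate and release requests with this
 * pair (a sketch; the request would typically be driven via
 * blk_execute_rq() in between):
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...set up and execute rq...
 *	blk_put_request(rq);
 */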
660 
661 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
662 {
663 	char b[BDEVNAME_SIZE];
664 
665 	pr_info_ratelimited("attempt to access beyond end of device\n"
666 			    "%s: rw=%d, want=%llu, limit=%llu\n",
667 			    bio_devname(bio, b), bio->bi_opf,
668 			    bio_end_sector(bio), maxsector);
669 }
670 
671 #ifdef CONFIG_FAIL_MAKE_REQUEST
672 
673 static DECLARE_FAULT_ATTR(fail_make_request);
674 
675 static int __init setup_fail_make_request(char *str)
676 {
677 	return setup_fault_attr(&fail_make_request, str);
678 }
679 __setup("fail_make_request=", setup_fail_make_request);
680 
681 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
682 {
683 	return part->make_it_fail && should_fail(&fail_make_request, bytes);
684 }
685 
686 static int __init fail_make_request_debugfs(void)
687 {
688 	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
689 						NULL, &fail_make_request);
690 
691 	return PTR_ERR_OR_ZERO(dir);
692 }
693 
694 late_initcall(fail_make_request_debugfs);
695 
696 #else /* CONFIG_FAIL_MAKE_REQUEST */
697 
698 static inline bool should_fail_request(struct hd_struct *part,
699 					unsigned int bytes)
700 {
701 	return false;
702 }
703 
704 #endif /* CONFIG_FAIL_MAKE_REQUEST */
705 
706 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
707 {
708 	const int op = bio_op(bio);
709 
710 	if (part->policy && op_is_write(op)) {
711 		char b[BDEVNAME_SIZE];
712 
713 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
714 			return false;
715 		pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
716 			bio_devname(bio, b), part->partno);
717 		/* Older lvm-tools actually trigger this */
718 		return false;
719 	}
720 
721 	return false;
722 }
723 
724 static noinline int should_fail_bio(struct bio *bio)
725 {
726 	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
727 		return -EIO;
728 	return 0;
729 }
730 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
731 
732 /*
733  * Check whether this bio extends beyond the end of the device or partition.
734  * This may well happen - the kernel calls bread() without checking the size of
735  * the device, e.g., when mounting a file system.
736  */
737 static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
738 {
739 	unsigned int nr_sectors = bio_sectors(bio);
740 
741 	if (nr_sectors && maxsector &&
742 	    (nr_sectors > maxsector ||
743 	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
744 		handle_bad_sector(bio, maxsector);
745 		return -EIO;
746 	}
747 	return 0;
748 }
749 
750 /*
751  * Remap block n of partition p to block n+start(p) of the disk.
752  */
753 static inline int blk_partition_remap(struct bio *bio)
754 {
755 	struct hd_struct *p;
756 	int ret = -EIO;
757 
758 	rcu_read_lock();
759 	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
760 	if (unlikely(!p))
761 		goto out;
762 	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
763 		goto out;
764 	if (unlikely(bio_check_ro(bio, p)))
765 		goto out;
766 
767 	if (bio_sectors(bio)) {
768 		if (bio_check_eod(bio, part_nr_sects_read(p)))
769 			goto out;
770 		bio->bi_iter.bi_sector += p->start_sect;
771 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
772 				      bio->bi_iter.bi_sector - p->start_sect);
773 	}
774 	bio->bi_partno = 0;
775 	ret = 0;
776 out:
777 	rcu_read_unlock();
778 	return ret;
779 }
780 
781 /*
782  * Check write append to a zoned block device.
783  */
784 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
785 						 struct bio *bio)
786 {
787 	sector_t pos = bio->bi_iter.bi_sector;
788 	int nr_sectors = bio_sectors(bio);
789 
790 	/* Only applicable to zoned block devices */
791 	if (!blk_queue_is_zoned(q))
792 		return BLK_STS_NOTSUPP;
793 
794 	/* The bio sector must point to the start of a sequential zone */
795 	if (pos & (blk_queue_zone_sectors(q) - 1) ||
796 	    !blk_queue_zone_is_seq(q, pos))
797 		return BLK_STS_IOERR;
798 
799 	/*
800 	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
801 	 * split and could result in non-contiguous sectors being written in
802 	 * different zones.
803 	 */
804 	if (nr_sectors > q->limits.chunk_sectors)
805 		return BLK_STS_IOERR;
806 
807 	/* Make sure the BIO is small enough and will not get split */
808 	if (nr_sectors > q->limits.max_zone_append_sectors)
809 		return BLK_STS_IOERR;
810 
811 	bio->bi_opf |= REQ_NOMERGE;
812 
813 	return BLK_STS_OK;
814 }
815 
816 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
817 {
818 	struct request_queue *q = bio->bi_disk->queue;
819 	blk_status_t status = BLK_STS_IOERR;
820 	struct blk_plug *plug;
821 
822 	might_sleep();
823 
824 	plug = blk_mq_plug(q, bio);
825 	if (plug && plug->nowait)
826 		bio->bi_opf |= REQ_NOWAIT;
827 
828 	/*
829 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
830 	 * if queue does not support NOWAIT.
831 	 */
832 	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
833 		goto not_supported;
834 
835 	if (should_fail_bio(bio))
836 		goto end_io;
837 
838 	if (bio->bi_partno) {
839 		if (unlikely(blk_partition_remap(bio)))
840 			goto end_io;
841 	} else {
842 		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
843 			goto end_io;
844 		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
845 			goto end_io;
846 	}
847 
848 	 * Filter flush bios early so that bio-based drivers without flush
849 	 * support don't have to worry about them.
850 	 * support don't have to worry about them.
851 	 */
852 	if (op_is_flush(bio->bi_opf) &&
853 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
854 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
855 		if (!bio_sectors(bio)) {
856 			status = BLK_STS_OK;
857 			goto end_io;
858 		}
859 	}
860 
861 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
862 		bio->bi_opf &= ~REQ_HIPRI;
863 
864 	switch (bio_op(bio)) {
865 	case REQ_OP_DISCARD:
866 		if (!blk_queue_discard(q))
867 			goto not_supported;
868 		break;
869 	case REQ_OP_SECURE_ERASE:
870 		if (!blk_queue_secure_erase(q))
871 			goto not_supported;
872 		break;
873 	case REQ_OP_WRITE_SAME:
874 		if (!q->limits.max_write_same_sectors)
875 			goto not_supported;
876 		break;
877 	case REQ_OP_ZONE_APPEND:
878 		status = blk_check_zone_append(q, bio);
879 		if (status != BLK_STS_OK)
880 			goto end_io;
881 		break;
882 	case REQ_OP_ZONE_RESET:
883 	case REQ_OP_ZONE_OPEN:
884 	case REQ_OP_ZONE_CLOSE:
885 	case REQ_OP_ZONE_FINISH:
886 		if (!blk_queue_is_zoned(q))
887 			goto not_supported;
888 		break;
889 	case REQ_OP_ZONE_RESET_ALL:
890 		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
891 			goto not_supported;
892 		break;
893 	case REQ_OP_WRITE_ZEROES:
894 		if (!q->limits.max_write_zeroes_sectors)
895 			goto not_supported;
896 		break;
897 	default:
898 		break;
899 	}
900 
901 	/*
902 	 * Various block parts want %current->io_context, so allocate it up
903 	 * front rather than dealing with lots of pain to allocate it only
904 	 * where needed. This may fail and the block layer knows how to live
905 	 * with it.
906 	 */
907 	if (unlikely(!current->io_context))
908 		create_task_io_context(current, GFP_ATOMIC, q->node);
909 
910 	if (blk_throtl_bio(bio))
911 		return false;
912 
913 	blk_cgroup_bio_start(bio);
914 	blkcg_bio_issue_init(bio);
915 
916 	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
917 		trace_block_bio_queue(q, bio);
918 		/* Now that enqueuing has been traced, we need to trace
919 		 * completion as well.
920 		 */
921 		bio_set_flag(bio, BIO_TRACE_COMPLETION);
922 	}
923 	return true;
924 
925 not_supported:
926 	status = BLK_STS_NOTSUPP;
927 end_io:
928 	bio->bi_status = status;
929 	bio_endio(bio);
930 	return false;
931 }
932 
933 static blk_qc_t __submit_bio(struct bio *bio)
934 {
935 	struct gendisk *disk = bio->bi_disk;
936 	blk_qc_t ret = BLK_QC_T_NONE;
937 
938 	if (blk_crypto_bio_prep(&bio)) {
939 		if (!disk->fops->submit_bio)
940 			return blk_mq_submit_bio(bio);
941 		ret = disk->fops->submit_bio(bio);
942 	}
943 	blk_queue_exit(disk->queue);
944 	return ret;
945 }
946 
947 /*
948  * The loop in this function may be a bit non-obvious, and so deserves some
949  * explanation:
950  *
951  *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
952  *    that), so we have a list with a single bio.
953  *  - We pretend that we have just taken it off a longer list, so we assign
954  *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
955  *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
956  *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
957  *    non-NULL value in bio_list and re-enter the loop from the top.
958  *  - In this case we really did just take the bio off the top of the list (no
959  *    pretending) and so remove it from bio_list, and call into ->submit_bio()
960  *    again.
961  *
962  * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
963  * bio_list_on_stack[1] contains bios that were submitted before the current
964  *	->submit_bio, but that haven't been processed yet.
965  */
966 static blk_qc_t __submit_bio_noacct(struct bio *bio)
967 {
968 	struct bio_list bio_list_on_stack[2];
969 	blk_qc_t ret = BLK_QC_T_NONE;
970 
971 	BUG_ON(bio->bi_next);
972 
973 	bio_list_init(&bio_list_on_stack[0]);
974 	current->bio_list = bio_list_on_stack;
975 
976 	do {
977 		struct request_queue *q = bio->bi_disk->queue;
978 		struct bio_list lower, same;
979 
980 		if (unlikely(bio_queue_enter(bio) != 0))
981 			continue;
982 
983 		/*
984 		 * Create a fresh bio_list for all subordinate requests.
985 		 */
986 		bio_list_on_stack[1] = bio_list_on_stack[0];
987 		bio_list_init(&bio_list_on_stack[0]);
988 
989 		ret = __submit_bio(bio);
990 
991 		/*
992 		 * Sort new bios into those for a lower level and those for the
993 		 * same level.
994 		 */
995 		bio_list_init(&lower);
996 		bio_list_init(&same);
997 		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
998 			if (q == bio->bi_disk->queue)
999 				bio_list_add(&same, bio);
1000 			else
1001 				bio_list_add(&lower, bio);
1002 
1003 		/*
1004 		 * Now assemble so we handle the lowest level first.
1005 		 */
1006 		bio_list_merge(&bio_list_on_stack[0], &lower);
1007 		bio_list_merge(&bio_list_on_stack[0], &same);
1008 		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
1009 	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
1010 
1011 	current->bio_list = NULL;
1012 	return ret;
1013 }
1014 
1015 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1016 {
1017 	struct bio_list bio_list[2] = { };
1018 	blk_qc_t ret = BLK_QC_T_NONE;
1019 
1020 	current->bio_list = bio_list;
1021 
1022 	do {
1023 		struct gendisk *disk = bio->bi_disk;
1024 
1025 		if (unlikely(bio_queue_enter(bio) != 0))
1026 			continue;
1027 
1028 		if (!blk_crypto_bio_prep(&bio)) {
1029 			blk_queue_exit(disk->queue);
1030 			ret = BLK_QC_T_NONE;
1031 			continue;
1032 		}
1033 
1034 		ret = blk_mq_submit_bio(bio);
1035 	} while ((bio = bio_list_pop(&bio_list[0])));
1036 
1037 	current->bio_list = NULL;
1038 	return ret;
1039 }
1040 
1041 /**
1042  * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1043  * @bio:  The bio describing the location in memory and on the device.
1044  *
1045  * This is a version of submit_bio() that shall only be used for I/O that is
1046  * resubmitted to lower level drivers by stacking block drivers.  All file
1047  * systems and other upper level users of the block layer should use
1048  * submit_bio() instead.
1049  */
1050 blk_qc_t submit_bio_noacct(struct bio *bio)
1051 {
1052 	if (!submit_bio_checks(bio))
1053 		return BLK_QC_T_NONE;
1054 
1055 	/*
1056 	 * We only want one ->submit_bio to be active at a time, else stack
1057 	 * usage with stacked devices could be a problem.  Use current->bio_list
1058 	 * to collect a list of requests submitted by a ->submit_bio method while
1059 	 * it is active, and then process them after it has returned.
1060 	 */
1061 	if (current->bio_list) {
1062 		bio_list_add(&current->bio_list[0], bio);
1063 		return BLK_QC_T_NONE;
1064 	}
1065 
1066 	if (!bio->bi_disk->fops->submit_bio)
1067 		return __submit_bio_noacct_mq(bio);
1068 	return __submit_bio_noacct(bio);
1069 }
1070 EXPORT_SYMBOL(submit_bio_noacct);
1071 
1072 /**
1073  * submit_bio - submit a bio to the block device layer for I/O
1074  * @bio: The &struct bio which describes the I/O
1075  *
1076  * submit_bio() is used to submit I/O requests to block devices.  It is passed a
1077  * fully set up &struct bio that describes the I/O that needs to be done.  The
1078  * bio will be sent to the device described by the bi_disk and bi_partno fields.
1079  *
1080  * The success/failure status of the request, along with notification of
1081  * completion, is delivered asynchronously through the ->bi_end_io() callback
1082  * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
1083  * been called.
1084  */
1085 blk_qc_t submit_bio(struct bio *bio)
1086 {
1087 	if (blkcg_punt_bio_submit(bio))
1088 		return BLK_QC_T_NONE;
1089 
1090 	/*
1091 	 * If it's a regular read/write or a barrier with data attached,
1092 	 * go through the normal accounting stuff before submission.
1093 	 */
1094 	if (bio_has_data(bio)) {
1095 		unsigned int count;
1096 
1097 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1098 			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1099 		else
1100 			count = bio_sectors(bio);
1101 
1102 		if (op_is_write(bio_op(bio))) {
1103 			count_vm_events(PGPGOUT, count);
1104 		} else {
1105 			task_io_account_read(bio->bi_iter.bi_size);
1106 			count_vm_events(PGPGIN, count);
1107 		}
1108 
1109 		if (unlikely(block_dump)) {
1110 			char b[BDEVNAME_SIZE];
1111 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1112 			current->comm, task_pid_nr(current),
1113 				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
1114 				(unsigned long long)bio->bi_iter.bi_sector,
1115 				bio_devname(bio, b), count);
1116 		}
1117 	}
1118 
1119 	/*
1120 	 * If we're reading data that is part of the userspace workingset, count
1121 	 * submission time as memory stall.  When the device is congested, or
1122 	 * the submitting cgroup IO-throttled, submission can be a significant
1123 	 * part of overall IO time.
1124 	 */
1125 	if (unlikely(bio_op(bio) == REQ_OP_READ &&
1126 	    bio_flagged(bio, BIO_WORKINGSET))) {
1127 		unsigned long pflags;
1128 		blk_qc_t ret;
1129 
1130 		psi_memstall_enter(&pflags);
1131 		ret = submit_bio_noacct(bio);
1132 		psi_memstall_leave(&pflags);
1133 
1134 		return ret;
1135 	}
1136 
1137 	return submit_bio_noacct(bio);
1138 }
1139 EXPORT_SYMBOL(submit_bio);
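/*
 * Example: a typical submit_bio() caller allocates a bio, points it at a
 * device and a page, sets a completion callback and fires it off.  A
 * minimal read sketch ("my_end_io" is a hypothetical ->bi_end_io handler;
 * error handling is omitted):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */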
1140 
1141 /**
1142  * blk_cloned_rq_check_limits - Helper function to check a cloned request
1143  *                              for the new queue limits
1144  * @q:  the queue
1145  * @rq: the request being checked
1146  *
1147  * Description:
1148  *    @rq may have been made based on weaker limitations of upper-level queues
1149  *    in request stacking drivers, and it may violate the limitation of @q.
1150  *    Since the block layer and the underlying device driver trust @rq
1151  *    after it is inserted to @q, it should be checked against @q before
1152  *    the insertion using this generic function.
1153  *
1154  *    Request stacking drivers like request-based dm may change the queue
1155  *    limits when retrying requests on other queues. Those requests need
1156  *    to be checked against the new queue limits again during dispatch.
1157  */
1158 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1159 				      struct request *rq)
1160 {
1161 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1162 
1163 	if (blk_rq_sectors(rq) > max_sectors) {
1164 		/*
1165 		 * SCSI device does not have a good way to return if
1166 		 * Write Same/Zero is actually supported. If a device rejects
1167 		 * a non-read/write command (discard, write same, etc.) the
1168 		 * low-level device driver will set the relevant queue limit to
1169 		 * 0 to prevent blk-lib from issuing more of the offending
1170 		 * operations. Commands queued prior to the queue limit being
1171 		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1172 		 * errors being propagated to upper layers.
1173 		 */
1174 		if (max_sectors == 0)
1175 			return BLK_STS_NOTSUPP;
1176 
1177 		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1178 			__func__, blk_rq_sectors(rq), max_sectors);
1179 		return BLK_STS_IOERR;
1180 	}
1181 
1182 	/*
1183 	 * queue's settings related to segment counting like q->bounce_pfn
1184 	 * may differ from that of other stacking queues.
1185 	 * Recalculate it to check the request correctly on this queue's
1186 	 * limitation.
1187 	 */
1188 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1189 	if (rq->nr_phys_segments > queue_max_segments(q)) {
1190 		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1191 			__func__, rq->nr_phys_segments, queue_max_segments(q));
1192 		return BLK_STS_IOERR;
1193 	}
1194 
1195 	return BLK_STS_OK;
1196 }
1197 
1198 /**
1199  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1200  * @q:  the queue to submit the request
1201  * @rq: the request being queued
1202  */
1203 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1204 {
1205 	blk_status_t ret;
1206 
1207 	ret = blk_cloned_rq_check_limits(q, rq);
1208 	if (ret != BLK_STS_OK)
1209 		return ret;
1210 
1211 	if (rq->rq_disk &&
1212 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1213 		return BLK_STS_IOERR;
1214 
1215 	if (blk_crypto_insert_cloned_request(rq))
1216 		return BLK_STS_IOERR;
1217 
1218 	if (blk_queue_io_stat(q))
1219 		blk_account_io_start(rq);
1220 
1221 	/*
1222 	 * Since we have a scheduler attached on the top device,
1223 	 * bypass a potential scheduler on the bottom device for
1224 	 * insert.
1225 	 */
1226 	return blk_mq_request_issue_directly(rq, true);
1227 }
1228 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1229 
1230 /**
1231  * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1232  * @rq: request to examine
1233  *
1234  * Description:
1235  *     A request could be a merge of IOs which require different failure
1236  *     handling.  This function determines the number of bytes which
1237  *     can be failed from the beginning of the request without
1238  *     crossing into an area which needs to be retried further.
1239  *
1240  * Return:
1241  *     The number of bytes to fail.
1242  */
1243 unsigned int blk_rq_err_bytes(const struct request *rq)
1244 {
1245 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1246 	unsigned int bytes = 0;
1247 	struct bio *bio;
1248 
1249 	if (!(rq->rq_flags & RQF_MIXED_MERGE))
1250 		return blk_rq_bytes(rq);
1251 
1252 	/*
1253 	 * Currently the only 'mixing' which can happen is between
1254 	 * different fastfail types.  We can safely fail portions
1255 	 * which have all the failfast bits that the first one has -
1256 	 * the ones which are at least as eager to fail as the first
1257 	 * one.
1258 	 */
1259 	for (bio = rq->bio; bio; bio = bio->bi_next) {
1260 		if ((bio->bi_opf & ff) != ff)
1261 			break;
1262 		bytes += bio->bi_iter.bi_size;
1263 	}
1264 
1265 	/* this could lead to infinite loop */
1266 	BUG_ON(blk_rq_bytes(rq) && !bytes);
1267 	return bytes;
1268 }
1269 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1270 
1271 static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1272 {
1273 	unsigned long stamp;
1274 again:
1275 	stamp = READ_ONCE(part->stamp);
1276 	if (unlikely(stamp != now)) {
1277 		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
1278 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
1279 	}
1280 	if (part->partno) {
1281 		part = &part_to_disk(part)->part0;
1282 		goto again;
1283 	}
1284 }
1285 
1286 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1287 {
1288 	if (req->part && blk_do_io_stat(req)) {
1289 		const int sgrp = op_stat_group(req_op(req));
1290 		struct hd_struct *part;
1291 
1292 		part_stat_lock();
1293 		part = req->part;
1294 		part_stat_add(part, sectors[sgrp], bytes >> 9);
1295 		part_stat_unlock();
1296 	}
1297 }
1298 
1299 void blk_account_io_done(struct request *req, u64 now)
1300 {
1301 	/*
1302 	 * Account IO completion.  flush_rq isn't accounted as a
1303 	 * normal IO on queueing nor completion.  Accounting the
1304 	 * containing request is enough.
1305 	 */
1306 	if (req->part && blk_do_io_stat(req) &&
1307 	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
1308 		const int sgrp = op_stat_group(req_op(req));
1309 		struct hd_struct *part;
1310 
1311 		part_stat_lock();
1312 		part = req->part;
1313 
1314 		update_io_ticks(part, jiffies, true);
1315 		part_stat_inc(part, ios[sgrp]);
1316 		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
1317 		part_stat_unlock();
1318 
1319 		hd_struct_put(part);
1320 	}
1321 }
1322 
1323 void blk_account_io_start(struct request *rq)
1324 {
1325 	if (!blk_do_io_stat(rq))
1326 		return;
1327 
1328 	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
1329 
1330 	part_stat_lock();
1331 	update_io_ticks(rq->part, jiffies, false);
1332 	part_stat_unlock();
1333 }
1334 
1335 static unsigned long __part_start_io_acct(struct hd_struct *part,
1336 					  unsigned int sectors, unsigned int op)
1337 {
1338 	const int sgrp = op_stat_group(op);
1339 	unsigned long now = READ_ONCE(jiffies);
1340 
1341 	part_stat_lock();
1342 	update_io_ticks(part, now, false);
1343 	part_stat_inc(part, ios[sgrp]);
1344 	part_stat_add(part, sectors[sgrp], sectors);
1345 	part_stat_local_inc(part, in_flight[op_is_write(op)]);
1346 	part_stat_unlock();
1347 
1348 	return now;
1349 }
1350 
1351 unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
1352 				 struct bio *bio)
1353 {
1354 	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
1355 
1356 	return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
1357 }
1358 EXPORT_SYMBOL_GPL(part_start_io_acct);
1359 
1360 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1361 				 unsigned int op)
1362 {
1363 	return __part_start_io_acct(&disk->part0, sectors, op);
1364 }
1365 EXPORT_SYMBOL(disk_start_io_acct);
1366 
1367 static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
1368 			       unsigned long start_time)
1369 {
1370 	const int sgrp = op_stat_group(op);
1371 	unsigned long now = READ_ONCE(jiffies);
1372 	unsigned long duration = now - start_time;
1373 
1374 	part_stat_lock();
1375 	update_io_ticks(part, now, true);
1376 	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1377 	part_stat_local_dec(part, in_flight[op_is_write(op)]);
1378 	part_stat_unlock();
1379 }
1380 
1381 void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1382 		      unsigned long start_time)
1383 {
1384 	__part_end_io_acct(part, bio_op(bio), start_time);
1385 	hd_struct_put(part);
1386 }
1387 EXPORT_SYMBOL_GPL(part_end_io_acct);
1388 
1389 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1390 		      unsigned long start_time)
1391 {
1392 	__part_end_io_acct(&disk->part0, op, start_time);
1393 }
1394 EXPORT_SYMBOL(disk_end_io_acct);
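/*
 * Example: a bio-based driver can account each bio with the disk helpers
 * above (sketch):
 *
 *	unsigned long start;
 *
 *	start = disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio));
 *	...perform the I/O...
 *	disk_end_io_acct(disk, bio_op(bio), start);
 */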
1395 
1396 /*
1397  * Steal bios from a request and add them to a bio list.
1398  * The request must not have been partially completed before.
1399  */
1400 void blk_steal_bios(struct bio_list *list, struct request *rq)
1401 {
1402 	if (rq->bio) {
1403 		if (list->tail)
1404 			list->tail->bi_next = rq->bio;
1405 		else
1406 			list->head = rq->bio;
1407 		list->tail = rq->biotail;
1408 
1409 		rq->bio = NULL;
1410 		rq->biotail = NULL;
1411 	}
1412 
1413 	rq->__data_len = 0;
1414 }
1415 EXPORT_SYMBOL_GPL(blk_steal_bios);
1416 
1417 /**
1418  * blk_update_request - Special helper function for request stacking drivers
1419  * @req:      the request being processed
1420  * @error:    block status code
1421  * @nr_bytes: number of bytes to complete @req
1422  *
1423  * Description:
1424  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
1425  *     the request structure even if @req doesn't have leftover.
1426  *     If @req has leftover, sets it up for the next range of segments.
1427  *
1428  *     This special helper function is only for request stacking drivers
1429  *     (e.g. request-based dm) so that they can handle partial completion.
1430  *     Actual device drivers should use blk_mq_end_request instead.
1431  *
1432  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1433  *     %false return from this function.
1434  *
1435  * Note:
1436  *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
1437  *	blk_rq_bytes() and in blk_update_request().
1438  *
1439  * Return:
1440  *     %false - this request doesn't have any more data
1441  *     %true  - this request has more data
1442  **/
1443 bool blk_update_request(struct request *req, blk_status_t error,
1444 		unsigned int nr_bytes)
1445 {
1446 	int total_bytes;
1447 
1448 	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1449 
1450 	if (!req->bio)
1451 		return false;
1452 
1453 #ifdef CONFIG_BLK_DEV_INTEGRITY
1454 	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1455 	    error == BLK_STS_OK)
1456 		req->q->integrity.profile->complete_fn(req, nr_bytes);
1457 #endif
1458 
1459 	/*
1460 	 * Upper layers may call blk_crypto_evict_key() anytime after the last
1461 	 * bio_endio().  Therefore, the keyslot must be released before that.
1462 	 */
1463 	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
1464 		__blk_crypto_rq_put_keyslot(req);
1465 
1466 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
1467 		     !(req->rq_flags & RQF_QUIET)))
1468 		print_req_error(req, error, __func__);
1469 
1470 	blk_account_io_completion(req, nr_bytes);
1471 
1472 	total_bytes = 0;
1473 	while (req->bio) {
1474 		struct bio *bio = req->bio;
1475 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1476 
1477 		if (bio_bytes == bio->bi_iter.bi_size)
1478 			req->bio = bio->bi_next;
1479 
1480 		/* Completion has already been traced */
1481 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1482 		req_bio_endio(req, bio, bio_bytes, error);
1483 
1484 		total_bytes += bio_bytes;
1485 		nr_bytes -= bio_bytes;
1486 
1487 		if (!nr_bytes)
1488 			break;
1489 	}
1490 
1491 	/*
1492 	 * completely done
1493 	 */
1494 	if (!req->bio) {
1495 		/*
1496 		 * Reset counters so that the request stacking driver
1497 		 * can find how many bytes remain in the request
1498 		 * later.
1499 		 */
1500 		req->__data_len = 0;
1501 		return false;
1502 	}
1503 
1504 	req->__data_len -= total_bytes;
1505 
1506 	/* update sector only for requests with clear definition of sector */
1507 	if (!blk_rq_is_passthrough(req))
1508 		req->__sector += total_bytes >> 9;
1509 
1510 	/* mixed attributes always follow the first bio */
1511 	if (req->rq_flags & RQF_MIXED_MERGE) {
1512 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
1513 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1514 	}
1515 
1516 	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1517 		/*
1518 		 * If total number of sectors is less than the first segment
1519 		 * size, something has gone terribly wrong.
1520 		 */
1521 		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1522 			blk_dump_rq_flags(req, "request botched");
1523 			req->__data_len = blk_rq_cur_bytes(req);
1524 		}
1525 
1526 		/* recalculate the number of segments */
1527 		req->nr_phys_segments = blk_recalc_rq_segments(req);
1528 	}
1529 
1530 	return true;
1531 }
1532 EXPORT_SYMBOL_GPL(blk_update_request);
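/*
 * Example: a request stacking driver completing part of a request would
 * typically do something like (sketch):
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, nr_bytes))
 *		__blk_mq_end_request(rq, BLK_STS_OK);	(nothing left to do)
 *	else
 *		...requeue or continue with the remainder of rq...
 */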
1533 
1534 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1535 /**
1536  * rq_flush_dcache_pages - Helper function to flush all pages in a request
1537  * @rq: the request to be flushed
1538  *
1539  * Description:
1540  *     Flush all pages in @rq.
1541  */
1542 void rq_flush_dcache_pages(struct request *rq)
1543 {
1544 	struct req_iterator iter;
1545 	struct bio_vec bvec;
1546 
1547 	rq_for_each_segment(bvec, rq, iter)
1548 		flush_dcache_page(bvec.bv_page);
1549 }
1550 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1551 #endif
1552 
1553 /**
1554  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1555  * @q : the queue of the device being checked
1556  *
1557  * Description:
1558  *    Check if underlying low-level drivers of a device are busy.
1559  *    If the drivers want to export their busy state, they must set their own
1560  *    exporting function using blk_queue_lld_busy() first.
1561  *
1562  *    Basically, this function is used only by request stacking drivers
1563  *    to stop dispatching requests to underlying devices when underlying
1564  *    devices are busy.  This behavior helps more I/O merging on the queue
1565  *    of the request stacking driver and prevents I/O throughput regression
1566  *    on burst I/O load.
1567  *
1568  * Return:
1569  *    0 - Not busy (The request stacking driver should dispatch request)
1570  *    1 - Busy (The request stacking driver should stop dispatching request)
1571  */
1572 int blk_lld_busy(struct request_queue *q)
1573 {
1574 	if (queue_is_mq(q) && q->mq_ops->busy)
1575 		return q->mq_ops->busy(q);
1576 
1577 	return 0;
1578 }
1579 EXPORT_SYMBOL_GPL(blk_lld_busy);
1580 
1581 /**
1582  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1583  * @rq: the clone request to be cleaned up
1584  *
1585  * Description:
1586  *     Free all bios in @rq for a cloned request.
1587  */
1588 void blk_rq_unprep_clone(struct request *rq)
1589 {
1590 	struct bio *bio;
1591 
1592 	while ((bio = rq->bio) != NULL) {
1593 		rq->bio = bio->bi_next;
1594 
1595 		bio_put(bio);
1596 	}
1597 }
1598 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1599 
1600 /**
1601  * blk_rq_prep_clone - Helper function to setup clone request
1602  * @rq: the request to be setup
1603  * @rq_src: original request to be cloned
1604  * @bs: bio_set that bios for clone are allocated from
1605  * @gfp_mask: memory allocation mask for bio
1606  * @bio_ctr: setup function to be called for each clone bio.
1607  *           Returns %0 for success, non %0 for failure.
1608  * @data: private data to be passed to @bio_ctr
1609  *
1610  * Description:
1611  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1612  *     Also, the pages which the original bios point to are not copied;
1613  *     the cloned bios just point at the same pages.
1614  *     So cloned bios must be completed before original bios, which means
1615  *     the caller must complete @rq before @rq_src.
1616  */
1617 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1618 		      struct bio_set *bs, gfp_t gfp_mask,
1619 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
1620 		      void *data)
1621 {
1622 	struct bio *bio, *bio_src;
1623 
1624 	if (!bs)
1625 		bs = &fs_bio_set;
1626 
1627 	__rq_for_each_bio(bio_src, rq_src) {
1628 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
1629 		if (!bio)
1630 			goto free_and_out;
1631 
1632 		if (bio_ctr && bio_ctr(bio, bio_src, data))
1633 			goto free_and_out;
1634 
1635 		if (rq->bio) {
1636 			rq->biotail->bi_next = bio;
1637 			rq->biotail = bio;
1638 		} else {
1639 			rq->bio = rq->biotail = bio;
1640 		}
1641 		bio = NULL;
1642 	}
1643 
1644 	/* Copy attributes of the original request to the clone request. */
1645 	rq->__sector = blk_rq_pos(rq_src);
1646 	rq->__data_len = blk_rq_bytes(rq_src);
1647 	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1648 		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1649 		rq->special_vec = rq_src->special_vec;
1650 	}
1651 	rq->nr_phys_segments = rq_src->nr_phys_segments;
1652 	rq->ioprio = rq_src->ioprio;
1653 
1654 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1655 		goto free_and_out;
1656 
1657 	return 0;
1658 
1659 free_and_out:
1660 	if (bio)
1661 		bio_put(bio);
1662 	blk_rq_unprep_clone(rq);
1663 
1664 	return -ENOMEM;
1665 }
1666 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
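/*
 * Example: a request stacking driver (e.g. request-based dm) pairs
 * blk_rq_prep_clone() with blk_rq_unprep_clone() on its error path
 * (sketch; "clone" and "rq" are the clone and the original request):
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL))
 *		return BLK_STS_RESOURCE;
 *	ret = blk_insert_cloned_request(clone->q, clone);
 *	if (ret != BLK_STS_OK)
 *		blk_rq_unprep_clone(clone);
 */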
1667 
1668 int kblockd_schedule_work(struct work_struct *work)
1669 {
1670 	return queue_work(kblockd_workqueue, work);
1671 }
1672 EXPORT_SYMBOL(kblockd_schedule_work);
1673 
1674 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1675 				unsigned long delay)
1676 {
1677 	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1678 }
1679 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1680 
1681 /**
1682  * blk_start_plug - initialize blk_plug and track it inside the task_struct
1683  * @plug:	The &struct blk_plug that needs to be initialized
1684  *
1685  * Description:
1686  *   blk_start_plug() indicates to the block layer an intent by the caller
1687  *   to submit multiple I/O requests in a batch.  The block layer may use
1688  *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1689  *   is called.  However, the block layer may choose to submit requests
1690  *   before a call to blk_finish_plug() if the number of queued I/Os
1691  *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1692  *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1693  *   the task schedules (see below).
1694  *
1695  *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1696  *   pending I/O should the task end up blocking between blk_start_plug() and
1697  *   blk_finish_plug(). This is important from a performance perspective, but
1698  *   also ensures that we don't deadlock. For instance, if the task is blocking
1699  *   for a memory allocation, memory reclaim could end up wanting to free a
1700  *   page belonging to that request that is currently residing in our private
1701  *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1702  *   this kind of deadlock.
1703  */
1704 void blk_start_plug(struct blk_plug *plug)
1705 {
1706 	struct task_struct *tsk = current;
1707 
1708 	/*
1709 	 * If this is a nested plug, don't actually assign it.
1710 	 */
1711 	if (tsk->plug)
1712 		return;
1713 
1714 	INIT_LIST_HEAD(&plug->mq_list);
1715 	INIT_LIST_HEAD(&plug->cb_list);
1716 	plug->rq_count = 0;
1717 	plug->multiple_queues = false;
1718 	plug->nowait = false;
1719 
1720 	/*
1721 	 * Store ordering should not be needed here, since a potential
1722 	 * preempt will imply a full memory barrier
1723 	 */
1724 	tsk->plug = plug;
1725 }
1726 EXPORT_SYMBOL(blk_start_plug);
1727 
1728 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1729 {
1730 	LIST_HEAD(callbacks);
1731 
1732 	while (!list_empty(&plug->cb_list)) {
1733 		list_splice_init(&plug->cb_list, &callbacks);
1734 
1735 		while (!list_empty(&callbacks)) {
1736 			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1737 							  struct blk_plug_cb,
1738 							  list);
1739 			list_del(&cb->list);
1740 			cb->callback(cb, from_schedule);
1741 		}
1742 	}
1743 }
1744 
1745 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1746 				      int size)
1747 {
1748 	struct blk_plug *plug = current->plug;
1749 	struct blk_plug_cb *cb;
1750 
1751 	if (!plug)
1752 		return NULL;
1753 
1754 	list_for_each_entry(cb, &plug->cb_list, list)
1755 		if (cb->callback == unplug && cb->data == data)
1756 			return cb;
1757 
1758 	/* Not currently on the callback list */
1759 	BUG_ON(size < sizeof(*cb));
1760 	cb = kzalloc(size, GFP_ATOMIC);
1761 	if (cb) {
1762 		cb->data = data;
1763 		cb->callback = unplug;
1764 		list_add(&cb->list, &plug->cb_list);
1765 	}
1766 	return cb;
1767 }
1768 EXPORT_SYMBOL(blk_check_plugged);
1769 
1770 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1771 {
1772 	trace_android_rvh_blk_flush_plug_list(plug, from_schedule);
1773 	flush_plug_callbacks(plug, from_schedule);
1774 
1775 	if (!list_empty(&plug->mq_list))
1776 		blk_mq_flush_plug_list(plug, from_schedule);
1777 }
1778 
1779 /**
1780  * blk_finish_plug - mark the end of a batch of submitted I/O
1781  * @plug:	The &struct blk_plug passed to blk_start_plug()
1782  *
1783  * Description:
1784  * Indicate that a batch of I/O submissions is complete.  This function
1785  * must be paired with an initial call to blk_start_plug().  The intent
1786  * is to allow the block layer to optimize I/O submission.  See the
1787  * documentation for blk_start_plug() for more information.
1788  */
1789 void blk_finish_plug(struct blk_plug *plug)
1790 {
1791 	if (plug != current->plug)
1792 		return;
1793 	blk_flush_plug_list(plug, false);
1794 
1795 	current->plug = NULL;
1796 }
1797 EXPORT_SYMBOL(blk_finish_plug);
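/*
 * Example: a submitter batching several bios brackets them with a plug so
 * the block layer can merge and defer them (sketch):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (...more work...)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */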
1798 
1799 void blk_io_schedule(void)
1800 {
1801 	/* Prevent hang_check timer from firing at us during very long I/O */
1802 	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1803 
1804 	if (timeout)
1805 		io_schedule_timeout(timeout);
1806 	else
1807 		io_schedule();
1808 }
1809 EXPORT_SYMBOL_GPL(blk_io_schedule);
1810 
1811 int __init blk_dev_init(void)
1812 {
1813 	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1814 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1815 			sizeof_field(struct request, cmd_flags));
1816 	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1817 			sizeof_field(struct bio, bi_opf));
1818 
1819 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
1820 	kblockd_workqueue = alloc_workqueue("kblockd",
1821 					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1822 	if (!kblockd_workqueue)
1823 		panic("Failed to create kblockd\n");
1824 
1825 	blk_requestq_cachep = kmem_cache_create("request_queue",
1826 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1827 
1828 	blk_debugfs_root = debugfs_create_dir("block", NULL);
1829 
1830 	return 0;
1831 }
1832