// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

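	/*
	 * Stash the completion status in end_io_data: until this point it
	 * held the completion to signal; blk_execute_rq() reads it back as
	 * the request's blk_status_t result.
	 */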
	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
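
/*
 * Illustrative sketch, not part of this file: an asynchronous caller would
 * typically stash its own context in rq->end_io_data before submitting and
 * release the request from its completion handler.  The names my_ctx and
 * my_done below are hypothetical, and disk/rq are the caller's:
 *
 *	rq->end_io_data = my_ctx;
 *	blk_execute_rq_nowait(disk, rq, 0, my_done);
 *
 * where my_done(rq, status) consumes the result and frees rq.
 */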
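/* True if the request is mapped to a polled hardware queue. */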
static bool blk_rq_is_poll(struct request *rq)
{
	return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
}

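/*
 * Polled queues raise no completion interrupt, so actively poll the hardware
 * queue until the request has completed.
 */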
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

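	/*
	 * Polled requests are completed by polling the hardware queue; for
	 * interrupt-driven completion sleep on the on-stack completion,
	 * waking every half hung-task interval so the hung task detector
	 * never sees this task blocked for the full timeout.
	 */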
	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
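
/*
 * Illustrative sketch, not part of this file: a driver issuing a synchronous
 * passthrough command would typically allocate the request, fill in its
 * payload, execute it, and free it.  Assuming the blk_get_request() /
 * blk_put_request() helpers of this kernel's block API (the queue, disk and
 * payload setup are the caller's responsibility):
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver-specific command payload ...
 *	status = blk_execute_rq(disk, rq, 0);
 *	blk_put_request(rq);
 */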