/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

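/*
 * Parse the "fail_io_timeout=" boot parameter; the value string is
 * handed to the fault injection core, which expects the usual
 * <interval>,<probability>,<space>,<times> format.
 */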
static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

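/*
 * Decide whether a request on @q should pretend to time out. Only
 * queues with QUEUE_FLAG_FAIL_IO set (see part_timeout_store() below)
 * are eligible; the fault attribute then makes the per-request call.
 */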
int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

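/*
 * Expose the fault attribute under debugfs (fail_io_timeout/) so the
 * injection parameters can be tuned at runtime.
 */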
static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

late_initcall(fail_io_timeout_debugfs);

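/*
 * sysfs show/store handlers for the per-disk fault injection attribute:
 * they report and toggle QUEUE_FLAG_FAIL_IO on the disk's request queue.
 */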
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}
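
/*
 * Usage sketch, assuming these handlers back the per-disk sysfs
 * attribute wired up in genhd.c and the fault attribute has been
 * configured via the boot parameter or debugfs:
 *
 *	# make eligible requests on the disk fake a timeout
 *	echo 1 > /sys/block/<disk>/io-timeout-fail
 */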

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling timer for
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

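/*
 * @req has exceeded its deadline: ask the driver's timeout handler what
 * to do and act on the verdict.
 */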
static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now. In the future we could
		 * send a message to abort the command and move more of
		 * the generic SCSI EH code into the block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
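
/*
 * A minimal sketch of a driver-side timeout handler (hypothetical
 * "mydrv" names, not part of this file), installed with
 * blk_queue_rq_timed_out(q, mydrv_timed_out):
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_abort_cmd(rq))
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;
 *	}
 */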
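
/*
 * Per-queue timer callback: walk the timeout list, hand overdue requests
 * to blk_rq_timed_out(), and re-arm the timer for the earliest remaining
 * deadline.
 */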
void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		} else if (!next_set || time_after(next, rq->deadline)) {
			next = rq->deadline;
			next_set = 1;
		}
	}

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request - Request recovery for the specified command
 * @req: pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
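
/*
 * Caller sketch (hypothetical driver code, not part of this file): an
 * LLD that knows a command is dead can kick recovery directly, with the
 * queue lock held:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_abort_request(rq);
 *	spin_unlock_irq(q->queue_lock);
 */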

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req: request that is about to start running.
 *
 * Notes:
 *    Each request carries its own deadline on the queue's timeout list;
 *    a single per-queue timer is (re)armed for the earliest pending
 *    deadline. When the request completes, its timeout entry is removed.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->deadline = jiffies + req->timeout;
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = round_jiffies_up(req->deadline);

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
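
/*
 * Wiring sketch (hypothetical driver init, not part of this file): a
 * queue opts in to timeout handling by setting a default timeout and a
 * handler, after which started requests are armed via blk_add_timer():
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_rq_timed_out(q, mydrv_timed_out);
 */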