/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    No locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
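
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * interrupt handler might map a hardware-reported tag back to the
 * request it was issued for; "my_host", "MY_COMPLETED_TAG" and
 * "my_complete_request" are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_host *host = data;
 *		int tag = readl(host->mmio + MY_COMPLETED_TAG);
 *		struct request *rq = blk_queue_find_tag(host->queue, tag);
 *
 *		if (rq)
 *			my_complete_request(host, rq);
 *		return IRQ_HANDLED;
 *	}
 */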

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drops the reference count on @bqt and frees it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function if tagging
 *    has been used, so there's no need to call it directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, while still
 *	leaving the queue functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

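	/*
	 * One bit per tag, rounded up to a whole number of unsigned
	 * longs: e.g. a depth of 70 with 64-bit longs needs two words.
	 */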
	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
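
/*
 * Example (illustrative sketch, not part of this file): a host driver
 * whose hardware shares one tag space across several devices might
 * allocate one external map and attach it to each queue; "shost",
 * "sdev" and their members are hypothetical.
 *
 *	shost->bqt = blk_init_tags(depth, BLK_TAG_ALLOC_FIFO);
 *	if (!shost->bqt)
 *		return -ENOMEM;
 *	...
 *	if (blk_queue_init_tags(sdev->request_queue, depth,
 *				shost->bqt, BLK_TAG_ALLOC_FIFO))
 *		goto fail;
 *
 * Each attach takes a reference on the map; blk_free_tags() drops the
 * driver's own reference when the host goes away.
 */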

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

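	/*
	 * Three cases: no map exists yet, so allocate a fresh one; the
	 * queue already has a map, so resize it; or the caller passed
	 * in a shared external map, so just take another reference.
	 */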
	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
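
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * enables tagging on a single queue and later grows the depth after
 * the device reports more resources; "sdev" is hypothetical, and the
 * queue lock is taken for the resize as the kernel-doc above requires.
 *
 *	if (blk_queue_init_tags(sdev->request_queue, 16, NULL,
 *				BLK_TAG_ALLOC_FIFO))
 *		return -ENOMEM;
 *	...
 *	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 *	rc = blk_queue_resize_tags(sdev->request_queue, 32);
 *	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 */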

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If real_max_depth is already large enough, just adjust
	 * max_depth.  *NOTE* since requests with tag values between
	 * new_depth and real_max_depth can be in flight, the tag map
	 * cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
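		/*
		 * Leave at least one or two tags for sync I/O: a depth
		 * of 2 allows 1 async tag, a depth of 3 allows 2, and
		 * anything larger allows depth - 2.
		 */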
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
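			/*
			 * Round-robin: scan from next_tag toward the end
			 * of the allowed window, then wrap around and
			 * scan the portion of the map before next_tag.
			 */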
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
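
/*
 * Example (illustrative sketch, not part of this file): a typical
 * request_fn pairs the two helpers, tagging a request before issue and
 * releasing the tag on completion; "my_request_fn" and "my_issue" are
 * hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;
 *			my_issue(rq);
 *		}
 *	}
 *
 * blk_queue_start_tag() returns 1 when no tag is free, so the loop
 * simply stops and retries later; on success it has already dequeued
 * rq via blk_start_request() and assigned rq->tag for my_issue() to
 * hand to the hardware.  On completion (queue lock held), the driver
 * calls blk_queue_end_tag(q, rq) before ending the request.
 */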

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
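
/*
 * Example (illustrative sketch, not part of this file): a driver's bus
 * reset handler might push every tagged request back onto the queue;
 * "my_reset" and "my_host" are hypothetical.
 *
 *	static void my_reset(struct my_host *host)
 *	{
 *		struct request_queue *q = host->queue;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_queue_invalidate_tags(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 *	}
 */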