1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9 
10 #include <linux/bio.h>
11 #include <linux/bitops.h>
12 #include <linux/blkdev.h>
13 #include <linux/completion.h>
14 #include <linux/kernel.h>
15 #include <linux/mempool.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/pci.h>
19 #include <linux/delay.h>
20 #include <linux/hardirq.h>
21 #include <linux/scatterlist.h>
22 
23 #include <scsi/scsi.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_dbg.h>
26 #include <scsi/scsi_device.h>
27 #include <scsi/scsi_driver.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 
31 #include "scsi_priv.h"
32 #include "scsi_logging.h"
33 
34 
35 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
36 #define SG_MEMPOOL_SIZE		2
37 
38 struct scsi_host_sg_pool {
39 	size_t		size;
40 	char		*name;
41 	struct kmem_cache	*slab;
42 	mempool_t	*pool;
43 };
44 
45 #define SP(x) { x, "sgpool-" __stringify(x) }
46 #if (SCSI_MAX_SG_SEGMENTS < 32)
47 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48 #endif
49 static struct scsi_host_sg_pool scsi_sg_pools[] = {
50 	SP(8),
51 	SP(16),
52 #if (SCSI_MAX_SG_SEGMENTS > 32)
53 	SP(32),
54 #if (SCSI_MAX_SG_SEGMENTS > 64)
55 	SP(64),
56 #if (SCSI_MAX_SG_SEGMENTS > 128)
57 	SP(128),
58 #if (SCSI_MAX_SG_SEGMENTS > 256)
59 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60 #endif
61 #endif
62 #endif
63 #endif
64 	SP(SCSI_MAX_SG_SEGMENTS)
65 };
66 #undef SP
67 
68 struct kmem_cache *scsi_sdb_cache;
69 
70 static void scsi_run_queue(struct request_queue *q);
71 
72 /*
73  * Function:	scsi_unprep_request()
74  *
75  * Purpose:	Remove all preparation done for a request, including its
76  *		associated scsi_cmnd, so that it can be requeued.
77  *
78  * Arguments:	req	- request to unprepare
79  *
80  * Lock status:	Assumed that no locks are held upon entry.
81  *
82  * Returns:	Nothing.
83  */
84 static void scsi_unprep_request(struct request *req)
85 {
86 	struct scsi_cmnd *cmd = req->special;
87 
88 	req->cmd_flags &= ~REQ_DONTPREP;
89 	req->special = NULL;
90 
91 	scsi_put_command(cmd);
92 }
93 
94 /**
95  * __scsi_queue_insert - private queue insertion
96  * @cmd: The SCSI command being requeued
97  * @reason:  The reason for the requeue
98  * @unbusy: Whether the queue should be unbusied
99  *
100  * This is a private queue insertion.  The public interface
101  * scsi_queue_insert() always assumes the queue should be unbusied
102  * because it's always called before the completion.  This function is
103  * for a requeue after completion, which should only occur in this
104  * file.
105  */
106 static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
107 {
108 	struct Scsi_Host *host = cmd->device->host;
109 	struct scsi_device *device = cmd->device;
110 	struct scsi_target *starget = scsi_target(device);
111 	struct request_queue *q = device->request_queue;
112 	unsigned long flags;
113 
114 	SCSI_LOG_MLQUEUE(1,
115 		 printk("Inserting command %p into mlqueue\n", cmd));
116 
117 	/*
118 	 * Set the appropriate busy bit for the device/host.
119 	 *
120 	 * If the host/device isn't busy, assume that something actually
121 	 * completed, and that we should be able to queue a command now.
122 	 *
123 	 * Note that the prior mid-layer assumption that any host could
124 	 * always queue at least one command is now broken.  The mid-layer
125 	 * will implement a user specifiable stall (see
126 	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
127 	 * if a command is requeued with no other commands outstanding
128 	 * either for the device or for the host.
129 	 */
130 	switch (reason) {
131 	case SCSI_MLQUEUE_HOST_BUSY:
132 		host->host_blocked = host->max_host_blocked;
133 		break;
134 	case SCSI_MLQUEUE_DEVICE_BUSY:
135 		device->device_blocked = device->max_device_blocked;
136 		break;
137 	case SCSI_MLQUEUE_TARGET_BUSY:
138 		starget->target_blocked = starget->max_target_blocked;
139 		break;
140 	}
141 
142 	/*
143 	 * Decrement the counters, since these commands are no longer
144 	 * active on the host/device.
145 	 */
146 	if (unbusy)
147 		scsi_device_unbusy(device);
148 
149 	/*
150 	 * Requeue this command.  It will go before all other commands
151 	 * that are already in the queue.
152 	 *
153 	 * NOTE: there is magic here about the way the queue is plugged if
154 	 * we have no outstanding commands.
155 	 *
156 	 * Although we *don't* plug the queue, we call the request
157 	 * function.  The SCSI request function detects the blocked condition
158 	 * and plugs the queue appropriately.
159          */
160 	spin_lock_irqsave(q->queue_lock, flags);
161 	blk_requeue_request(q, cmd->request);
162 	spin_unlock_irqrestore(q->queue_lock, flags);
163 
164 	scsi_run_queue(q);
165 
166 	return 0;
167 }
168 
169 /*
170  * Function:    scsi_queue_insert()
171  *
172  * Purpose:     Insert a command in the midlevel queue.
173  *
174  * Arguments:   cmd    - command that we are adding to queue.
175  *              reason - why we are inserting command to queue.
176  *
177  * Lock status: Assumed that lock is not held upon entry.
178  *
179  * Returns:     Nothing.
180  *
181  * Notes:       We do this for one of two cases.  Either the host is busy
182  *              and it cannot accept any more commands for the time being,
183  *              or the device returned QUEUE_FULL and can accept no more
184  *              commands.
185  * Notes:       This could be called either from an interrupt context or a
186  *              normal process context.
187  */
188 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189 {
190 	return __scsi_queue_insert(cmd, reason, 1);
191 }
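
/*
 * Example (illustrative only, not part of the original file; the example_*
 * names are made up): commands most commonly reach scsi_queue_insert() via
 * scsi_dispatch_cmd() after a low-level driver's queuecommand() refuses the
 * command.  A hypothetical driver might trigger the requeue like this:
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		struct example_hba *hba = shost_priv(cmd->device->host);
 *
 *		if (example_hba_full(hba))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *
 *		cmd->scsi_done = done;
 *		return example_hba_issue(hba, cmd);
 *	}
 *
 * The midlayer then calls scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY),
 * which applies the host_blocked stall described in __scsi_queue_insert().
 */
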
192 /**
193  * scsi_execute - insert request and wait for the result
194  * @sdev:	scsi device
195  * @cmd:	scsi command
196  * @data_direction: data direction
197  * @buffer:	data buffer
198  * @bufflen:	len of buffer
199  * @sense:	optional sense buffer
200  * @timeout:	request timeout in jiffies
201  * @retries:	number of times to retry request
202  * @flags:	flags to be ORed into the request's cmd_flags
203  * @resid:	optional residual length
204  *
205  * returns the req->errors value which is the scsi_cmnd result
206  * field.
207  */
208 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209 		 int data_direction, void *buffer, unsigned bufflen,
210 		 unsigned char *sense, int timeout, int retries, int flags,
211 		 int *resid)
212 {
213 	struct request *req;
214 	int write = (data_direction == DMA_TO_DEVICE);
215 	int ret = DRIVER_ERROR << 24;
216 
217 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218 
219 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
220 					buffer, bufflen, __GFP_WAIT))
221 		goto out;
222 
223 	req->cmd_len = COMMAND_SIZE(cmd[0]);
224 	memcpy(req->cmd, cmd, req->cmd_len);
225 	req->sense = sense;
226 	req->sense_len = 0;
227 	req->retries = retries;
228 	req->timeout = timeout;
229 	req->cmd_type = REQ_TYPE_BLOCK_PC;
230 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
231 
232 	/*
233 	 * head injection *required* here otherwise quiesce won't work
234 	 */
235 	blk_execute_rq(req->q, NULL, req, 1);
236 
237 	/*
238 	 * Some devices (USB mass-storage in particular) may transfer
239 	 * garbage data together with a residue indicating that the data
240 	 * is invalid.  Prevent the garbage from being misinterpreted
241 	 * and prevent security leaks by zeroing out the excess data.
242 	 */
243 	if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
244 		memset(buffer + (bufflen - req->data_len), 0, req->data_len);
245 
246 	if (resid)
247 		*resid = req->data_len;
248 	ret = req->errors;
249  out:
250 	blk_put_request(req);
251 
252 	return ret;
253 }
254 EXPORT_SYMBOL(scsi_execute);
255 
256 
257 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
258 		     int data_direction, void *buffer, unsigned bufflen,
259 		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
260 		     int *resid)
261 {
262 	char *sense = NULL;
263 	int result;
264 
265 	if (sshdr) {
266 		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
267 		if (!sense)
268 			return DRIVER_ERROR << 24;
269 	}
270 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
271 			      sense, timeout, retries, 0, resid);
272 	if (sshdr)
273 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
274 
275 	kfree(sense);
276 	return result;
277 }
278 EXPORT_SYMBOL(scsi_execute_req);
279 
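/*
 * Example (illustrative only, not from the original file): a typical caller
 * of the two helpers above issues a small CDB synchronously and inspects the
 * decoded sense, e.g. a TEST UNIT READY from an upper-level driver:
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *				  30 * HZ, 3, NULL);
 *	if (result && scsi_sense_valid(&sshdr))
 *		scsi_print_sense_hdr("example", &sshdr);
 *
 * scsi_execute() is the lower-level variant for callers that want the raw
 * sense buffer and extra request flags instead of a struct scsi_sense_hdr.
 */
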
280 struct scsi_io_context {
281 	void *data;
282 	void (*done)(void *data, char *sense, int result, int resid);
283 	char sense[SCSI_SENSE_BUFFERSIZE];
284 };
285 
286 static struct kmem_cache *scsi_io_context_cache;
287 
288 static void scsi_end_async(struct request *req, int uptodate)
289 {
290 	struct scsi_io_context *sioc = req->end_io_data;
291 
292 	if (sioc->done)
293 		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
294 
295 	kmem_cache_free(scsi_io_context_cache, sioc);
296 	__blk_put_request(req->q, req);
297 }
298 
299 static int scsi_merge_bio(struct request *rq, struct bio *bio)
300 {
301 	struct request_queue *q = rq->q;
302 
303 	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
304 	if (rq_data_dir(rq) == WRITE)
305 		bio->bi_rw |= (1 << BIO_RW);
306 	blk_queue_bounce(q, &bio);
307 
308 	return blk_rq_append_bio(q, rq, bio);
309 }
310 
311 static void scsi_bi_endio(struct bio *bio, int error)
312 {
313 	bio_put(bio);
314 }
315 
316 /**
317  * scsi_req_map_sg - map a scatterlist into a request
318  * @rq:		request to fill
319  * @sgl:	scatterlist
320  * @nsegs:	number of elements
321  * @bufflen:	len of buffer
322  * @gfp:	memory allocation flags
323  *
324  * scsi_req_map_sg maps a scatterlist into a request so that the
325  * request can be sent to the block layer. We do not trust the scatterlist
326  * sent to us, as some ULDs use that struct only to organize the pages.
327  */
328 static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
329 			   int nsegs, unsigned bufflen, gfp_t gfp)
330 {
331 	struct request_queue *q = rq->q;
332 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
333 	unsigned int data_len = bufflen, len, bytes, off;
334 	struct scatterlist *sg;
335 	struct page *page;
336 	struct bio *bio = NULL;
337 	int i, err, nr_vecs = 0;
338 
339 	for_each_sg(sgl, sg, nsegs, i) {
340 		page = sg_page(sg);
341 		off = sg->offset;
342 		len = sg->length;
343 
344 		while (len > 0 && data_len > 0) {
345 			/*
346 			 * sg sends a scatterlist that is larger than
347 			 * the data_len it wants transferred for certain
348 			 * IO sizes
349 			 */
350 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
351 			bytes = min(bytes, data_len);
352 
353 			if (!bio) {
354 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
355 				nr_pages -= nr_vecs;
356 
357 				bio = bio_alloc(gfp, nr_vecs);
358 				if (!bio) {
359 					err = -ENOMEM;
360 					goto free_bios;
361 				}
362 				bio->bi_end_io = scsi_bi_endio;
363 			}
364 
365 			if (bio_add_pc_page(q, bio, page, bytes, off) !=
366 			    bytes) {
367 				bio_put(bio);
368 				err = -EINVAL;
369 				goto free_bios;
370 			}
371 
372 			if (bio->bi_vcnt >= nr_vecs) {
373 				err = scsi_merge_bio(rq, bio);
374 				if (err) {
375 					bio_endio(bio, 0);
376 					goto free_bios;
377 				}
378 				bio = NULL;
379 			}
380 
381 			page++;
382 			len -= bytes;
383 			data_len -= bytes;
384 			off = 0;
385 		}
386 	}
387 
388 	rq->buffer = rq->data = NULL;
389 	rq->data_len = bufflen;
390 	return 0;
391 
392 free_bios:
393 	while ((bio = rq->bio) != NULL) {
394 		rq->bio = bio->bi_next;
395 		/*
396 		 * call endio instead of bio_put in case it was bounced
397 		 */
398 		bio_endio(bio, 0);
399 	}
400 
401 	return err;
402 }
403 
404 /**
405  * scsi_execute_async - insert request
406  * @sdev:	scsi device
407  * @cmd:	scsi command
408  * @cmd_len:	length of scsi cdb
409  * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
410  * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
411  * @bufflen:	len of buffer
412  * @use_sg:	if buffer is a scatterlist this is the number of elements
413  * @timeout:	request timeout in jiffies
414  * @retries:	number of times to retry request
415  * @privdata:	data passed to done()
416  * @done:	callback function when done
417  * @gfp:	memory allocation flags
418  */
419 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
420 		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
421 		       int use_sg, int timeout, int retries, void *privdata,
422 		       void (*done)(void *, char *, int, int), gfp_t gfp)
423 {
424 	struct request *req;
425 	struct scsi_io_context *sioc;
426 	int err = 0;
427 	int write = (data_direction == DMA_TO_DEVICE);
428 
429 	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
430 	if (!sioc)
431 		return DRIVER_ERROR << 24;
432 
433 	req = blk_get_request(sdev->request_queue, write, gfp);
434 	if (!req)
435 		goto free_sense;
436 	req->cmd_type = REQ_TYPE_BLOCK_PC;
437 	req->cmd_flags |= REQ_QUIET;
438 
439 	if (use_sg)
440 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
441 	else if (bufflen)
442 		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
443 
444 	if (err)
445 		goto free_req;
446 
447 	req->cmd_len = cmd_len;
448 	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
449 	memcpy(req->cmd, cmd, req->cmd_len);
450 	req->sense = sioc->sense;
451 	req->sense_len = 0;
452 	req->timeout = timeout;
453 	req->retries = retries;
454 	req->end_io_data = sioc;
455 
456 	sioc->data = privdata;
457 	sioc->done = done;
458 
459 	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
460 	return 0;
461 
462 free_req:
463 	blk_put_request(req);
464 free_sense:
465 	kmem_cache_free(scsi_io_context_cache, sioc);
466 	return DRIVER_ERROR << 24;
467 }
468 EXPORT_SYMBOL_GPL(scsi_execute_async);
469 
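/*
 * Example (illustrative sketch, not part of the original file; the example_*
 * names are hypothetical): a caller of scsi_execute_async() supplies a done()
 * callback that receives its private data, the sense buffer, the request
 * errors and the residual byte count when the request completes:
 *
 *	static void example_done(void *data, char *sense, int result, int resid)
 *	{
 *		struct example_ctx *ctx = data;
 *
 *		complete(&ctx->done);
 *	}
 *
 *	static int example_send_inquiry(struct scsi_device *sdev,
 *					struct example_ctx *ctx)
 *	{
 *		unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 96, 0 };
 *
 *		return scsi_execute_async(sdev, cmd, 6, DMA_FROM_DEVICE,
 *					  ctx->buf, 96, 0, 30 * HZ, 3,
 *					  ctx, example_done, GFP_KERNEL);
 *	}
 */
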
470 /*
471  * Function:    scsi_init_cmd_errh()
472  *
473  * Purpose:     Initialize cmd fields related to error handling.
474  *
475  * Arguments:   cmd	- command that is ready to be queued.
476  *
477  * Notes:       This function has the job of initializing a number of
478  *              fields related to error handling.   Typically this will
479  *              be called once for each command, as required.
480  */
481 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
482 {
483 	cmd->serial_number = 0;
484 	scsi_set_resid(cmd, 0);
485 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
486 	if (cmd->cmd_len == 0)
487 		cmd->cmd_len = scsi_command_size(cmd->cmnd);
488 }
489 
490 void scsi_device_unbusy(struct scsi_device *sdev)
491 {
492 	struct Scsi_Host *shost = sdev->host;
493 	struct scsi_target *starget = scsi_target(sdev);
494 	unsigned long flags;
495 
496 	spin_lock_irqsave(shost->host_lock, flags);
497 	shost->host_busy--;
498 	starget->target_busy--;
499 	if (unlikely(scsi_host_in_recovery(shost) &&
500 		     (shost->host_failed || shost->host_eh_scheduled)))
501 		scsi_eh_wakeup(shost);
502 	spin_unlock(shost->host_lock);
503 	spin_lock(sdev->request_queue->queue_lock);
504 	sdev->device_busy--;
505 	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
506 }
507 
508 /*
509  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
510  * and call blk_run_queue for all the scsi_devices on the target -
511  * including current_sdev first.
512  *
513  * Called with *no* scsi locks held.
514  */
515 static void scsi_single_lun_run(struct scsi_device *current_sdev)
516 {
517 	struct Scsi_Host *shost = current_sdev->host;
518 	struct scsi_device *sdev, *tmp;
519 	struct scsi_target *starget = scsi_target(current_sdev);
520 	unsigned long flags;
521 
522 	spin_lock_irqsave(shost->host_lock, flags);
523 	starget->starget_sdev_user = NULL;
524 	spin_unlock_irqrestore(shost->host_lock, flags);
525 
526 	/*
527 	 * Call blk_run_queue for all LUNs on the target, starting with
528 	 * current_sdev. We race with others (to set starget_sdev_user),
529 	 * but in most cases, we will be first. Ideally, each LU on the
530 	 * target would get some limited time or requests on the target.
531 	 */
532 	blk_run_queue(current_sdev->request_queue);
533 
534 	spin_lock_irqsave(shost->host_lock, flags);
535 	if (starget->starget_sdev_user)
536 		goto out;
537 	list_for_each_entry_safe(sdev, tmp, &starget->devices,
538 			same_target_siblings) {
539 		if (sdev == current_sdev)
540 			continue;
541 		if (scsi_device_get(sdev))
542 			continue;
543 
544 		spin_unlock_irqrestore(shost->host_lock, flags);
545 		blk_run_queue(sdev->request_queue);
546 		spin_lock_irqsave(shost->host_lock, flags);
547 
548 		scsi_device_put(sdev);
549 	}
550  out:
551 	spin_unlock_irqrestore(shost->host_lock, flags);
552 }
553 
554 static inline int scsi_device_is_busy(struct scsi_device *sdev)
555 {
556 	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
557 		return 1;
558 
559 	return 0;
560 }
561 
562 static inline int scsi_target_is_busy(struct scsi_target *starget)
563 {
564 	return ((starget->can_queue > 0 &&
565 		 starget->target_busy >= starget->can_queue) ||
566 		 starget->target_blocked);
567 }
568 
569 static inline int scsi_host_is_busy(struct Scsi_Host *shost)
570 {
571 	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
572 	    shost->host_blocked || shost->host_self_blocked)
573 		return 1;
574 
575 	return 0;
576 }
577 
578 /*
579  * Function:	scsi_run_queue()
580  *
581  * Purpose:	Select a proper request queue to serve next
582  *
583  * Arguments:	q	- last request's queue
584  *
585  * Returns:     Nothing
586  *
587  * Notes:	The previous command was completely finished, start
588  *		a new one if possible.
589  */
590 static void scsi_run_queue(struct request_queue *q)
591 {
592 	struct scsi_device *sdev = q->queuedata;
593 	struct Scsi_Host *shost = sdev->host;
594 	LIST_HEAD(starved_list);
595 	unsigned long flags;
596 
597 	if (scsi_target(sdev)->single_lun)
598 		scsi_single_lun_run(sdev);
599 
600 	spin_lock_irqsave(shost->host_lock, flags);
601 	list_splice_init(&shost->starved_list, &starved_list);
602 
603 	while (!list_empty(&starved_list)) {
604 		int flagset;
605 
606 		/*
607 		 * As long as shost is accepting commands and we have
608 		 * starved queues, call blk_run_queue. scsi_request_fn
609 		 * drops the queue_lock and can add us back to the
610 		 * starved_list.
611 		 *
612 		 * host_lock protects the starved_list and starved_entry.
613 		 * scsi_request_fn must get the host_lock before checking
614 		 * or modifying starved_list or starved_entry.
615 		 */
616 		if (scsi_host_is_busy(shost))
617 			break;
618 
619 		sdev = list_entry(starved_list.next,
620 				  struct scsi_device, starved_entry);
621 		list_del_init(&sdev->starved_entry);
622 		if (scsi_target_is_busy(scsi_target(sdev))) {
623 			list_move_tail(&sdev->starved_entry,
624 				       &shost->starved_list);
625 			continue;
626 		}
627 
628 		spin_unlock(shost->host_lock);
629 
630 		spin_lock(sdev->request_queue->queue_lock);
631 		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
632 				!test_bit(QUEUE_FLAG_REENTER,
633 					&sdev->request_queue->queue_flags);
634 		if (flagset)
635 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
636 		__blk_run_queue(sdev->request_queue);
637 		if (flagset)
638 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
639 		spin_unlock(sdev->request_queue->queue_lock);
640 
641 		spin_lock(shost->host_lock);
642 	}
643 	/* put any unprocessed entries back */
644 	list_splice(&starved_list, &shost->starved_list);
645 	spin_unlock_irqrestore(shost->host_lock, flags);
646 
647 	blk_run_queue(q);
648 }
649 
650 /*
651  * Function:	scsi_requeue_command()
652  *
653  * Purpose:	Handle post-processing of completed commands.
654  *
655  * Arguments:	q	- queue to operate on
656  *		cmd	- command that may need to be requeued.
657  *
658  * Returns:	Nothing
659  *
660  * Notes:	After command completion, there may be blocks left
661  *		over which weren't finished by the previous command;
662  *		this can be for a number of reasons - the main one is
663  *		I/O errors in the middle of the request, in which case
664  *		we need to request the blocks that come after the bad
665  *		sector.
666  * Notes:	Upon return, cmd is a stale pointer.
667  */
668 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
669 {
670 	struct request *req = cmd->request;
671 	unsigned long flags;
672 
673 	spin_lock_irqsave(q->queue_lock, flags);
674 	scsi_unprep_request(req);
675 	blk_requeue_request(q, req);
676 	spin_unlock_irqrestore(q->queue_lock, flags);
677 
678 	scsi_run_queue(q);
679 }
680 
681 void scsi_next_command(struct scsi_cmnd *cmd)
682 {
683 	struct scsi_device *sdev = cmd->device;
684 	struct request_queue *q = sdev->request_queue;
685 
686 	/* need to hold a reference on the device before we let go of the cmd */
687 	get_device(&sdev->sdev_gendev);
688 
689 	scsi_put_command(cmd);
690 	scsi_run_queue(q);
691 
692 	/* ok to remove device now */
693 	put_device(&sdev->sdev_gendev);
694 }
695 
696 void scsi_run_host_queues(struct Scsi_Host *shost)
697 {
698 	struct scsi_device *sdev;
699 
700 	shost_for_each_device(sdev, shost)
701 		scsi_run_queue(sdev->request_queue);
702 }
703 
704 static void __scsi_release_buffers(struct scsi_cmnd *, int);
705 
706 /*
707  * Function:    scsi_end_request()
708  *
709  * Purpose:     Post-processing of completed commands (usually invoked at end
710  *		of upper level post-processing and scsi_io_completion).
711  *
712  * Arguments:   cmd	 - command that is complete.
713  *              error    - 0 if I/O indicates success, < 0 for I/O error.
714  *              bytes    - number of bytes of completed I/O
715  *		requeue  - indicates whether we should requeue leftovers.
716  *
717  * Lock status: Assumed that lock is not held upon entry.
718  *
719  * Returns:     cmd if requeue required, NULL otherwise.
720  *
721  * Notes:       This is called for block device requests in order to
722  *              mark some number of sectors as complete.
723  *
724  *		We are guaranteeing that the request queue will be goosed
725  *		at some point during this call.
726  * Notes:	If cmd was requeued, upon return it will be a stale pointer.
727  */
728 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
729 					  int bytes, int requeue)
730 {
731 	struct request_queue *q = cmd->device->request_queue;
732 	struct request *req = cmd->request;
733 
734 	/*
735 	 * If there are blocks left over at the end, set up the command
736 	 * to queue the remainder of them.
737 	 */
738 	if (blk_end_request(req, error, bytes)) {
739 		int leftover = (req->hard_nr_sectors << 9);
740 
741 		if (blk_pc_request(req))
742 			leftover = req->data_len;
743 
744 		/* kill remainder if no retries */
745 		if (error && scsi_noretry_cmd(cmd))
746 			blk_end_request(req, error, leftover);
747 		else {
748 			if (requeue) {
749 				/*
750 				 * Bleah.  Leftovers again.  Stick the
751 				 * leftovers in the front of the
752 				 * queue, and goose the queue again.
753 				 */
754 				scsi_release_buffers(cmd);
755 				scsi_requeue_command(q, cmd);
756 				cmd = NULL;
757 			}
758 			return cmd;
759 		}
760 	}
761 
762 	/*
763 	 * This will goose the queue request function at the end, so we don't
764 	 * need to worry about launching another command.
765 	 */
766 	__scsi_release_buffers(cmd, 0);
767 	scsi_next_command(cmd);
768 	return NULL;
769 }
770 
771 static inline unsigned int scsi_sgtable_index(unsigned short nents)
772 {
773 	unsigned int index;
774 
775 	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
776 
777 	if (nents <= 8)
778 		index = 0;
779 	else
780 		index = get_count_order(nents) - 3;
781 
782 	return index;
783 }
784 
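/*
 * A worked example of the mapping above (assuming the default
 * SCSI_MAX_SG_SEGMENTS of 128, i.e. the five pools declared at the top of
 * this file):
 *
 *	nents  1 -   8	->  index 0  (sgpool-8)
 *	nents  9 -  16	->  index 1  (sgpool-16)
 *	nents 17 -  32	->  index 2  (sgpool-32)
 *	nents 33 -  64	->  index 3  (sgpool-64)
 *	nents 65 - 128	->  index 4  (sgpool-128)
 *
 * i.e. get_count_order(nents) - 3 picks the smallest power-of-two pool large
 * enough for the table.
 */
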
785 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
786 {
787 	struct scsi_host_sg_pool *sgp;
788 
789 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
790 	mempool_free(sgl, sgp->pool);
791 }
792 
793 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
794 {
795 	struct scsi_host_sg_pool *sgp;
796 
797 	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
798 	return mempool_alloc(sgp->pool, gfp_mask);
799 }
800 
801 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
802 			      gfp_t gfp_mask)
803 {
804 	int ret;
805 
806 	BUG_ON(!nents);
807 
808 	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
809 			       gfp_mask, scsi_sg_alloc);
810 	if (unlikely(ret))
811 		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
812 				scsi_sg_free);
813 
814 	return ret;
815 }
816 
817 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
818 {
819 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
820 }
821 
822 static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
823 {
824 
825 	if (cmd->sdb.table.nents)
826 		scsi_free_sgtable(&cmd->sdb);
827 
828 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
829 
830 	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
831 		struct scsi_data_buffer *bidi_sdb =
832 			cmd->request->next_rq->special;
833 		scsi_free_sgtable(bidi_sdb);
834 		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
835 		cmd->request->next_rq->special = NULL;
836 	}
837 
838 	if (scsi_prot_sg_count(cmd))
839 		scsi_free_sgtable(cmd->prot_sdb);
840 }
841 
842 /*
843  * Function:    scsi_release_buffers()
844  *
845  * Purpose:     Completion processing for block device I/O requests.
846  *
847  * Arguments:   cmd	- command that we are bailing.
848  *
849  * Lock status: Assumed that no lock is held upon entry.
850  *
851  * Returns:     Nothing
852  *
853  * Notes:       In the event that an upper level driver rejects a
854  *		command, we must release resources allocated during
855  *		the __init_io() function.  Primarily this would involve
856  *		the scatter-gather table, and potentially any bounce
857  *		buffers.
858  */
859 void scsi_release_buffers(struct scsi_cmnd *cmd)
860 {
861 	__scsi_release_buffers(cmd, 1);
862 }
863 EXPORT_SYMBOL(scsi_release_buffers);
864 
865 /*
866  * Bidi commands must be completed as a whole, both sides at once.
867  * If part of the bytes were written and lld returned
868  * scsi_in()->resid and/or scsi_out()->resid this information will be left
869  * in req->data_len and req->next_rq->data_len. The upper-layer driver can
870  * decide what to do with this information.
871  */
872 static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
873 {
874 	struct request *req = cmd->request;
875 	unsigned int dlen = req->data_len;
876 	unsigned int next_dlen = req->next_rq->data_len;
877 
878 	req->data_len = scsi_out(cmd)->resid;
879 	req->next_rq->data_len = scsi_in(cmd)->resid;
880 
881 	/* The req and req->next_rq have not been completed */
882 	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
883 
884 	scsi_release_buffers(cmd);
885 
886 	/*
887 	 * This will goose the queue request function at the end, so we don't
888 	 * need to worry about launching another command.
889 	 */
890 	scsi_next_command(cmd);
891 }
892 
893 /*
894  * Function:    scsi_io_completion()
895  *
896  * Purpose:     Completion processing for block device I/O requests.
897  *
898  * Arguments:   cmd   - command that is finished.
899  *
900  * Lock status: Assumed that no lock is held upon entry.
901  *
902  * Returns:     Nothing
903  *
904  * Notes:       This function is matched in terms of capabilities to
905  *              the function that created the scatter-gather list.
906  *              In other words, if there are no bounce buffers
907  *              (the normal case for most drivers), we don't need
908  *              the logic to deal with cleaning up afterwards.
909  *
910  *		We must call scsi_end_request().  This will finish off
911  *		the specified number of sectors.  If we are done, the
912  *		command block will be released and the queue function
913  *		will be goosed.  If we are not done then we have to
914  *		figure out what to do next:
915  *
916  *		a) We can call scsi_requeue_command().  The request
917  *		   will be unprepared and put back on the queue.  Then
918  *		   a new command will be created for it.  This should
919  *		   be used if we made forward progress, or if we want
920  *		   to switch from READ(10) to READ(6) for example.
921  *
922  *		b) We can call scsi_queue_insert().  The request will
923  *		   be put back on the queue and retried using the same
924  *		   command as before, possibly after a delay.
925  *
926  *		c) We can call blk_end_request() with -EIO to fail
927  *		   the remainder of the request.
928  */
929 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
930 {
931 	int result = cmd->result;
932 	int this_count;
933 	struct request_queue *q = cmd->device->request_queue;
934 	struct request *req = cmd->request;
935 	int error = 0;
936 	struct scsi_sense_hdr sshdr;
937 	int sense_valid = 0;
938 	int sense_deferred = 0;
939 	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
940 	      ACTION_DELAYED_RETRY} action;
941 	char *description = NULL;
942 
943 	if (result) {
944 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
945 		if (sense_valid)
946 			sense_deferred = scsi_sense_is_deferred(&sshdr);
947 	}
948 
949 	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
950 		req->errors = result;
951 		if (result) {
952 			if (sense_valid && req->sense) {
953 				/*
954 				 * SG_IO wants current and deferred errors
955 				 */
956 				int len = 8 + cmd->sense_buffer[7];
957 
958 				if (len > SCSI_SENSE_BUFFERSIZE)
959 					len = SCSI_SENSE_BUFFERSIZE;
960 				memcpy(req->sense, cmd->sense_buffer,  len);
961 				req->sense_len = len;
962 			}
963 			if (!sense_deferred)
964 				error = -EIO;
965 		}
966 		if (scsi_bidi_cmnd(cmd)) {
967 			/* will also release_buffers */
968 			scsi_end_bidi_request(cmd);
969 			return;
970 		}
971 		req->data_len = scsi_get_resid(cmd);
972 	}
973 
974 	BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
975 
976 	/*
977 	 * Next deal with any sectors which we were able to correctly
978 	 * handle.
979 	 */
980 	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
981 				      "%d bytes done.\n",
982 				      req->nr_sectors, good_bytes));
983 
984 	/* A number of bytes were successfully read.  If there
985 	 * are leftovers and there is some kind of error
986 	 * (result != 0), retry the rest.
987 	 */
988 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
989 		return;
990 	this_count = blk_rq_bytes(req);
991 
992 	error = -EIO;
993 
994 	if (host_byte(result) == DID_RESET) {
995 		/* Third party bus reset or reset for error recovery
996 		 * reasons.  Just retry the command and see what
997 		 * happens.
998 		 */
999 		action = ACTION_RETRY;
1000 	} else if (sense_valid && !sense_deferred) {
1001 		switch (sshdr.sense_key) {
1002 		case UNIT_ATTENTION:
1003 			if (cmd->device->removable) {
1004 				/* Detected disc change.  Set a bit
1005 				 * and quietly refuse further access.
1006 				 */
1007 				cmd->device->changed = 1;
1008 				description = "Media Changed";
1009 				action = ACTION_FAIL;
1010 			} else {
1011 				/* Must have been a power glitch, or a
1012 				 * bus reset.  Could not have been a
1013 				 * media change, so we just retry the
1014 				 * command and see what happens.
1015 				 */
1016 				action = ACTION_RETRY;
1017 			}
1018 			break;
1019 		case ILLEGAL_REQUEST:
1020 			/* If we had an ILLEGAL REQUEST returned, then
1021 			 * we may have performed an unsupported
1022 			 * command.  The only thing this should be
1023 			 * would be a ten byte read where only a six
1024 			 * byte read was supported.  Also, on a system
1025 			 * where READ CAPACITY failed, we may have
1026 			 * read past the end of the disk.
1027 			 */
1028 			if ((cmd->device->use_10_for_rw &&
1029 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
1030 			    (cmd->cmnd[0] == READ_10 ||
1031 			     cmd->cmnd[0] == WRITE_10)) {
1032 				/* This will issue a new 6-byte command. */
1033 				cmd->device->use_10_for_rw = 0;
1034 				action = ACTION_REPREP;
1035 			} else if (sshdr.asc == 0x10) /* DIX */ {
1036 				description = "Host Data Integrity Failure";
1037 				action = ACTION_FAIL;
1038 				error = -EILSEQ;
1039 			} else
1040 				action = ACTION_FAIL;
1041 			break;
1042 		case ABORTED_COMMAND:
1043 			action = ACTION_FAIL;
1044 			if (sshdr.asc == 0x10) { /* DIF */
1045 				description = "Target Data Integrity Failure";
1046 				error = -EILSEQ;
1047 			}
1048 			break;
1049 		case NOT_READY:
1050 			/* If the device is in the process of becoming
1051 			 * ready, or has a temporary blockage, retry.
1052 			 */
1053 			if (sshdr.asc == 0x04) {
1054 				switch (sshdr.ascq) {
1055 				case 0x01: /* becoming ready */
1056 				case 0x04: /* format in progress */
1057 				case 0x05: /* rebuild in progress */
1058 				case 0x06: /* recalculation in progress */
1059 				case 0x07: /* operation in progress */
1060 				case 0x08: /* Long write in progress */
1061 				case 0x09: /* self test in progress */
1062 					action = ACTION_DELAYED_RETRY;
1063 					break;
1064 				default:
1065 					description = "Device not ready";
1066 					action = ACTION_FAIL;
1067 					break;
1068 				}
1069 			} else {
1070 				description = "Device not ready";
1071 				action = ACTION_FAIL;
1072 			}
1073 			break;
1074 		case VOLUME_OVERFLOW:
1075 			/* See SSC3rXX or current. */
1076 			action = ACTION_FAIL;
1077 			break;
1078 		default:
1079 			description = "Unhandled sense code";
1080 			action = ACTION_FAIL;
1081 			break;
1082 		}
1083 	} else {
1084 		description = "Unhandled error code";
1085 		action = ACTION_FAIL;
1086 	}
1087 
1088 	switch (action) {
1089 	case ACTION_FAIL:
1090 		/* Give up and fail the remainder of the request */
1091 		scsi_release_buffers(cmd);
1092 		if (!(req->cmd_flags & REQ_QUIET)) {
1093 			if (description)
1094 				scmd_printk(KERN_INFO, cmd, "%s\n",
1095 					    description);
1096 			scsi_print_result(cmd);
1097 			if (driver_byte(result) & DRIVER_SENSE)
1098 				scsi_print_sense("", cmd);
1099 		}
1100 		blk_end_request(req, -EIO, blk_rq_bytes(req));
1101 		scsi_next_command(cmd);
1102 		break;
1103 	case ACTION_REPREP:
1104 		/* Unprep the request and put it back at the head of the queue.
1105 		 * A new command will be prepared and issued.
1106 		 */
1107 		scsi_release_buffers(cmd);
1108 		scsi_requeue_command(q, cmd);
1109 		break;
1110 	case ACTION_RETRY:
1111 		/* Retry the same command immediately */
1112 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1113 		break;
1114 	case ACTION_DELAYED_RETRY:
1115 		/* Retry the same command after a delay */
1116 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1117 		break;
1118 	}
1119 }
1120 
1121 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1122 			     gfp_t gfp_mask)
1123 {
1124 	int count;
1125 
1126 	/*
1127 	 * If sg table allocation fails, requeue request later.
1128 	 */
1129 	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1130 					gfp_mask))) {
1131 		return BLKPREP_DEFER;
1132 	}
1133 
1134 	req->buffer = NULL;
1135 
1136 	/*
1137 	 * Next, walk the list, and fill in the addresses and sizes of
1138 	 * each segment.
1139 	 */
1140 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1141 	BUG_ON(count > sdb->table.nents);
1142 	sdb->table.nents = count;
1143 	if (blk_pc_request(req))
1144 		sdb->length = req->data_len;
1145 	else
1146 		sdb->length = req->nr_sectors << 9;
1147 	return BLKPREP_OK;
1148 }
1149 
1150 /*
1151  * Function:    scsi_init_io()
1152  *
1153  * Purpose:     SCSI I/O initialize function.
1154  *
1155  * Arguments:   cmd   - Command descriptor we wish to initialize
1156  *
1157  * Returns:     0 on success
1158  *		BLKPREP_DEFER if the failure is retryable
1159  *		BLKPREP_KILL if the failure is fatal
1160  */
1161 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1162 {
1163 	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
1164 	if (error)
1165 		goto err_exit;
1166 
1167 	if (blk_bidi_rq(cmd->request)) {
1168 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1169 			scsi_sdb_cache, GFP_ATOMIC);
1170 		if (!bidi_sdb) {
1171 			error = BLKPREP_DEFER;
1172 			goto err_exit;
1173 		}
1174 
1175 		cmd->request->next_rq->special = bidi_sdb;
1176 		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
1177 								    GFP_ATOMIC);
1178 		if (error)
1179 			goto err_exit;
1180 	}
1181 
1182 	if (blk_integrity_rq(cmd->request)) {
1183 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1184 		int ivecs, count;
1185 
1186 		BUG_ON(prot_sdb == NULL);
1187 		ivecs = blk_rq_count_integrity_sg(cmd->request);
1188 
1189 		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1190 			error = BLKPREP_DEFER;
1191 			goto err_exit;
1192 		}
1193 
1194 		count = blk_rq_map_integrity_sg(cmd->request,
1195 						prot_sdb->table.sgl);
1196 		BUG_ON(unlikely(count > ivecs));
1197 
1198 		cmd->prot_sdb = prot_sdb;
1199 		cmd->prot_sdb->table.nents = count;
1200 	}
1201 
1202 	return BLKPREP_OK;
1203 
1204 err_exit:
1205 	scsi_release_buffers(cmd);
1206 	if (error == BLKPREP_KILL)
1207 		scsi_put_command(cmd);
1208 	else /* BLKPREP_DEFER */
1209 		scsi_unprep_request(cmd->request);
1210 
1211 	return error;
1212 }
1213 EXPORT_SYMBOL(scsi_init_io);
1214 
1215 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1216 		struct request *req)
1217 {
1218 	struct scsi_cmnd *cmd;
1219 
1220 	if (!req->special) {
1221 		cmd = scsi_get_command(sdev, GFP_ATOMIC);
1222 		if (unlikely(!cmd))
1223 			return NULL;
1224 		req->special = cmd;
1225 	} else {
1226 		cmd = req->special;
1227 	}
1228 
1229 	/* pull a tag out of the request if we have one */
1230 	cmd->tag = req->tag;
1231 	cmd->request = req;
1232 
1233 	cmd->cmnd = req->cmd;
1234 
1235 	return cmd;
1236 }
1237 
1238 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1239 {
1240 	struct scsi_cmnd *cmd;
1241 	int ret = scsi_prep_state_check(sdev, req);
1242 
1243 	if (ret != BLKPREP_OK)
1244 		return ret;
1245 
1246 	cmd = scsi_get_cmd_from_req(sdev, req);
1247 	if (unlikely(!cmd))
1248 		return BLKPREP_DEFER;
1249 
1250 	/*
1251 	 * BLOCK_PC requests may transfer data, in which case they must have
1252 	 * a bio attached to them.  Or they might contain a SCSI command
1253 	 * that does not transfer data, in which case they may optionally
1254 	 * submit a request without an attached bio.
1255 	 */
1256 	if (req->bio) {
1257 		int ret;
1258 
1259 		BUG_ON(!req->nr_phys_segments);
1260 
1261 		ret = scsi_init_io(cmd, GFP_ATOMIC);
1262 		if (unlikely(ret))
1263 			return ret;
1264 	} else {
1265 		BUG_ON(req->data_len);
1266 		BUG_ON(req->data);
1267 
1268 		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1269 		req->buffer = NULL;
1270 	}
1271 
1272 	cmd->cmd_len = req->cmd_len;
1273 	if (!req->data_len)
1274 		cmd->sc_data_direction = DMA_NONE;
1275 	else if (rq_data_dir(req) == WRITE)
1276 		cmd->sc_data_direction = DMA_TO_DEVICE;
1277 	else
1278 		cmd->sc_data_direction = DMA_FROM_DEVICE;
1279 
1280 	cmd->transfersize = req->data_len;
1281 	cmd->allowed = req->retries;
1282 	return BLKPREP_OK;
1283 }
1284 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1285 
1286 /*
1287  * Setup a REQ_TYPE_FS command.  These are simple read/write request
1288  * from filesystems that still need to be translated to SCSI CDBs from
1289  * the ULD.
1290  */
1291 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1292 {
1293 	struct scsi_cmnd *cmd;
1294 	int ret = scsi_prep_state_check(sdev, req);
1295 
1296 	if (ret != BLKPREP_OK)
1297 		return ret;
1298 
1299 	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1300 			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1301 		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1302 		if (ret != BLKPREP_OK)
1303 			return ret;
1304 	}
1305 
1306 	/*
1307 	 * Filesystem requests must transfer data.
1308 	 */
1309 	BUG_ON(!req->nr_phys_segments);
1310 
1311 	cmd = scsi_get_cmd_from_req(sdev, req);
1312 	if (unlikely(!cmd))
1313 		return BLKPREP_DEFER;
1314 
1315 	memset(cmd->cmnd, 0, BLK_MAX_CDB);
1316 	return scsi_init_io(cmd, GFP_ATOMIC);
1317 }
1318 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
1319 
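/*
 * Illustrative sketch (not from this file; the real translations live in the
 * upper-level drivers such as sd and sr): a ULD prep_fn lets
 * scsi_setup_fs_cmnd() allocate the command and map the request's data, then
 * fills in the CDB itself, roughly:
 *
 *	static int example_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		struct scsi_cmnd *cmd;
 *		int ret;
 *
 *		ret = scsi_setup_fs_cmnd(sdev, rq);
 *		if (ret != BLKPREP_OK)
 *			return scsi_prep_return(q, rq, ret);
 *
 *		cmd = rq->special;
 *		cmd->cmnd[0] = rq_data_dir(rq) == WRITE ? WRITE_10 : READ_10;
 *		cmd->cmd_len = 10;
 *		(fill in the LBA and transfer length bytes of the CDB here)
 *
 *		return scsi_prep_return(q, rq, BLKPREP_OK);
 *	}
 */
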
1320 int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1321 {
1322 	int ret = BLKPREP_OK;
1323 
1324 	/*
1325 	 * If the device is not in running state we will reject some
1326 	 * or all commands.
1327 	 */
1328 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1329 		switch (sdev->sdev_state) {
1330 		case SDEV_OFFLINE:
1331 			/*
1332 			 * If the device is offline we refuse to process any
1333 			 * commands.  The device must be brought online
1334 			 * before trying any recovery commands.
1335 			 */
1336 			sdev_printk(KERN_ERR, sdev,
1337 				    "rejecting I/O to offline device\n");
1338 			ret = BLKPREP_KILL;
1339 			break;
1340 		case SDEV_DEL:
1341 			/*
1342 			 * If the device is fully deleted, we refuse to
1343 			 * process any commands as well.
1344 			 */
1345 			sdev_printk(KERN_ERR, sdev,
1346 				    "rejecting I/O to dead device\n");
1347 			ret = BLKPREP_KILL;
1348 			break;
1349 		case SDEV_QUIESCE:
1350 		case SDEV_BLOCK:
1351 		case SDEV_CREATED_BLOCK:
1352 			/*
1353 			 * If the device is blocked we defer normal commands.
1354 			 */
1355 			if (!(req->cmd_flags & REQ_PREEMPT))
1356 				ret = BLKPREP_DEFER;
1357 			break;
1358 		default:
1359 			/*
1360 			 * For any other not fully online state we only allow
1361 			 * special commands.  In particular any user initiated
1362 			 * command is not allowed.
1363 			 */
1364 			if (!(req->cmd_flags & REQ_PREEMPT))
1365 				ret = BLKPREP_KILL;
1366 			break;
1367 		}
1368 	}
1369 	return ret;
1370 }
1371 EXPORT_SYMBOL(scsi_prep_state_check);
1372 
1373 int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1374 {
1375 	struct scsi_device *sdev = q->queuedata;
1376 
1377 	switch (ret) {
1378 	case BLKPREP_KILL:
1379 		req->errors = DID_NO_CONNECT << 16;
1380 		/* release the command and kill it */
1381 		if (req->special) {
1382 			struct scsi_cmnd *cmd = req->special;
1383 			scsi_release_buffers(cmd);
1384 			scsi_put_command(cmd);
1385 			req->special = NULL;
1386 		}
1387 		break;
1388 	case BLKPREP_DEFER:
1389 		/*
1390 		 * If we defer, the elv_next_request() returns NULL, but the
1391 		 * queue must be restarted, so we plug here if no returning
1392 		 * command will automatically do that.
1393 		 */
1394 		if (sdev->device_busy == 0)
1395 			blk_plug_device(q);
1396 		break;
1397 	default:
1398 		req->cmd_flags |= REQ_DONTPREP;
1399 	}
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL(scsi_prep_return);
1404 
1405 int scsi_prep_fn(struct request_queue *q, struct request *req)
1406 {
1407 	struct scsi_device *sdev = q->queuedata;
1408 	int ret = BLKPREP_KILL;
1409 
1410 	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1411 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
1412 	return scsi_prep_return(q, req, ret);
1413 }
1414 
1415 /*
1416  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1417  * return 0.
1418  *
1419  * Called with the queue_lock held.
1420  */
1421 static inline int scsi_dev_queue_ready(struct request_queue *q,
1422 				  struct scsi_device *sdev)
1423 {
1424 	if (sdev->device_busy == 0 && sdev->device_blocked) {
1425 		/*
1426 		 * unblock after device_blocked iterates to zero
1427 		 */
1428 		if (--sdev->device_blocked == 0) {
1429 			SCSI_LOG_MLQUEUE(3,
1430 				   sdev_printk(KERN_INFO, sdev,
1431 				   "unblocking device at zero depth\n"));
1432 		} else {
1433 			blk_plug_device(q);
1434 			return 0;
1435 		}
1436 	}
1437 	if (scsi_device_is_busy(sdev))
1438 		return 0;
1439 
1440 	return 1;
1441 }
1442 
1443 
1444 /*
1445  * scsi_target_queue_ready: checks whether we can send commands to the target
1446  * @sdev: scsi device on starget to check.
1447  *
1448  * Called with the host lock held.
1449  */
1450 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1451 					   struct scsi_device *sdev)
1452 {
1453 	struct scsi_target *starget = scsi_target(sdev);
1454 
1455 	if (starget->single_lun) {
1456 		if (starget->starget_sdev_user &&
1457 		    starget->starget_sdev_user != sdev)
1458 			return 0;
1459 		starget->starget_sdev_user = sdev;
1460 	}
1461 
1462 	if (starget->target_busy == 0 && starget->target_blocked) {
1463 		/*
1464 		 * unblock after target_blocked iterates to zero
1465 		 */
1466 		if (--starget->target_blocked == 0) {
1467 			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1468 					 "unblocking target at zero depth\n"));
1469 		} else {
1470 			blk_plug_device(sdev->request_queue);
1471 			return 0;
1472 		}
1473 	}
1474 
1475 	if (scsi_target_is_busy(starget)) {
1476 		if (list_empty(&sdev->starved_entry)) {
1477 			list_add_tail(&sdev->starved_entry,
1478 				      &shost->starved_list);
1479 			return 0;
1480 		}
1481 	}
1482 
1483 	/* We're OK to process the command, so we can't be starved */
1484 	if (!list_empty(&sdev->starved_entry))
1485 		list_del_init(&sdev->starved_entry);
1486 	return 1;
1487 }
1488 
1489 /*
1490  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1491  * return 0. We must end up running the queue again whenever 0 is
1492  * returned, else IO can hang.
1493  *
1494  * Called with host_lock held.
1495  */
1496 static inline int scsi_host_queue_ready(struct request_queue *q,
1497 				   struct Scsi_Host *shost,
1498 				   struct scsi_device *sdev)
1499 {
1500 	if (scsi_host_in_recovery(shost))
1501 		return 0;
1502 	if (shost->host_busy == 0 && shost->host_blocked) {
1503 		/*
1504 		 * unblock after host_blocked iterates to zero
1505 		 */
1506 		if (--shost->host_blocked == 0) {
1507 			SCSI_LOG_MLQUEUE(3,
1508 				printk("scsi%d unblocking host at zero depth\n",
1509 					shost->host_no));
1510 		} else {
1511 			return 0;
1512 		}
1513 	}
1514 	if (scsi_host_is_busy(shost)) {
1515 		if (list_empty(&sdev->starved_entry))
1516 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
1517 		return 0;
1518 	}
1519 
1520 	/* We're OK to process the command, so we can't be starved */
1521 	if (!list_empty(&sdev->starved_entry))
1522 		list_del_init(&sdev->starved_entry);
1523 
1524 	return 1;
1525 }
1526 
1527 /*
1528  * Busy state exporting function for request stacking drivers.
1529  *
1530  * For efficiency, no lock is taken to check the busy state of
1531  * shost/starget/sdev, since the returned value is not guaranteed and
1532  * may be changed after request stacking drivers call the function,
1533  * regardless of taking lock or not.
1534  *
1535  * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1536  * (e.g. !sdev), scsi needs to return 'not busy'.
1537  * Otherwise, request stacking drivers may hold requests forever.
1538  */
1539 static int scsi_lld_busy(struct request_queue *q)
1540 {
1541 	struct scsi_device *sdev = q->queuedata;
1542 	struct Scsi_Host *shost;
1543 	struct scsi_target *starget;
1544 
1545 	if (!sdev)
1546 		return 0;
1547 
1548 	shost = sdev->host;
1549 	starget = scsi_target(sdev);
1550 
1551 	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1552 	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1553 		return 1;
1554 
1555 	return 0;
1556 }
1557 
1558 /*
1559  * Kill a request for a dead device
1560  */
1561 static void scsi_kill_request(struct request *req, struct request_queue *q)
1562 {
1563 	struct scsi_cmnd *cmd = req->special;
1564 	struct scsi_device *sdev = cmd->device;
1565 	struct scsi_target *starget = scsi_target(sdev);
1566 	struct Scsi_Host *shost = sdev->host;
1567 
1568 	blkdev_dequeue_request(req);
1569 
1570 	if (unlikely(cmd == NULL)) {
1571 		printk(KERN_CRIT "impossible request in %s.\n",
1572 				 __func__);
1573 		BUG();
1574 	}
1575 
1576 	scsi_init_cmd_errh(cmd);
1577 	cmd->result = DID_NO_CONNECT << 16;
1578 	atomic_inc(&cmd->device->iorequest_cnt);
1579 
1580 	/*
1581 	 * The SCSI request completion path will do scsi_device_unbusy() and
1582 	 * drop the busy counts, so bump them here.  To bump the counters, we
1583 	 * need to dance with the locks as the normal issue path does.
1584 	 */
1585 	sdev->device_busy++;
1586 	spin_unlock(sdev->request_queue->queue_lock);
1587 	spin_lock(shost->host_lock);
1588 	shost->host_busy++;
1589 	starget->target_busy++;
1590 	spin_unlock(shost->host_lock);
1591 	spin_lock(sdev->request_queue->queue_lock);
1592 
1593 	blk_complete_request(req);
1594 }
1595 
1596 static void scsi_softirq_done(struct request *rq)
1597 {
1598 	struct scsi_cmnd *cmd = rq->special;
1599 	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1600 	int disposition;
1601 
1602 	INIT_LIST_HEAD(&cmd->eh_entry);
1603 
1604 	/*
1605 	 * Set the serial numbers back to zero
1606 	 */
1607 	cmd->serial_number = 0;
1608 
1609 	atomic_inc(&cmd->device->iodone_cnt);
1610 	if (cmd->result)
1611 		atomic_inc(&cmd->device->ioerr_cnt);
1612 
1613 	disposition = scsi_decide_disposition(cmd);
1614 	if (disposition != SUCCESS &&
1615 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1616 		sdev_printk(KERN_ERR, cmd->device,
1617 			    "timing out command, waited %lus\n",
1618 			    wait_for/HZ);
1619 		disposition = SUCCESS;
1620 	}
1621 
1622 	scsi_log_completion(cmd, disposition);
1623 
1624 	switch (disposition) {
1625 		case SUCCESS:
1626 			scsi_finish_command(cmd);
1627 			break;
1628 		case NEEDS_RETRY:
1629 			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1630 			break;
1631 		case ADD_TO_MLQUEUE:
1632 			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1633 			break;
1634 		default:
1635 			if (!scsi_eh_scmd_add(cmd, 0))
1636 				scsi_finish_command(cmd);
1637 	}
1638 }
1639 
1640 /*
1641  * Function:    scsi_request_fn()
1642  *
1643  * Purpose:     Main strategy routine for SCSI.
1644  *
1645  * Arguments:   q       - Pointer to actual queue.
1646  *
1647  * Returns:     Nothing
1648  *
1649  * Lock status: IO request lock assumed to be held when called.
1650  */
1651 static void scsi_request_fn(struct request_queue *q)
1652 {
1653 	struct scsi_device *sdev = q->queuedata;
1654 	struct Scsi_Host *shost;
1655 	struct scsi_cmnd *cmd;
1656 	struct request *req;
1657 
1658 	if (!sdev) {
1659 		printk("scsi: killing requests for dead queue\n");
1660 		while ((req = elv_next_request(q)) != NULL)
1661 			scsi_kill_request(req, q);
1662 		return;
1663 	}
1664 
1665 	if(!get_device(&sdev->sdev_gendev))
1666 		/* We must be tearing the block queue down already */
1667 		return;
1668 
1669 	/*
1670 	 * To start with, we keep looping until the queue is empty, or until
1671 	 * the host is no longer able to accept any more requests.
1672 	 */
1673 	shost = sdev->host;
1674 	while (!blk_queue_plugged(q)) {
1675 		int rtn;
1676 		/*
1677 		 * get next queueable request.  We do this early to make sure
1678 		 * that the request is fully prepared even if we cannot
1679 		 * accept it.
1680 		 */
1681 		req = elv_next_request(q);
1682 		if (!req || !scsi_dev_queue_ready(q, sdev))
1683 			break;
1684 
1685 		if (unlikely(!scsi_device_online(sdev))) {
1686 			sdev_printk(KERN_ERR, sdev,
1687 				    "rejecting I/O to offline device\n");
1688 			scsi_kill_request(req, q);
1689 			continue;
1690 		}
1691 
1692 
1693 		/*
1694 		 * Remove the request from the request list.
1695 		 */
1696 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1697 			blkdev_dequeue_request(req);
1698 		sdev->device_busy++;
1699 
1700 		spin_unlock(q->queue_lock);
1701 		cmd = req->special;
1702 		if (unlikely(cmd == NULL)) {
1703 			printk(KERN_CRIT "impossible request in %s.\n"
1704 					 "please mail a stack trace to "
1705 					 "linux-scsi@vger.kernel.org\n",
1706 					 __func__);
1707 			blk_dump_rq_flags(req, "foo");
1708 			BUG();
1709 		}
1710 		spin_lock(shost->host_lock);
1711 
1712 		/*
1713 		 * We hit this when the driver is using a host wide
1714 		 * tag map. For device level tag maps the queue_depth check
1715 		 * in the device ready fn would prevent us from trying
1716 		 * to allocate a tag. Since the map is a shared host resource
1717 		 * we add the dev to the starved list so it eventually gets
1718 		 * a run when a tag is freed.
1719 		 */
1720 		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1721 			if (list_empty(&sdev->starved_entry))
1722 				list_add_tail(&sdev->starved_entry,
1723 					      &shost->starved_list);
1724 			goto not_ready;
1725 		}
1726 
1727 		if (!scsi_target_queue_ready(shost, sdev))
1728 			goto not_ready;
1729 
1730 		if (!scsi_host_queue_ready(q, shost, sdev))
1731 			goto not_ready;
1732 
1733 		scsi_target(sdev)->target_busy++;
1734 		shost->host_busy++;
1735 
1736 		/*
1737 		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1738 		 *		take the lock again.
1739 		 */
1740 		spin_unlock_irq(shost->host_lock);
1741 
1742 		/*
1743 		 * Finally, initialize any error handling parameters, and set up
1744 		 * the timers for timeouts.
1745 		 */
1746 		scsi_init_cmd_errh(cmd);
1747 
1748 		/*
1749 		 * Dispatch the command to the low-level driver.
1750 		 */
1751 		rtn = scsi_dispatch_cmd(cmd);
1752 		spin_lock_irq(q->queue_lock);
1753 		if(rtn) {
1754 			/* we're refusing the command; because of
1755 			 * the way locks get dropped, we need to
1756 			 * check here if plugging is required */
1757 			if (sdev->device_busy == 0)
1758 				blk_plug_device(q);
1759 
1760 			break;
1761 		}
1762 	}
1763 
1764 	goto out;
1765 
1766  not_ready:
1767 	spin_unlock_irq(shost->host_lock);
1768 
1769 	/*
1770 	 * lock q, handle tag, requeue req, and decrement device_busy. We
1771 	 * must return with queue_lock held.
1772 	 *
1773 	 * Decrementing device_busy without checking it is OK, as all such
1774 	 * cases (host limits or settings) should run the queue at some
1775 	 * later time.
1776 	 */
1777 	spin_lock_irq(q->queue_lock);
1778 	blk_requeue_request(q, req);
1779 	sdev->device_busy--;
1780 	if (sdev->device_busy == 0)
1781 		blk_plug_device(q);
1782  out:
1783 	/* must be careful here...if we trigger the ->remove() function
1784 	 * we cannot be holding the q lock */
1785 	spin_unlock_irq(q->queue_lock);
1786 	put_device(&sdev->sdev_gendev);
1787 	spin_lock_irq(q->queue_lock);
1788 }
1789 
1790 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1791 {
1792 	struct device *host_dev;
1793 	u64 bounce_limit = 0xffffffff;
1794 
1795 	if (shost->unchecked_isa_dma)
1796 		return BLK_BOUNCE_ISA;
1797 	/*
1798 	 * Platforms with virtual-DMA translation
1799 	 * hardware have no practical limit.
1800 	 */
1801 	if (!PCI_DMA_BUS_IS_PHYS)
1802 		return BLK_BOUNCE_ANY;
1803 
1804 	host_dev = scsi_get_device(shost);
1805 	if (host_dev && host_dev->dma_mask)
1806 		bounce_limit = *host_dev->dma_mask;
1807 
1808 	return bounce_limit;
1809 }
1810 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
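
/*
 * Illustrative sketch (not part of the mid-layer): a driver or transport
 * class that builds its own request queue can apply the host's DMA
 * restrictions the same way __scsi_alloc_queue() does below.  The
 * function name and "my_queue" are hypothetical.
 */
static void my_apply_dma_limits(struct Scsi_Host *shost,
				struct request_queue *my_queue)
{
	/* never hand the HBA an address it cannot reach without bouncing */
	blk_queue_bounce_limit(my_queue, scsi_calculate_bounce_limit(shost));
	blk_queue_max_sectors(my_queue, shost->max_sectors);
}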
1811 
1812 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1813 					 request_fn_proc *request_fn)
1814 {
1815 	struct request_queue *q;
1816 	struct device *dev = shost->shost_gendev.parent;
1817 
1818 	q = blk_init_queue(request_fn, NULL);
1819 	if (!q)
1820 		return NULL;
1821 
1822 	/*
1823 	 * this limit is imposed by hardware restrictions
1824 	 */
1825 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
1826 	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1827 
1828 	blk_queue_max_sectors(q, shost->max_sectors);
1829 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1830 	blk_queue_segment_boundary(q, shost->dma_boundary);
1831 	dma_set_seg_boundary(dev, shost->dma_boundary);
1832 
1833 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1834 
1835 	/* New queue, no concurrency on queue_flags */
1836 	if (!shost->use_clustering)
1837 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1838 
1839 	/*
1840 	 * set a reasonable default alignment on word boundaries: the
1841 	 * host and device may alter it using
1842 	 * blk_queue_update_dma_alignment() later.
1843 	 */
1844 	blk_queue_dma_alignment(q, 0x03);
1845 
1846 	return q;
1847 }
1848 EXPORT_SYMBOL(__scsi_alloc_queue);
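
/*
 * Illustrative sketch: a transport class can reuse __scsi_alloc_queue()
 * to obtain a queue that already carries the host's segment, sector and
 * bounce limits, supplying its own request_fn.  The handler below is a
 * hypothetical stub that simply fails every request it is handed; the
 * "my_" names are assumptions, not existing kernel symbols.
 */
static void my_transport_request_fn(struct request_queue *q)
{
	struct request *req;

	/* called with the queue lock held, like any request_fn */
	while ((req = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(req);
		__blk_end_request(req, -EIO, blk_rq_bytes(req));
	}
}

static struct request_queue *my_transport_alloc_queue(struct Scsi_Host *shost)
{
	return __scsi_alloc_queue(shost, my_transport_request_fn);
}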
1849 
1850 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1851 {
1852 	struct request_queue *q;
1853 
1854 	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1855 	if (!q)
1856 		return NULL;
1857 
1858 	blk_queue_prep_rq(q, scsi_prep_fn);
1859 	blk_queue_softirq_done(q, scsi_softirq_done);
1860 	blk_queue_rq_timed_out(q, scsi_times_out);
1861 	blk_queue_lld_busy(q, scsi_lld_busy);
1862 	return q;
1863 }
1864 
1865 void scsi_free_queue(struct request_queue *q)
1866 {
1867 	blk_cleanup_queue(q);
1868 }
1869 
1870 /*
1871  * Function:    scsi_block_requests()
1872  *
1873  * Purpose:     Utility function used by low-level drivers to prevent further
1874  *		commands from being queued to the device.
1875  *
1876  * Arguments:   shost       - Host in question
1877  *
1878  * Returns:     Nothing
1879  *
1880  * Lock status: No locks are assumed held.
1881  *
1882  * Notes:       There is no timer nor any other means by which the requests
1883  *		get unblocked other than the low-level driver calling
1884  *		scsi_unblock_requests().
1885  */
1886 void scsi_block_requests(struct Scsi_Host *shost)
1887 {
1888 	shost->host_self_blocked = 1;
1889 }
1890 EXPORT_SYMBOL(scsi_block_requests);
1891 
1892 /*
1893  * Function:    scsi_unblock_requests()
1894  *
1895  * Purpose:     Utility function used by low-level drivers to allow further
1896  *		commands to be queued to the device.
1897  *
1898  * Arguments:   shost       - Host in question
1899  *
1900  * Returns:     Nothing
1901  *
1902  * Lock status: No locks are assumed held.
1903  *
1904  * Notes:       There is no timer nor any other means by which the requests
1905  *		get unblocked other than the low-level driver calling
1906  *		scsi_unblock_requests().
1907  *
1908  *		This is done as an API function so that changes to the
1909  *		internals of the scsi mid-layer won't require wholesale
1910  *		changes to drivers that use this feature.
1911  */
1912 void scsi_unblock_requests(struct Scsi_Host *shost)
1913 {
1914 	shost->host_self_blocked = 0;
1915 	scsi_run_host_queues(shost);
1916 }
1917 EXPORT_SYMBOL(scsi_unblock_requests);
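
/*
 * Illustrative sketch: a low-level driver typically brackets a controller
 * reset with scsi_block_requests()/scsi_unblock_requests() so the
 * mid-layer stops feeding it commands while the hardware reinitializes.
 * "my_reset_controller" and "my_hba_reset_example" are hypothetical
 * driver-specific names, not existing kernel functions.
 */
static int my_reset_controller(struct Scsi_Host *shost)
{
	/* driver-specific hardware reset would go here */
	return 0;
}

static int my_hba_reset_example(struct Scsi_Host *shost)
{
	int err;

	scsi_block_requests(shost);	/* no new commands reach the driver */
	err = my_reset_controller(shost);
	scsi_unblock_requests(shost);	/* resume and re-run the queues */

	return err;
}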
1918 
1919 int __init scsi_init_queue(void)
1920 {
1921 	int i;
1922 
1923 	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1924 					sizeof(struct scsi_io_context),
1925 					0, 0, NULL);
1926 	if (!scsi_io_context_cache) {
1927 		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1928 		return -ENOMEM;
1929 	}
1930 
1931 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1932 					   sizeof(struct scsi_data_buffer),
1933 					   0, 0, NULL);
1934 	if (!scsi_sdb_cache) {
1935 		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1936 		goto cleanup_io_context;
1937 	}
1938 
1939 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1940 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1941 		int size = sgp->size * sizeof(struct scatterlist);
1942 
1943 		sgp->slab = kmem_cache_create(sgp->name, size, 0,
1944 				SLAB_HWCACHE_ALIGN, NULL);
1945 		if (!sgp->slab) {
1946 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1947 					sgp->name);
1948 			goto cleanup_sdb;
1949 		}
1950 
1951 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1952 						     sgp->slab);
1953 		if (!sgp->pool) {
1954 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1955 					sgp->name);
1956 			goto cleanup_sdb;
1957 		}
1958 	}
1959 
1960 	return 0;
1961 
1962 cleanup_sdb:
1963 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1964 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1965 		if (sgp->pool)
1966 			mempool_destroy(sgp->pool);
1967 		if (sgp->slab)
1968 			kmem_cache_destroy(sgp->slab);
1969 	}
1970 	kmem_cache_destroy(scsi_sdb_cache);
1971 cleanup_io_context:
1972 	kmem_cache_destroy(scsi_io_context_cache);
1973 
1974 	return -ENOMEM;
1975 }
1976 
1977 void scsi_exit_queue(void)
1978 {
1979 	int i;
1980 
1981 	kmem_cache_destroy(scsi_io_context_cache);
1982 	kmem_cache_destroy(scsi_sdb_cache);
1983 
1984 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
1985 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1986 		mempool_destroy(sgp->pool);
1987 		kmem_cache_destroy(sgp->slab);
1988 	}
1989 }
1990 
1991 /**
1992  *	scsi_mode_select - issue a mode select
1993  *	@sdev:	SCSI device to be queried
1994  *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
1995  *	@sp:	Save page bit (0 == don't save, 1 == save)
1996  *	@modepage: mode page being requested
1997  *	@buffer: request buffer (may not be smaller than eight bytes)
1998  *	@len:	length of request buffer.
1999  *	@timeout: command timeout
2000  *	@retries: number of retries before failing
2001  *	@data: returns a structure abstracting the mode header data
2002  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2003  *		must be SCSI_SENSE_BUFFERSIZE big.
2004  *
2005  *	Returns zero if successful; negative error number or scsi
2006  *	status on error
2007  *
2008  */
2009 int
2010 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2011 		 unsigned char *buffer, int len, int timeout, int retries,
2012 		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2013 {
2014 	unsigned char cmd[10];
2015 	unsigned char *real_buffer;
2016 	int ret;
2017 
2018 	memset(cmd, 0, sizeof(cmd));
2019 	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2020 
2021 	if (sdev->use_10_for_ms) {
2022 		if (len > 65535)
2023 			return -EINVAL;
2024 		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2025 		if (!real_buffer)
2026 			return -ENOMEM;
2027 		memcpy(real_buffer + 8, buffer, len);
2028 		len += 8;
2029 		real_buffer[0] = 0;
2030 		real_buffer[1] = 0;
2031 		real_buffer[2] = data->medium_type;
2032 		real_buffer[3] = data->device_specific;
2033 		real_buffer[4] = data->longlba ? 0x01 : 0;
2034 		real_buffer[5] = 0;
2035 		real_buffer[6] = data->block_descriptor_length >> 8;
2036 		real_buffer[7] = data->block_descriptor_length;
2037 
2038 		cmd[0] = MODE_SELECT_10;
2039 		cmd[7] = len >> 8;
2040 		cmd[8] = len;
2041 	} else {
2042 		if (len > 255 || data->block_descriptor_length > 255 ||
2043 		    data->longlba)
2044 			return -EINVAL;
2045 
2046 		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2047 		if (!real_buffer)
2048 			return -ENOMEM;
2049 		memcpy(real_buffer + 4, buffer, len);
2050 		len += 4;
2051 		real_buffer[0] = 0;
2052 		real_buffer[1] = data->medium_type;
2053 		real_buffer[2] = data->device_specific;
2054 		real_buffer[3] = data->block_descriptor_length;
2055 
2056 
2057 		cmd[0] = MODE_SELECT;
2058 		cmd[4] = len;
2059 	}
2060 
2061 	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2062 			       sshdr, timeout, retries, NULL);
2063 	kfree(real_buffer);
2064 	return ret;
2065 }
2066 EXPORT_SYMBOL_GPL(scsi_mode_select);
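
/*
 * Illustrative sketch: writing a mode page back to a device.  The caller
 * is expected to fill @data from a prior MODE SENSE and to pass the page
 * (and any block descriptors) without the mode parameter header in
 * @buffer; scsi_mode_select() rebuilds the header itself.  The "my_"
 * name, timeout and retry count are hypothetical example values.
 */
static int my_write_mode_page(struct scsi_device *sdev, unsigned char *page,
			      int page_len, struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	/* PF=1: standard page format, SP=0: do not save to NV storage */
	return scsi_mode_select(sdev, 1, 0, page[0] & 0x3f, page, page_len,
				30 * HZ, 3, data, &sshdr);
}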
2067 
2068 /**
2069  *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
2070  *	@sdev:	SCSI device to be queried
2071  *	@dbd:	set if mode sense will allow block descriptors to be returned
2072  *	@modepage: mode page being requested
2073  *	@buffer: request buffer (may not be smaller than eight bytes)
2074  *	@len:	length of request buffer.
2075  *	@timeout: command timeout
2076  *	@retries: number of retries before failing
2077  *	@data: returns a structure abstracting the mode header data
2078  *	@sshdr: place to put sense data (or NULL if no sense to be collected).
2079  *		must be SCSI_SENSE_BUFFERSIZE big.
2080  *
2081  *	Returns zero if successful, or a non-zero SCSI result on
2082  *	failure.  On success, @data->header_length is set to either 4
2083  *	or 8 depending on whether a six or ten byte command was issued.
2084  */
2085 int
2086 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2087 		  unsigned char *buffer, int len, int timeout, int retries,
2088 		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2089 {
2090 	unsigned char cmd[12];
2091 	int use_10_for_ms;
2092 	int header_length;
2093 	int result;
2094 	struct scsi_sense_hdr my_sshdr;
2095 
2096 	memset(data, 0, sizeof(*data));
2097 	memset(&cmd[0], 0, 12);
2098 	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
2099 	cmd[2] = modepage;
2100 
2101 	/* caller might not be interested in sense, but we need it */
2102 	if (!sshdr)
2103 		sshdr = &my_sshdr;
2104 
2105  retry:
2106 	use_10_for_ms = sdev->use_10_for_ms;
2107 
2108 	if (use_10_for_ms) {
2109 		if (len < 8)
2110 			len = 8;
2111 
2112 		cmd[0] = MODE_SENSE_10;
2113 		cmd[8] = len;
2114 		header_length = 8;
2115 	} else {
2116 		if (len < 4)
2117 			len = 4;
2118 
2119 		cmd[0] = MODE_SENSE;
2120 		cmd[4] = len;
2121 		header_length = 4;
2122 	}
2123 
2124 	memset(buffer, 0, len);
2125 
2126 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2127 				  sshdr, timeout, retries, NULL);
2128 
2129 	/* This code looks awful: what it's doing is making sure an
2130 	 * ILLEGAL REQUEST sense return identifies the actual command
2131 	 * byte as the problem.  MODE_SENSE commands can return
2132 	 * ILLEGAL REQUEST if the code page isn't supported */
2133 
2134 	if (use_10_for_ms && !scsi_status_is_good(result) &&
2135 	    (driver_byte(result) & DRIVER_SENSE)) {
2136 		if (scsi_sense_valid(sshdr)) {
2137 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2138 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2139 				/*
2140 				 * Invalid command operation code
2141 				 */
2142 				sdev->use_10_for_ms = 0;
2143 				goto retry;
2144 			}
2145 		}
2146 	}
2147 
2148 	if (scsi_status_is_good(result)) {
2149 		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2150 			     (modepage == 6 || modepage == 8))) {
2151 			/* Initio breakage? */
2152 			header_length = 0;
2153 			data->length = 13;
2154 			data->medium_type = 0;
2155 			data->device_specific = 0;
2156 			data->longlba = 0;
2157 			data->block_descriptor_length = 0;
2158 		} else if (use_10_for_ms) {
2159 			data->length = buffer[0]*256 + buffer[1] + 2;
2160 			data->medium_type = buffer[2];
2161 			data->device_specific = buffer[3];
2162 			data->longlba = buffer[4] & 0x01;
2163 			data->block_descriptor_length = buffer[6]*256
2164 				+ buffer[7];
2165 		} else {
2166 			data->length = buffer[0] + 1;
2167 			data->medium_type = buffer[1];
2168 			data->device_specific = buffer[2];
2169 			data->block_descriptor_length = buffer[3];
2170 		}
2171 		data->header_length = header_length;
2172 	}
2173 
2174 	return result;
2175 }
2176 EXPORT_SYMBOL(scsi_mode_sense);
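
/*
 * Illustrative sketch: reading the caching mode page (0x08) and locating
 * the page data via @data->header_length and the block descriptor length,
 * much as sd does when probing the write-back cache setting.  The "my_"
 * name and the timeout/retry values are hypothetical.
 */
static int my_read_write_cache_enable(struct scsi_device *sdev, int *wce)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char *page;
	int res;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* the mode page follows the header and any block descriptors */
	page = buffer + data.header_length + data.block_descriptor_length;
	*wce = (page[2] & 0x04) ? 1 : 0;	/* WCE bit */
	return 0;
}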
2177 
2178 /**
2179  *	scsi_test_unit_ready - test if unit is ready
2180  *	@sdev:	scsi device to test.
2181  *	@timeout: command timeout
2182  *	@retries: number of retries before failing
2183  *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
2184  *		returning sense. Make sure that this is cleared before passing
2185  *		in.
2186  *
2187  *	Returns zero if successful or an error if TUR failed.  For
2188  *	removable media, a return of NOT_READY or UNIT_ATTENTION is
2189  *	translated to success, with the ->changed flag updated.
2190  **/
2191 int
2192 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2193 		     struct scsi_sense_hdr *sshdr_external)
2194 {
2195 	char cmd[] = {
2196 		TEST_UNIT_READY, 0, 0, 0, 0, 0,
2197 	};
2198 	struct scsi_sense_hdr *sshdr;
2199 	int result;
2200 
2201 	if (!sshdr_external)
2202 		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2203 	else
2204 		sshdr = sshdr_external;
2205 
2206 	/* try to eat the UNIT_ATTENTION if there are enough retries */
2207 	do {
2208 		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2209 					  timeout, retries, NULL);
2210 		if (sdev->removable && scsi_sense_valid(sshdr) &&
2211 		    sshdr->sense_key == UNIT_ATTENTION)
2212 			sdev->changed = 1;
2213 	} while (scsi_sense_valid(sshdr) &&
2214 		 sshdr->sense_key == UNIT_ATTENTION && --retries);
2215 
2216 	if (!sshdr)
2217 		/* could not allocate sense buffer, so can't process it */
2218 		return result;
2219 
2220 	if (sdev->removable && scsi_sense_valid(sshdr) &&
2221 	    (sshdr->sense_key == UNIT_ATTENTION ||
2222 	     sshdr->sense_key == NOT_READY)) {
2223 		sdev->changed = 1;
2224 		result = 0;
2225 	}
2226 	if (!sshdr_external)
2227 		kfree(sshdr);
2228 	return result;
2229 }
2230 EXPORT_SYMBOL(scsi_test_unit_ready);
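
/*
 * Illustrative sketch: a simple readiness poll built on
 * scsi_test_unit_ready().  The external sense header must be zeroed
 * before it is passed in, as noted above; for removable media a
 * NOT_READY/UNIT ATTENTION result is folded into success and reported
 * through sdev->changed instead.  Timeout and retries are example values.
 */
static int my_device_is_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	memset(&sshdr, 0, sizeof(sshdr));
	return scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0;
}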
2231 
2232 /**
2233  *	scsi_device_set_state - Take the given device through the device state model.
2234  *	@sdev:	scsi device to change the state of.
2235  *	@state:	state to change to.
2236  *
2237  *	Returns zero if successful or an error if the requested
2238  *	transition is illegal.
2239  */
2240 int
2241 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2242 {
2243 	enum scsi_device_state oldstate = sdev->sdev_state;
2244 
2245 	if (state == oldstate)
2246 		return 0;
2247 
2248 	switch (state) {
2249 	case SDEV_CREATED:
2250 		switch (oldstate) {
2251 		case SDEV_CREATED_BLOCK:
2252 			break;
2253 		default:
2254 			goto illegal;
2255 		}
2256 		break;
2257 
2258 	case SDEV_RUNNING:
2259 		switch (oldstate) {
2260 		case SDEV_CREATED:
2261 		case SDEV_OFFLINE:
2262 		case SDEV_QUIESCE:
2263 		case SDEV_BLOCK:
2264 			break;
2265 		default:
2266 			goto illegal;
2267 		}
2268 		break;
2269 
2270 	case SDEV_QUIESCE:
2271 		switch (oldstate) {
2272 		case SDEV_RUNNING:
2273 		case SDEV_OFFLINE:
2274 			break;
2275 		default:
2276 			goto illegal;
2277 		}
2278 		break;
2279 
2280 	case SDEV_OFFLINE:
2281 		switch (oldstate) {
2282 		case SDEV_CREATED:
2283 		case SDEV_RUNNING:
2284 		case SDEV_QUIESCE:
2285 		case SDEV_BLOCK:
2286 			break;
2287 		default:
2288 			goto illegal;
2289 		}
2290 		break;
2291 
2292 	case SDEV_BLOCK:
2293 		switch (oldstate) {
2294 		case SDEV_RUNNING:
2295 		case SDEV_CREATED_BLOCK:
2296 			break;
2297 		default:
2298 			goto illegal;
2299 		}
2300 		break;
2301 
2302 	case SDEV_CREATED_BLOCK:
2303 		switch (oldstate) {
2304 		case SDEV_CREATED:
2305 			break;
2306 		default:
2307 			goto illegal;
2308 		}
2309 		break;
2310 
2311 	case SDEV_CANCEL:
2312 		switch (oldstate) {
2313 		case SDEV_CREATED:
2314 		case SDEV_RUNNING:
2315 		case SDEV_QUIESCE:
2316 		case SDEV_OFFLINE:
2317 		case SDEV_BLOCK:
2318 			break;
2319 		default:
2320 			goto illegal;
2321 		}
2322 		break;
2323 
2324 	case SDEV_DEL:
2325 		switch (oldstate) {
2326 		case SDEV_CREATED:
2327 		case SDEV_RUNNING:
2328 		case SDEV_OFFLINE:
2329 		case SDEV_CANCEL:
2330 			break;
2331 		default:
2332 			goto illegal;
2333 		}
2334 		break;
2335 
2336 	}
2337 	sdev->sdev_state = state;
2338 	return 0;
2339 
2340  illegal:
2341 	SCSI_LOG_ERROR_RECOVERY(1,
2342 				sdev_printk(KERN_ERR, sdev,
2343 					    "Illegal state transition %s->%s\n",
2344 					    scsi_device_state_name(oldstate),
2345 					    scsi_device_state_name(state))
2346 				);
2347 	return -EINVAL;
2348 }
2349 EXPORT_SYMBOL(scsi_device_set_state);
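
/*
 * Illustrative sketch: forcing a misbehaving device offline from a
 * driver's error handling path.  SDEV_OFFLINE is a legal transition from
 * SDEV_RUNNING according to the table above; illegal requests are
 * rejected with -EINVAL and logged.  The "my_" name is hypothetical.
 */
static void my_take_device_offline(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "failed to transition to SDEV_OFFLINE\n");
}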
2350 
2351 /**
2352  * 	scsi_evt_emit - emit a single SCSI device uevent
2353  *	@sdev: associated SCSI device
2354  *	@evt: event to emit
2355  *
2356  *	Send a single uevent (scsi_event) to the associated scsi_device.
2357  */
2358 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2359 {
2360 	int idx = 0;
2361 	char *envp[3];
2362 
2363 	switch (evt->evt_type) {
2364 	case SDEV_EVT_MEDIA_CHANGE:
2365 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2366 		break;
2367 
2368 	default:
2369 		/* do nothing */
2370 		break;
2371 	}
2372 
2373 	envp[idx++] = NULL;
2374 
2375 	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2376 }
2377 
2378 /**
2379  * 	scsi_evt_thread - send a uevent for each scsi event
2380  *	@work: work struct for scsi_device
2381  *
2382  *	Dispatch queued events to their associated scsi_device kobjects
2383  *	as uevents.
2384  */
2385 void scsi_evt_thread(struct work_struct *work)
2386 {
2387 	struct scsi_device *sdev;
2388 	LIST_HEAD(event_list);
2389 
2390 	sdev = container_of(work, struct scsi_device, event_work);
2391 
2392 	while (1) {
2393 		struct scsi_event *evt;
2394 		struct list_head *this, *tmp;
2395 		unsigned long flags;
2396 
2397 		spin_lock_irqsave(&sdev->list_lock, flags);
2398 		list_splice_init(&sdev->event_list, &event_list);
2399 		spin_unlock_irqrestore(&sdev->list_lock, flags);
2400 
2401 		if (list_empty(&event_list))
2402 			break;
2403 
2404 		list_for_each_safe(this, tmp, &event_list) {
2405 			evt = list_entry(this, struct scsi_event, node);
2406 			list_del(&evt->node);
2407 			scsi_evt_emit(sdev, evt);
2408 			kfree(evt);
2409 		}
2410 	}
2411 }
2412 
2413 /**
2414  * 	sdev_evt_send - send asserted event to uevent thread
2415  *	@sdev: scsi_device event occurred on
2416  *	@evt: event to send
2417  *
2418  *	Assert scsi device event asynchronously.
2419  */
2420 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2421 {
2422 	unsigned long flags;
2423 
2424 #if 0
2425 	/* FIXME: currently this check eliminates all media change events
2426 	 * for polled devices.  Need to update to discriminate between AN
2427 	 * and polled events */
2428 	if (!test_bit(evt->evt_type, sdev->supported_events)) {
2429 		kfree(evt);
2430 		return;
2431 	}
2432 #endif
2433 
2434 	spin_lock_irqsave(&sdev->list_lock, flags);
2435 	list_add_tail(&evt->node, &sdev->event_list);
2436 	schedule_work(&sdev->event_work);
2437 	spin_unlock_irqrestore(&sdev->list_lock, flags);
2438 }
2439 EXPORT_SYMBOL_GPL(sdev_evt_send);
2440 
2441 /**
2442  * 	sdev_evt_alloc - allocate a new scsi event
2443  *	@evt_type: type of event to allocate
2444  *	@gfpflags: GFP flags for allocation
2445  *
2446  *	Allocates and returns a new scsi_event.
2447  */
2448 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2449 				  gfp_t gfpflags)
2450 {
2451 	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2452 	if (!evt)
2453 		return NULL;
2454 
2455 	evt->evt_type = evt_type;
2456 	INIT_LIST_HEAD(&evt->node);
2457 
2458 	/* evt_type-specific initialization, if any */
2459 	switch (evt_type) {
2460 	case SDEV_EVT_MEDIA_CHANGE:
2461 	default:
2462 		/* do nothing */
2463 		break;
2464 	}
2465 
2466 	return evt;
2467 }
2468 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2469 
2470 /**
2471  * 	sdev_evt_send_simple - send asserted event to uevent thread
2472  *	@sdev: scsi_device event occurred on
2473  *	@evt_type: type of event to send
2474  *	@gfpflags: GFP flags for allocation
2475  *
2476  *	Assert scsi device event asynchronously, given an event type.
2477  */
2478 void sdev_evt_send_simple(struct scsi_device *sdev,
2479 			  enum scsi_device_event evt_type, gfp_t gfpflags)
2480 {
2481 	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2482 	if (!evt) {
2483 		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2484 			    evt_type);
2485 		return;
2486 	}
2487 
2488 	sdev_evt_send(sdev, evt);
2489 }
2490 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
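
/*
 * Illustrative sketch: how a low-level driver that decodes an
 * asynchronous event notification could report a media change to user
 * space.  GFP_KERNEL assumes process context; GFP_ATOMIC would be used
 * from interrupt context.  The "my_" name is hypothetical.
 */
static void my_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}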
2491 
2492 /**
2493  *	scsi_device_quiesce - Block user issued commands.
2494  *	@sdev:	scsi device to quiesce.
2495  *
2496  *	This works by trying to transition to the SDEV_QUIESCE state
2497  *	(which must be a legal transition).  When the device is in this
2498  *	state, only special requests will be accepted, all others will
2499  *	be deferred.  Since special requests may also be requeued requests,
2500  *	a successful return doesn't guarantee the device will be
2501  *	totally quiescent.
2502  *
2503  *	Must be called with user context, may sleep.
2504  *
2505  *	Returns zero if successful or an error if not.
2506  */
2507 int
2508 scsi_device_quiesce(struct scsi_device *sdev)
2509 {
2510 	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2511 	if (err)
2512 		return err;
2513 
2514 	scsi_run_queue(sdev->request_queue);
2515 	while (sdev->device_busy) {
2516 		msleep_interruptible(200);
2517 		scsi_run_queue(sdev->request_queue);
2518 	}
2519 	return 0;
2520 }
2521 EXPORT_SYMBOL(scsi_device_quiesce);
2522 
2523 /**
2524  *	scsi_device_resume - Restart user issued commands to a quiesced device.
2525  *	@sdev:	scsi device to resume.
2526  *
2527  *	Moves the device from quiesced back to running and restarts the
2528  *	queues.
2529  *
2530  *	Must be called with user context, may sleep.
2531  */
2532 void
2533 scsi_device_resume(struct scsi_device *sdev)
2534 {
2535 	if (scsi_device_set_state(sdev, SDEV_RUNNING))
2536 		return;
2537 	scsi_run_queue(sdev->request_queue);
2538 }
2539 EXPORT_SYMBOL(scsi_device_resume);
2540 
2541 static void
2542 device_quiesce_fn(struct scsi_device *sdev, void *data)
2543 {
2544 	scsi_device_quiesce(sdev);
2545 }
2546 
2547 void
2548 scsi_target_quiesce(struct scsi_target *starget)
2549 {
2550 	starget_for_each_device(starget, NULL, device_quiesce_fn);
2551 }
2552 EXPORT_SYMBOL(scsi_target_quiesce);
2553 
2554 static void
2555 device_resume_fn(struct scsi_device *sdev, void *data)
2556 {
2557 	scsi_device_resume(sdev);
2558 }
2559 
2560 void
2561 scsi_target_resume(struct scsi_target *starget)
2562 {
2563 	starget_for_each_device(starget, NULL, device_resume_fn);
2564 }
2565 EXPORT_SYMBOL(scsi_target_resume);
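
/*
 * Illustrative sketch: bracketing a target-wide operation (for example
 * SPI domain validation) so that no user I/O is in flight while it runs.
 * "my_renegotiate_link" and "my_revalidate_target" are hypothetical
 * transport-specific names.
 */
static void my_renegotiate_link(struct scsi_target *starget)
{
	/* transport-specific renegotiation would go here */
}

static void my_revalidate_target(struct scsi_target *starget)
{
	scsi_target_quiesce(starget);	/* user commands are now deferred */
	my_renegotiate_link(starget);
	scsi_target_resume(starget);	/* restart the device queues */
}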
2566 
2567 /**
2568  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2569  * @sdev:	device to block
2570  *
2571  * Block request made by scsi lld's to temporarily stop all
2572  * scsi commands on the specified device.  Called from interrupt
2573  * or normal process context.
2574  *
2575  * Returns zero if successful or error if not
2576  *
2577  * Notes:
2578  *	This routine transitions the device to the SDEV_BLOCK state
2579  *	(which must be a legal transition).  When the device is in this
2580  *	state, all commands are deferred until the scsi lld reenables
2581  *	the device with scsi_device_unblock or device_block_tmo fires.
2582  *	This routine assumes the host_lock is held on entry.
2583  */
2584 int
2585 scsi_internal_device_block(struct scsi_device *sdev)
2586 {
2587 	struct request_queue *q = sdev->request_queue;
2588 	unsigned long flags;
2589 	int err = 0;
2590 
2591 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
2592 	if (err) {
2593 		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2594 
2595 		if (err)
2596 			return err;
2597 	}
2598 
2599 	/*
2600 	 * The device has transitioned to SDEV_BLOCK.  Stop the
2601 	 * block layer from calling the midlayer with this device's
2602 	 * request queue.
2603 	 */
2604 	spin_lock_irqsave(q->queue_lock, flags);
2605 	blk_stop_queue(q);
2606 	spin_unlock_irqrestore(q->queue_lock, flags);
2607 
2608 	return 0;
2609 }
2610 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2611 
2612 /**
2613  * scsi_internal_device_unblock - resume a device after a block request
2614  * @sdev:	device to resume
2615  *
2616  * Called by scsi lld's or the midlayer to restart the device queue
2617  * for the previously suspended scsi device.  Called from interrupt or
2618  * normal process context.
2619  *
2620  * Returns zero if successful or error if not.
2621  *
2622  * Notes:
2623  *	This routine transitions the device to the SDEV_RUNNING state
2624  *	(which must be a legal transition) allowing the midlayer to
2625  *	goose the queue for this device.  This routine assumes the
2626  *	host_lock is held upon entry.
2627  */
2628 int
2629 scsi_internal_device_unblock(struct scsi_device *sdev)
2630 {
2631 	struct request_queue *q = sdev->request_queue;
2632 	int err;
2633 	unsigned long flags;
2634 
2635 	/*
2636 	 * Try to transition the scsi device to SDEV_RUNNING
2637 	 * and goose the device queue if successful.
2638 	 */
2639 	err = scsi_device_set_state(sdev, SDEV_RUNNING);
2640 	if (err) {
2641 		err = scsi_device_set_state(sdev, SDEV_CREATED);
2642 
2643 		if (err)
2644 			return err;
2645 	}
2646 
2647 	spin_lock_irqsave(q->queue_lock, flags);
2648 	blk_start_queue(q);
2649 	spin_unlock_irqrestore(q->queue_lock, flags);
2650 
2651 	return 0;
2652 }
2653 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2654 
2655 static void
2656 device_block(struct scsi_device *sdev, void *data)
2657 {
2658 	scsi_internal_device_block(sdev);
2659 }
2660 
2661 static int
2662 target_block(struct device *dev, void *data)
2663 {
2664 	if (scsi_is_target_device(dev))
2665 		starget_for_each_device(to_scsi_target(dev), NULL,
2666 					device_block);
2667 	return 0;
2668 }
2669 
2670 void
2671 scsi_target_block(struct device *dev)
2672 {
2673 	if (scsi_is_target_device(dev))
2674 		starget_for_each_device(to_scsi_target(dev), NULL,
2675 					device_block);
2676 	else
2677 		device_for_each_child(dev, NULL, target_block);
2678 }
2679 EXPORT_SYMBOL_GPL(scsi_target_block);
2680 
2681 static void
2682 device_unblock(struct scsi_device *sdev, void *data)
2683 {
2684 	scsi_internal_device_unblock(sdev);
2685 }
2686 
2687 static int
2688 target_unblock(struct device *dev, void *data)
2689 {
2690 	if (scsi_is_target_device(dev))
2691 		starget_for_each_device(to_scsi_target(dev), NULL,
2692 					device_unblock);
2693 	return 0;
2694 }
2695 
2696 void
2697 scsi_target_unblock(struct device *dev)
2698 {
2699 	if (scsi_is_target_device(dev))
2700 		starget_for_each_device(to_scsi_target(dev), NULL,
2701 					device_unblock);
2702 	else
2703 		device_for_each_child(dev, NULL, target_unblock);
2704 }
2705 EXPORT_SYMBOL_GPL(scsi_target_unblock);
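
/*
 * Illustrative sketch: a fabric transport blocking a target while its
 * remote port is temporarily lost and unblocking it when the port
 * returns, similar in spirit to what the FC transport class does around
 * its device-loss timer.  @dev is the target's struct device; the "my_"
 * names are hypothetical.
 */
static void my_remote_port_gone(struct device *dev)
{
	scsi_target_block(dev);		/* defer all commands to this target */
}

static void my_remote_port_back(struct device *dev)
{
	scsi_target_unblock(dev);	/* re-run the device queues */
}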
2706 
2707 /**
2708  * scsi_kmap_atomic_sg - find and atomically map an sg-element
2709  * @sgl:	scatter-gather list
2710  * @sg_count:	number of segments in sg
2711  * @offset:	offset in bytes into sg, on return offset into the mapped area
2712  * @len:	bytes to map, on return number of bytes mapped
2713  *
2714  * Returns virtual address of the start of the mapped page
2715  */
2716 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2717 			  size_t *offset, size_t *len)
2718 {
2719 	int i;
2720 	size_t sg_len = 0, len_complete = 0;
2721 	struct scatterlist *sg;
2722 	struct page *page;
2723 
2724 	WARN_ON(!irqs_disabled());
2725 
2726 	for_each_sg(sgl, sg, sg_count, i) {
2727 		len_complete = sg_len; /* Complete sg-entries */
2728 		sg_len += sg->length;
2729 		if (sg_len > *offset)
2730 			break;
2731 	}
2732 
2733 	if (unlikely(i == sg_count)) {
2734 		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2735 			"elements %d\n",
2736 		       __func__, sg_len, *offset, sg_count);
2737 		WARN_ON(1);
2738 		return NULL;
2739 	}
2740 
2741 	/* Offset starting from the beginning of first page in this sg-entry */
2742 	*offset = *offset - len_complete + sg->offset;
2743 
2744 	/* Assumption: contiguous pages can be accessed as "page + i" */
2745 	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2746 	*offset &= ~PAGE_MASK;
2747 
2748 	/* Bytes in this sg-entry from *offset to the end of the page */
2749 	sg_len = PAGE_SIZE - *offset;
2750 	if (*len > sg_len)
2751 		*len = sg_len;
2752 
2753 	return kmap_atomic(page, KM_BIO_SRC_IRQ);
2754 }
2755 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2756 
2757 /**
2758  * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2759  * @virt:	virtual address to be unmapped
2760  */
2761 void scsi_kunmap_atomic_sg(void *virt)
2762 {
2763 	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2764 }
2765 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
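
/*
 * Illustrative sketch: copying a small amount of data out of a command's
 * scatterlist with the atomic mapping helpers.  The caller must have
 * interrupts disabled, the mapping is only valid until
 * scsi_kunmap_atomic_sg(), and at most one page is mapped at a time, so
 * *len may come back smaller than requested.  The "my_" name is
 * hypothetical.
 */
static void my_copy_from_sg(struct scsi_cmnd *cmd, void *dst, size_t count)
{
	size_t offset = 0;
	size_t len = count;
	void *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;
	memcpy(dst, vaddr + offset, len);	/* len may be less than count */
	scsi_kunmap_atomic_sg(vaddr);
}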
2766