/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to lowlevel SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);	/* host_no for next new host */


static void scsi_host_cls_release(struct device *dev)
{
	put_device(&class_to_shost(dev)->shost_gendev);
}

static struct class shost_class = {
	.name		= "scsi_host",
	.dev_release	= scsi_host_cls_release,
};

/**
 *	scsi_host_set_state - Take the given host through the host state model.
 *	@shost:	scsi host to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
	enum scsi_host_state oldstate = shost->shost_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SHOST_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SHOST_RUNNING:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_RECOVERY:
		switch (oldstate) {
		case SHOST_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL:
		switch (oldstate) {
		case SHOST_CREATED:
		case SHOST_RUNNING:
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_DEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_CANCEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL:
		case SHOST_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;

	case SHOST_DEL_RECOVERY:
		switch (oldstate) {
		case SHOST_CANCEL_RECOVERY:
			break;
		default:
			goto illegal;
		}
		break;
	}
	shost->shost_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				shost_printk(KERN_ERR, shost,
					     "Illegal host state transition "
					     "%s->%s\n",
					     scsi_host_state_name(oldstate),
					     scsi_host_state_name(state)));
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_host_set_state);
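
/*
 * Usage sketch (illustrative, not part of the original file): state
 * changes are made under host_lock, falling back to the recovery
 * variant the same way scsi_remove_host() below does:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	if (scsi_host_set_state(shost, SHOST_CANCEL))
 *		scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */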

/**
 * scsi_remove_host - remove a scsi host
 * @shost:	a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			mutex_unlock(&shost->scan_mutex);
			return;
		}
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_autopm_get_host(shost);
	scsi_forget_host(shost);
	mutex_unlock(&shost->scan_mutex);
	scsi_proc_host_rm(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_DEL))
		BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
	spin_unlock_irqrestore(shost->host_lock, flags);

	transport_unregister_device(&shost->shost_gendev);
	device_unregister(&shost->shost_dev);
	device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
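
/*
 * Teardown sketch (hypothetical low-level driver remove path; the names
 * my_adapter/my_remove are illustrative, not from this file): the usual
 * pairing is scsi_remove_host() followed by a final scsi_host_put():
 *
 *	static void my_remove(struct my_adapter *adap)
 *	{
 *		scsi_remove_host(adap->shost);
 *		... quiesce and release the adapter hardware ...
 *		scsi_host_put(adap->shost);
 *	}
 */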

/**
 * scsi_add_host_with_dma - add a scsi host with dma device
 * @shost:	scsi host pointer to add
 * @dev:	a struct device of type scsi class
 * @dma_dev:	dma device for the host
 *
 * Note: You rarely need to worry about this unless you're in a
 * virtualised host environment, so use the simpler scsi_add_host()
 * function instead.
 *
 * Return value:
 * 	0 on success / != 0 for error
 **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
			   struct device *dma_dev)
{
	struct scsi_host_template *sht = shost->hostt;
	int error = -EINVAL;

	printk(KERN_INFO "scsi%d : %s\n", shost->host_no,
			sht->info ? sht->info(shost) : sht->name);

	if (!shost->can_queue) {
		printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
				sht->name);
		goto fail;
	}

	error = scsi_setup_command_freelist(shost);
	if (error)
		goto fail;

	if (!shost->shost_gendev.parent)
		shost->shost_gendev.parent = dev ? dev : &platform_bus;
	if (!dma_dev)
		dma_dev = shost->shost_gendev.parent;

	shost->dma_dev = dma_dev;

	error = device_add(&shost->shost_gendev);
	if (error)
		goto out;

	pm_runtime_set_active(&shost->shost_gendev);
	pm_runtime_enable(&shost->shost_gendev);
	device_enable_async_suspend(&shost->shost_gendev);

	scsi_host_set_state(shost, SHOST_RUNNING);
	get_device(shost->shost_gendev.parent);

	device_enable_async_suspend(&shost->shost_dev);

	error = device_add(&shost->shost_dev);
	if (error)
		goto out_del_gendev;

	get_device(&shost->shost_gendev);

	if (shost->transportt->host_size) {
		shost->shost_data = kzalloc(shost->transportt->host_size,
					 GFP_KERNEL);
		if (shost->shost_data == NULL) {
			error = -ENOMEM;
			goto out_del_dev;
		}
	}

	if (shost->transportt->create_work_queue) {
		snprintf(shost->work_q_name, sizeof(shost->work_q_name),
			 "scsi_wq_%d", shost->host_no);
		shost->work_q = create_singlethread_workqueue(
					shost->work_q_name);
		if (!shost->work_q) {
			error = -EINVAL;
			goto out_free_shost_data;
		}
	}

	error = scsi_sysfs_add_host(shost);
	if (error)
		goto out_destroy_host;

	scsi_proc_host_add(shost);
	return error;

 out_destroy_host:
	if (shost->work_q)
		destroy_workqueue(shost->work_q);
 out_free_shost_data:
	kfree(shost->shost_data);
 out_del_dev:
	device_del(&shost->shost_dev);
 out_del_gendev:
	device_del(&shost->shost_gendev);
 out:
	scsi_destroy_command_freelist(shost);
 fail:
	return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
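
/*
 * For reference: the common scsi_add_host() helper declared in
 * <scsi/scsi_host.h> is a thin wrapper that passes the same struct
 * device as both sysfs parent and DMA device, roughly:
 *
 *	static inline int scsi_add_host(struct Scsi_Host *host,
 *					struct device *dev)
 *	{
 *		return scsi_add_host_with_dma(host, dev, dev);
 *	}
 *
 * Only drivers whose DMA-mapping device differs from the sysfs parent
 * (e.g. some virtualised setups, as noted above) call
 * scsi_add_host_with_dma() directly.
 */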

static void scsi_host_dev_release(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct device *parent = dev->parent;
	struct request_queue *q;
	void *queuedata;

	scsi_proc_hostdir_rm(shost->hostt);

	if (shost->ehandler)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);
	q = shost->uspace_req_q;
	if (q) {
		queuedata = q->queuedata;
		blk_cleanup_queue(q);
		kfree(queuedata);
	}

	scsi_destroy_command_freelist(shost);
	if (shost->bqt)
		blk_free_tags(shost->bqt);

	kfree(shost->shost_data);

	if (parent)
		put_device(parent);
	kfree(shost);
}

static struct device_type scsi_host_type = {
	.name =		"scsi_host",
	.release =	scsi_host_dev_release,
};

/**
 * scsi_host_alloc - allocate a scsi host adapter instance.
 * @sht:	pointer to scsi host template
 * @privsize:	extra bytes to allocate for driver
 *
 * Note:
 * 	Allocate a new Scsi_Host and perform basic initialization.
 * 	The host is not published to the scsi midlayer until scsi_add_host
 * 	is called.
 *
 * Return value:
 * 	Pointer to a new Scsi_Host
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost;
	gfp_t gfp_mask = GFP_KERNEL;

	if (sht->unchecked_isa_dma && privsize)
		gfp_mask |= __GFP_DMA;

	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
	if (!shost)
		return NULL;

	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
	INIT_LIST_HEAD(&shost->eh_cmd_q);
	INIT_LIST_HEAD(&shost->starved_list);
	init_waitqueue_head(&shost->host_wait);

	mutex_init(&shost->scan_mutex);

	/*
	 * subtract one because we increment first then return, but we need to
	 * know what the next host number was before increment
	 */
	shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
	shost->dma_channel = 0xff;

	/* These three are default values which can be overridden */
	shost->max_channel = 0;
	shost->max_id = 8;
	shost->max_lun = 8;

	/* Give each shost a default transportt */
	shost->transportt = &blank_transport_template;

	/*
	 * All drivers right now should be able to handle 12 byte
	 * commands.  Every so often there are requests for 16 byte
	 * commands, but individual low-level drivers need to certify that
	 * they actually do something sensible with such commands.
	 */
	shost->max_cmd_len = 12;
	shost->hostt = sht;
	shost->this_id = sht->this_id;
	shost->can_queue = sht->can_queue;
	shost->sg_tablesize = sht->sg_tablesize;
	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
	shost->cmd_per_lun = sht->cmd_per_lun;
	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
	shost->use_clustering = sht->use_clustering;
	shost->ordered_tag = sht->ordered_tag;

	if (sht->supported_mode == MODE_UNKNOWN)
		/* means we didn't set it ... default to INITIATOR */
		shost->active_mode = MODE_INITIATOR;
	else
		shost->active_mode = sht->supported_mode;

	if (sht->max_host_blocked)
		shost->max_host_blocked = sht->max_host_blocked;
	else
		shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

	/*
	 * If the driver imposes no hard sector transfer limit, start at
	 * machine infinity initially.
	 */
	if (sht->max_sectors)
		shost->max_sectors = sht->max_sectors;
	else
		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

	/*
	 * assume a 4GB boundary, if not set
	 */
	if (sht->dma_boundary)
		shost->dma_boundary = sht->dma_boundary;
	else
		shost->dma_boundary = 0xffffffff;

	device_initialize(&shost->shost_gendev);
	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
	shost->shost_gendev.bus = &scsi_bus_type;
	shost->shost_gendev.type = &scsi_host_type;

	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;

	shost->ehandler = kthread_run(scsi_error_handler, shost,
			"scsi_eh_%d", shost->host_no);
	if (IS_ERR(shost->ehandler)) {
		printk(KERN_WARNING "scsi%d: error handler thread failed to spawn, error = %ld\n",
			shost->host_no, PTR_ERR(shost->ehandler));
		goto fail_kfree;
	}

	scsi_proc_hostdir_add(shost->hostt);
	return shost;

 fail_kfree:
	kfree(shost);
	return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
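
/*
 * Lifecycle sketch (hypothetical PCI driver probe; my_template, my_priv
 * and pdev are illustrative): allocate, override the defaults set above
 * as needed, publish with scsi_add_host(), then scan:
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(struct my_priv));
 *	if (!shost)
 *		return -ENOMEM;
 *
 *	shost->max_id = 16;
 *	shost->max_lun = 256;
 *
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error) {
 *		scsi_host_put(shost);
 *		return error;
 *	}
 *	scsi_scan_host(shost);
 */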

struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
{
	struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);

	if (!sht->detect) {
		printk(KERN_WARNING "scsi_register() called on new-style "
				    "template for driver %s\n", sht->name);
		dump_stack();
	}

	if (shost)
		list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
	return shost;
}
EXPORT_SYMBOL(scsi_register);

void scsi_unregister(struct Scsi_Host *shost)
{
	list_del(&shost->sht_legacy_list);
	scsi_host_put(shost);
}
EXPORT_SYMBOL(scsi_unregister);
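
/*
 * Legacy usage sketch (hypothetical old-style driver): scsi_register()
 * and scsi_unregister() exist for templates that still implement the
 * ->detect() hook; new drivers should use scsi_host_alloc() and
 * scsi_add_host() instead.
 *
 *	static int legacy_detect(struct scsi_host_template *sht)
 *	{
 *		struct Scsi_Host *shost = scsi_register(sht, 0);
 *
 *		if (!shost)
 *			return 0;	.. no hosts found ..
 *		... probe the hardware and fill in shost ...
 *		return 1;		.. number of hosts detected ..
 *	}
 */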

static int __scsi_host_match(struct device *dev, const void *data)
{
	struct Scsi_Host *p;
	const unsigned short *hostnum = data;

	p = class_to_shost(dev);
	return p->host_no == *hostnum;
}

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 * @hostnum:	host number to locate
 *
 * Return value:
 *	A pointer to the located Scsi_Host or NULL.
 *
 *	The caller must do a scsi_host_put() to drop the reference
 *	that scsi_host_get() took. The put_device() below dropped
 *	the reference from class_find_device().
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
	struct device *cdev;
	struct Scsi_Host *shost = NULL;

	cdev = class_find_device(&shost_class, NULL, &hostnum,
				 __scsi_host_match);
	if (cdev) {
		shost = scsi_host_get(class_to_shost(cdev));
		put_device(cdev);
	}
	return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
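
/*
 * Usage sketch: a successful lookup returns the host with an elevated
 * reference count, which the caller must balance:
 *
 *	shost = scsi_host_lookup(host_no);
 *	if (!shost)
 *		return -ENODEV;
 *	... use shost ...
 *	scsi_host_put(shost);
 */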

/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
	if ((shost->shost_state == SHOST_DEL) ||
		!get_device(&shost->shost_gendev))
		return NULL;
	return shost;
}
EXPORT_SYMBOL(scsi_host_get);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:	Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
	put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);
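
/*
 * Note: scsi_host_get()/scsi_host_put() pin shost_gendev, so code that
 * stores a Scsi_Host pointer beyond the scope in which it obtained it
 * should take its own reference first, e.g. (my_ctx is hypothetical):
 *
 *	if (!scsi_host_get(shost))
 *		return -ENXIO;		.. host already in SHOST_DEL ..
 *	my_ctx->shost = shost;
 *	...
 *	scsi_host_put(my_ctx->shost);
 */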

int scsi_init_hosts(void)
{
	return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
	class_unregister(&shost_class);
}

int scsi_is_host_device(const struct device *dev)
{
	return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:	Pointer to Scsi_Host.
 * @work:	Work to queue for execution.
 *
 * Return value:
 * 	1 - work queued for execution
 *	0 - work is already queued
 *	-EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
	if (unlikely(!shost->work_q)) {
		printk(KERN_ERR
			"ERROR: Scsi host '%s' attempted to queue scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
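
/*
 * Usage sketch (hypothetical transport code; my_ev and my_event_worker
 * are illustrative): the per-host workqueue only exists when the
 * transport template set create_work_queue, so deferred work goes
 * through this wrapper and the error return must be handled:
 *
 *	INIT_WORK(&my_ev->work, my_event_worker);
 *	if (scsi_queue_work(shost, &my_ev->work) < 0)
 *		kfree(my_ev);
 */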

/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:	Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
	if (!shost->work_q) {
		printk(KERN_ERR
			"ERROR: Scsi host '%s' attempted to flush scsi-work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);