1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _SCSI_SCSI_HOST_H
3  #define _SCSI_SCSI_HOST_H
4  
5  #include <linux/device.h>
6  #include <linux/list.h>
7  #include <linux/types.h>
8  #include <linux/workqueue.h>
9  #include <linux/mutex.h>
10  #include <linux/seq_file.h>
11  #include <linux/blk-mq.h>
12  #include <scsi/scsi.h>
13  #include <linux/android_kabi.h>
14  
15  struct block_device;
16  struct completion;
17  struct module;
18  struct scsi_cmnd;
19  struct scsi_device;
20  struct scsi_target;
21  struct Scsi_Host;
22  struct scsi_transport_template;
23  
24  
25  #define SG_ALL	SG_CHUNK_SIZE
26  
27  #define MODE_UNKNOWN 0x00
28  #define MODE_INITIATOR 0x01
29  #define MODE_TARGET 0x02
30  
31  /**
32   * enum scsi_timeout_action - How to handle a command that timed out.
33   * @SCSI_EH_DONE: The command has already been completed.
34   * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
35   * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
36   */
37  enum scsi_timeout_action {
38  	SCSI_EH_DONE,
39  	SCSI_EH_RESET_TIMER,
40  	SCSI_EH_NOT_HANDLED,
41  };
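/*
 * Illustrative sketch (not part of this header): how a transport's
 * eh_timed_out handler might map its own state onto these actions.
 * foo_link_resetting() and foo_cmd_still_in_flight() are hypothetical
 * driver helpers used only for illustration.
 */
static enum scsi_timeout_action foo_eh_timed_out(struct scsi_cmnd *scmd)
{
	if (foo_link_resetting(scmd->device->host))
		return SCSI_EH_RESET_TIMER;	/* give the command more time */

	if (!foo_cmd_still_in_flight(scmd))
		return SCSI_EH_DONE;		/* already completed */

	return SCSI_EH_NOT_HANDLED;		/* let the midlayer abort it */
}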
42  
43  struct scsi_host_template {
44  	/*
45  	 * Put fields referenced in IO submission path together in
46  	 * same cacheline
47  	 */
48  
49  	/*
50  	 * Additional per-command data allocated for the driver.
51  	 */
52  	unsigned int cmd_size;
53  
54  	/*
55  	 * The queuecommand function is used to queue up a scsi
56  	 * command block to the LLDD.  When the driver has finished
57  	 * processing the command, the done callback is invoked.
58  	 *
59  	 * If queuecommand returns 0, then the driver has accepted the
60  	 * command.  It must also push it to the HBA if the scsi_cmnd
61  	 * flag SCMD_LAST is set, or if the driver does not implement
62  	 * commit_rqs.  The done() function must be called on the command
63  	 * when the driver has finished with it. (you may call done on the
64  	 * command before queuecommand returns, but in this case you
65  	 * *must* return 0 from queuecommand).
66  	 *
67  	 * Queuecommand may also reject the command, in which case it may
68  	 * not touch the command and must not call done() for it.
69  	 *
70  	 * There are two possible rejection returns:
71  	 *
72  	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
73  	 *   allow commands to other devices serviced by this host.
74  	 *
75  	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
76  	 *   host temporarily.
77  	 *
78  	 * For compatibility, any other non-zero return is treated the
79  	 * same as SCSI_MLQUEUE_HOST_BUSY.
80  	 *
81  	 * NOTE: "temporarily" means either until the next command for
82  	 * this device/host completes, or a period of time determined by
83  	 * I/O pressure in the system if there are no other outstanding
84  	 * commands.
85  	 *
86  	 * STATUS: REQUIRED
87  	 */
88  	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
89  
90  	/*
91  	 * The commit_rqs function is used to trigger a hardware
92  	 * doorbell after some requests have been queued with
93  	 * queuecommand, when an error is encountered before sending
94  	 * the request with SCMD_LAST set.
95  	 *
96  	 * STATUS: OPTIONAL
97  	 */
98  	void (*commit_rqs)(struct Scsi_Host *, u16);
99  
100  	struct module *module;
101  	const char *name;
102  
103  	/*
104  	 * The info function will return whatever useful information the
105  	 * developer sees fit.  If not provided, then the name field will
106  	 * be used instead.
107  	 *
108  	 * Status: OPTIONAL
109  	 */
110  	const char *(*info)(struct Scsi_Host *);
111  
112  	/*
113  	 * Ioctl interface
114  	 *
115  	 * Status: OPTIONAL
116  	 */
117  	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
118  		     void __user *arg);
119  
120  
121  #ifdef CONFIG_COMPAT
122  	/*
123  	 * Compat handler. Handles the 32-bit ABI.
124  	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
125  	 *
126  	 * Status: OPTIONAL
127  	 */
128  	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
129  			    void __user *arg);
130  #endif
131  
132  	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
133  	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
134  
135  	/*
136  	 * This is an error handling strategy routine.  You don't need to
137  	 * define one of these if you don't want to - there is a default
138  	 * routine that should work in most cases.  For those
139  	 * driver authors that have the inclination and ability to write their
140  	 * own strategy routine, this is where it is specified.  Note - the
141  	 * strategy routine is *ALWAYS* run in the context of the kernel eh
142  	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
143  	 * handler when you execute this, and you are also guaranteed to
144  	 * *NOT* have any other commands being queued while you are in the
145  	 * strategy routine. When you return from this function, operations
146  	 * return to normal.
147  	 *
148  	 * See scsi_error.c scsi_unjam_host for additional comments about
149  	 * what this function should and should not be attempting to do.
150  	 *
151  	 * Status: REQUIRED	(at least one of them)
152  	 */
153  	int (* eh_abort_handler)(struct scsi_cmnd *);
154  	int (* eh_device_reset_handler)(struct scsi_cmnd *);
155  	int (* eh_target_reset_handler)(struct scsi_cmnd *);
156  	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
157  	int (* eh_host_reset_handler)(struct scsi_cmnd *);
158  
159  	/*
160  	 * Before the mid layer attempts to scan for a new device where none
161  	 * currently exists, it will call this entry in your driver.  Should
162  	 * your driver need to allocate any structs or perform any other init
163  	 * items in order to send commands to a currently unused target/lun
164  	 * combo, then this is where you can perform those allocations.  This
165  	 * is specifically so that drivers won't have to perform any kind of
166  	 * "is this a new device" checks in their queuecommand routine,
167  	 * thereby making the hot path a bit quicker.
168  	 *
169  	 * Return values: 0 on success, non-0 on failure
170  	 *
171  	 * Deallocation:  If we didn't find any devices at this ID, you will
172  	 * get an immediate call to slave_destroy().  If we find something
173  	 * here then you will get a call to slave_configure(), then the
174  	 * device will be used for however long it is kept around, then when
175  	 * the device is removed from the system (or possibly at reboot
176  	 * time), you will then get a call to slave_destroy().  This is
177  	 * assuming you implement slave_configure and slave_destroy.
178  	 * However, if you allocate memory and hang it off the device struct,
179  	 * then you must implement the slave_destroy() routine at a minimum
180  	 * in order to avoid leaking memory
181  	 * each time a device is torn down.
182  	 *
183  	 * Status: OPTIONAL
184  	 */
185  	int (* slave_alloc)(struct scsi_device *);
186  
187  	/*
188  	 * Once the device has responded to an INQUIRY and we know the
189  	 * device is online, we call into the low level driver with the
190  	 * struct scsi_device *.  If the low level device driver implements
191  	 * this function, it *must* perform the task of setting the queue
192  	 * depth on the device.  All other tasks are optional and depend
193  	 * on what the driver supports and various implementation details.
194  	 *
195  	 * Things currently recommended to be handled at this time include:
196  	 *
197  	 * 1.  Setting the device queue depth.  Proper setting of this is
198  	 *     described in the comments for scsi_change_queue_depth.
199  	 * 2.  Determining if the device supports the various synchronous
200  	 *     negotiation protocols.  The device struct will already have
201  	 *     responded to INQUIRY and the results of the standard items
202  	 *     will have been shoved into the various device flag bits, eg.
203  	 *     device->sdtr will be true if the device supports SDTR messages.
204  	 * 3.  Allocating command structs that the device will need.
205  	 * 4.  Setting the default timeout on this device (if needed).
206  	 * 5.  Anything else the low level driver might want to do on a device
207  	 *     specific setup basis...
208  	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
209  	 *     as offline on error so that no access will occur.  If you return
210  	 *     non-0, your slave_destroy routine will never get called for this
211  	 *     device, so don't leave any loose memory hanging around, clean
212  	 *     up after yourself before returning non-0
213  	 *
214  	 * Status: OPTIONAL
215  	 */
216  	int (* slave_configure)(struct scsi_device *);
217  
218  	/*
219  	 * Immediately prior to deallocating the device and after all activity
220  	 * has ceased, the mid layer calls this point so that the low level
221  	 * driver may completely detach itself from the scsi device and vice
222  	 * versa.  The low level driver is responsible for freeing any memory
223  	 * it allocated in the slave_alloc or slave_configure calls.
224  	 *
225  	 * Status: OPTIONAL
226  	 */
227  	void (* slave_destroy)(struct scsi_device *);
228  
229  	/*
230  	 * Before the mid layer attempts to scan for a new device attached
231  	 * to a target where no target currently exists, it will call this
232  	 * entry in your driver.  Should your driver need to allocate any
233  	 * structs or perform any other init items in order to send commands
234  	 * to a currently unused target, then this is where you can perform
235  	 * those allocations.
236  	 *
237  	 * Return values: 0 on success, non-0 on failure
238  	 *
239  	 * Status: OPTIONAL
240  	 */
241  	int (* target_alloc)(struct scsi_target *);
242  
243  	/*
244  	 * Immediately prior to deallocating the target structure, and
245  	 * after all activity to attached scsi devices has ceased, the
246  	 * midlayer calls this point so that the driver may deallocate
247  	 * and terminate any references to the target.
248  	 *
249  	 * Status: OPTIONAL
250  	 */
251  	void (* target_destroy)(struct scsi_target *);
252  
253  	/*
254  	 * If a host has the ability to discover targets on its own instead
255  	 * of scanning the entire bus, it can fill in this function and
256  	 * call scsi_scan_host().  This function will then be called
257  	 * periodically, with the scsi_host and the elapsed scan time in
258  	 * jiffies, until it returns 1.
259  	 *
260  	 * Status: OPTIONAL
261  	 */
262  	int (* scan_finished)(struct Scsi_Host *, unsigned long);
263  
264  	/*
265  	 * If the host wants to be called before the scan starts, but
266  	 * after the midlayer has finished setting up for the scan, it can fill
267  	 * in this function.
268  	 *
269  	 * Status: OPTIONAL
270  	 */
271  	void (* scan_start)(struct Scsi_Host *);
272  
273  	/*
274  	 * Fill in this function to allow the queue depth of this host
275  	 * to be changeable (on a per device basis).  Returns either
276  	 * the current queue depth setting (may be different from what
277  	 * was passed in) or an error.  An error should only be
278  	 * returned if the requested depth is legal but the driver was
279  	 * unable to set it.  If the requested depth is illegal, the
280  	 * driver should set and return the closest legal queue depth.
281  	 *
282  	 * Status: OPTIONAL
283  	 */
284  	int (* change_queue_depth)(struct scsi_device *, int);
285  
286  	/*
287  	 * This function lets the driver expose the queue mapping
288  	 * to the block layer.
289  	 *
290  	 * Status: OPTIONAL
291  	 */
292  	void (* map_queues)(struct Scsi_Host *shost);
293  
294  	/*
295  	 * SCSI interface of blk_poll - poll for IO completions.
296  	 * Only applicable if SCSI LLD exposes multiple h/w queues.
297  	 *
298  	 * Return value: Number of completed entries found.
299  	 *
300  	 * Status: OPTIONAL
301  	 */
302  	int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
303  
304  	/*
305  	 * Check if scatterlists need to be padded for DMA draining.
306  	 *
307  	 * Status: OPTIONAL
308  	 */
309  	bool (* dma_need_drain)(struct request *rq);
310  
311  	/*
312  	 * This function determines the BIOS parameters for a given
313  	 * hard disk.  These tend to be numbers that are made up by
314  	 * the host adapter.  Parameters:
315  	 * size, device, list (heads, sectors, cylinders)
316  	 *
317  	 * Status: OPTIONAL
318  	 */
319  	int (* bios_param)(struct scsi_device *, struct block_device *,
320  			sector_t, int []);
321  
322  	/*
323  	 * This function is called when one or more partitions on the
324  	 * device reach beyond the end of the device.
325  	 *
326  	 * Status: OPTIONAL
327  	 */
328  	void (*unlock_native_capacity)(struct scsi_device *);
329  
330  	/*
331  	 * Can be used to export driver statistics and other information to
332  	 * the world outside the kernel, i.e. userspace, and it also provides
333  	 * an interface to feed the driver with information.
334  	 *
335  	 * Status: OBSOLETE
336  	 */
337  	int (*show_info)(struct seq_file *, struct Scsi_Host *);
338  	int (*write_info)(struct Scsi_Host *, char *, int);
339  
340  	/*
341  	 * This is an optional routine that allows the transport to become
342  	 * involved when a scsi io timer fires. The return value tells the
343  	 * timer routine how to finish the io timeout handling.
344  	 *
345  	 * Status: OPTIONAL
346  	 */
347  	enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
348  	/*
349  	 * Optional routine that allows the transport to decide if a cmd
350  	 * is retryable. Return true if the transport is in a state in
351  	 * which the cmd should be retried.
352  	 */
353  	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
354  
355  	/* This is an optional routine that allows the transport to initiate
356  	 * an LLD adapter or firmware reset using a sysfs attribute.
357  	 *
358  	 * Return values: 0 on success, negative value on failure.
359  	 *
360  	 * Status: OPTIONAL
361  	 */
362  
363  	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
364  #define SCSI_ADAPTER_RESET	1
365  #define SCSI_FIRMWARE_RESET	2
366  
367  
368  	/*
369  	 * Name of proc directory
370  	 */
371  	const char *proc_name;
372  
373  	/*
374  	 * This determines if we will use a non-interrupt driven
375  	 * or an interrupt driven scheme.  It is set to the maximum number
376  	 * of simultaneous commands a single hw queue in the HBA will accept.
377  	 */
378  	int can_queue;
379  
380  	/*
381  	 * In many instances, especially where disconnect / reconnect are
382  	 * supported, our host also has an ID on the SCSI bus.  If this is
383  	 * the case, then it must be reserved.  Please set this_id to -1 if
384  	 * your setup is in single initiator mode, and the host lacks an
385  	 * ID.
386  	 */
387  	int this_id;
388  
389  	/*
390  	 * This determines the degree to which the host adapter is capable
391  	 * of scatter-gather.
392  	 */
393  	unsigned short sg_tablesize;
394  	unsigned short sg_prot_tablesize;
395  
396  	/*
397  	 * Set this if the host adapter has limitations besides the segment count.
398  	 */
399  	unsigned int max_sectors;
400  
401  	/*
402  	 * Maximum size in bytes of a single segment.
403  	 */
404  	unsigned int max_segment_size;
405  
406  	/*
407  	 * DMA scatter gather segment boundary limit. A segment crossing this
408  	 * boundary will be split in two.
409  	 */
410  	unsigned long dma_boundary;
411  
412  	unsigned long virt_boundary_mask;
413  
414  	/*
415  	 * This specifies "machine infinity" for host templates which don't
416  	 * limit the transfer size.  Note this limit represents an absolute
417  	 * maximum, and may be over the transfer limits allowed for
418  	 * individual devices (e.g. 256 for SCSI-1).
419  	 */
420  #define SCSI_DEFAULT_MAX_SECTORS	1024
421  
422  	/*
423  	 * True if this host adapter can make good use of linked commands.
424  	 * This will allow more than one command to be queued to a given
425  	 * unit on a given host.  Set this to the maximum number of command
426  	 * blocks to be provided for each device.  Set this to 1 for one
427  	 * command block per lun, 2 for two, etc.  Do not set this to 0.
428  	 * You should make sure that the host adapter will do the right thing
429  	 * before you try setting this above 1.
430  	 */
431  	short cmd_per_lun;
432  
433  	/* If the block layer is used to manage tags, this is the tag allocation policy */
434  	int tag_alloc_policy;
435  
436  	/*
437  	 * Track QUEUE_FULL events and reduce queue depth on demand.
438  	 */
439  	unsigned track_queue_depth:1;
440  
441  	/*
442  	 * This specifies the mode that an LLD supports.
443  	 */
444  	unsigned supported_mode:2;
445  
446  	/*
447  	 * True for emulated SCSI host adapters (e.g. ATAPI).
448  	 */
449  	unsigned emulated:1;
450  
451  	/*
452  	 * True if the low-level driver performs its own reset-settle delays.
453  	 */
454  	unsigned skip_settle_delay:1;
455  
456  	/* True if the controller does not support WRITE SAME */
457  	unsigned no_write_same:1;
458  
459  	/* True if the host uses host-wide tagspace */
460  	unsigned host_tagset:1;
461  
462  	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
463  	unsigned queuecommand_may_block:1;
464  
465  	/*
466  	 * Countdown for host blocking with no commands outstanding.
467  	 */
468  	unsigned int max_host_blocked;
469  
470  	/*
471  	 * Default value for the blocking.  If the queue is empty,
472  	 * host_blocked counts down in the request_fn until it reaches
473  	 * zero, at which point host operations restart.
474  	 *
475  	 * FIXME: This should probably be a value in the template
476  	 */
477  #define SCSI_DEFAULT_HOST_BLOCKED	7
478  
479  	/*
480  	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
481  	 */
482  	const struct attribute_group **shost_groups;
483  
484  	/*
485  	 * Pointer to the SCSI device attribute groups for this host,
486  	 * NULL terminated.
487  	 */
488  	const struct attribute_group **sdev_groups;
489  
490  	/*
491  	 * Vendor Identifier associated with the host
492  	 *
493  	 * Note: When specifying vendor_id, be sure to read the
494  	 *   Vendor Type and ID formatting requirements specified in
495  	 *   scsi_netlink.h
496  	 */
497  	u64 vendor_id;
498  
499  	ANDROID_OEM_DATA(1);
500  	ANDROID_KABI_RESERVE(1);
501  	ANDROID_KABI_RESERVE(2);
502  	ANDROID_KABI_RESERVE(3);
503  	ANDROID_KABI_RESERVE(4);
504  };
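/*
 * Illustrative sketch (not part of this header): a minimal host template as a
 * low-level driver might define it, with a queuecommand that uses the
 * accept/reject return values documented above.  The names foo_hba,
 * foo_cmd_priv, foo_hw_busy() and foo_hw_submit() are hypothetical.
 */
static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct foo_hba *hba = shost_priv(shost);

	if (foo_hw_busy(hba))
		return SCSI_MLQUEUE_HOST_BUSY;	 /* block the whole host briefly */

	if (foo_hw_submit(hba, scmd))
		return SCSI_MLQUEUE_DEVICE_BUSY; /* block only this device */

	return 0;	/* accepted; the driver completes it asynchronously */
}

static const struct scsi_host_template foo_sht = {
	.module		= THIS_MODULE,
	.name		= "foo HBA",
	.proc_name	= "foo",
	.queuecommand	= foo_queuecommand,
	.this_id	= -1,
	.can_queue	= 32,
	.cmd_per_lun	= 4,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_size	= sizeof(struct foo_cmd_priv),
};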
505  
506  /*
507   * Temporary #define for host lock push down. Can be removed when all
508   * drivers have been updated to take advantage of unlocked
509   * queuecommand.
510   *
511   */
512  #define DEF_SCSI_QCMD(func_name) \
513  	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
514  	{								\
515  		unsigned long irq_flags;				\
516  		int rc;							\
517  		spin_lock_irqsave(shost->host_lock, irq_flags);		\
518  		rc = func_name##_lck(cmd);				\
519  		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
520  		return rc;						\
521  	}
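/*
 * Illustrative sketch (not part of this header): a legacy, host_lock-based
 * driver supplies a <name>_lck() routine and lets DEF_SCSI_QCMD() emit the
 * locked <name>() wrapper that is wired into the host template.
 * foo_legacy_queuecommand_lck() and foo_hw_submit_locked() are hypothetical.
 */
static int foo_legacy_queuecommand_lck(struct scsi_cmnd *scmd)
{
	/* Runs with shost->host_lock held and local interrupts disabled. */
	if (foo_hw_submit_locked(scmd))
		return SCSI_MLQUEUE_HOST_BUSY;
	return 0;
}

static DEF_SCSI_QCMD(foo_legacy_queuecommand)

/* ... and in the template:  .queuecommand = foo_legacy_queuecommand,  */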
522  
523  
524  /*
525   * shost state: If you alter this, you also need to alter scsi_sysfs.c
526   * (for the ascii descriptions) and the state model enforcer:
527   * scsi_host_set_state()
528   */
529  enum scsi_host_state {
530  	SHOST_CREATED = 1,
531  	SHOST_RUNNING,
532  	SHOST_CANCEL,
533  	SHOST_DEL,
534  	SHOST_RECOVERY,
535  	SHOST_CANCEL_RECOVERY,
536  	SHOST_DEL_RECOVERY,
537  };
538  
539  struct Scsi_Host {
540  	/*
541  	 * __devices is protected by the host_lock, but you should
542  	 * usually use scsi_device_lookup / shost_for_each_device
543  	 * to access it so you do not have to care about locking yourself.
544  	 * In the rare case of being in irq context you can use
545  	 * their __ prefixed variants with the lock held. NEVER
546  	 * access this list directly from a driver.
547  	 */
548  	struct list_head	__devices;
549  	struct list_head	__targets;
550  
551  	struct list_head	starved_list;
552  
553  	spinlock_t		default_lock;
554  	spinlock_t		*host_lock;
555  
556  	struct mutex		scan_mutex;/* serialize scanning activity */
557  
558  	struct list_head	eh_abort_list;
559  	struct list_head	eh_cmd_q;
560  	struct task_struct    * ehandler;  /* Error recovery thread. */
561  	struct completion     * eh_action; /* Wait for specific actions on the
562  					      host. */
563  	wait_queue_head_t       host_wait;
564  	const struct scsi_host_template *hostt;
565  	struct scsi_transport_template *transportt;
566  
567  	struct kref		tagset_refcnt;
568  	struct completion	tagset_freed;
569  	/* Area to keep a shared tag map */
570  	struct blk_mq_tag_set	tag_set;
571  
572  	atomic_t host_blocked;
573  
574  	unsigned int host_failed;	   /* commands that failed.
575  					      protected by host_lock */
576  	unsigned int host_eh_scheduled;    /* EH scheduled without command */
577  
578  	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
579  
580  	/* next two fields are used to bound the time spent in error handling */
581  	int eh_deadline;
582  	unsigned long last_reset;
583  
584  
585  	/*
586  	 * These three parameters can be used to allow for wide scsi,
587  	 * and for host adapters that support multiple busses.
588  	 * The last two should be set to 1 more than the actual max id
589  	 * or lun (e.g. 8 for SCSI parallel systems).
590  	 */
591  	unsigned int max_channel;
592  	unsigned int max_id;
593  	u64 max_lun;
594  
595  	/*
596  	 * This is a unique identifier that must be assigned so that we
597  	 * have some way of identifying each detected host adapter properly
598  	 * and uniquely.  For hosts that do not support more than one card
599  	 * in the system at one time, this does not need to be set.  It is
600  	 * initialized to 0 in scsi_register.
601  	 */
602  	unsigned int unique_id;
603  
604  	/*
605  	 * The maximum length of SCSI commands that this host can accept.
606  	 * Probably 12 for most host adapters, but could be 16 for others,
607  	 * or 260 if the driver supports variable length cdbs.
608  	 * For drivers that don't set this field, a value of 12 is
609  	 * assumed.
610  	 */
611  	unsigned short max_cmd_len;
612  
613  	int this_id;
614  	int can_queue;
615  	short cmd_per_lun;
616  	short unsigned int sg_tablesize;
617  	short unsigned int sg_prot_tablesize;
618  	unsigned int max_sectors;
619  	unsigned int opt_sectors;
620  	unsigned int max_segment_size;
621  	unsigned long dma_boundary;
622  	unsigned long virt_boundary_mask;
623  	/*
624  	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
625  	 *
626  	 * Note: it is assumed that each hardware queue has a queue depth of
627  	 * can_queue. In other words, the total queue depth per host
628  	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
629  	 * the total queue depth is can_queue.
630  	 */
631  	unsigned nr_hw_queues;
632  	unsigned nr_maps;
633  	unsigned active_mode:2;
634  
635  	/*
636  	 * Host has requested that no further requests come through for the
637  	 * time being.
638  	 */
639  	unsigned host_self_blocked:1;
640  
641  	/*
642  	 * Host uses correct SCSI ordering not PC ordering. The bit is
643  	 * set for the minority of drivers whose authors actually read
644  	 * the spec ;).
645  	 */
646  	unsigned reverse_ordering:1;
647  
648  	/* Task mgmt function in progress */
649  	unsigned tmf_in_progress:1;
650  
651  	/* Asynchronous scan in progress */
652  	unsigned async_scan:1;
653  
654  	/* Don't resume host in EH */
655  	unsigned eh_noresume:1;
656  
657  	/* The controller does not support WRITE SAME */
658  	unsigned no_write_same:1;
659  
660  	/* True if the host uses host-wide tagspace */
661  	unsigned host_tagset:1;
662  
663  	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
664  	unsigned queuecommand_may_block:1;
665  
666  	/* Host responded with short (<36 bytes) INQUIRY result */
667  	unsigned short_inquiry:1;
668  
669  	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
670  	unsigned no_scsi2_lun_in_cdb:1;
671  
672  	/*
673  	 * Optional work queue to be utilized by the transport
674  	 */
675  	char work_q_name[20];
676  	struct workqueue_struct *work_q;
677  
678  	/*
679  	 * Task management function work queue
680  	 */
681  	struct workqueue_struct *tmf_work_q;
682  
683  	/*
684  	 * Value host_blocked counts down from
685  	 */
686  	unsigned int max_host_blocked;
687  
688  	/* Protection Information */
689  	unsigned int prot_capabilities;
690  	unsigned char prot_guard_type;
691  
692  	/* legacy crap */
693  	unsigned long base;
694  	unsigned long io_port;
695  	unsigned char n_io_port;
696  	unsigned char dma_channel;
697  	unsigned int  irq;
698  
699  
700  	enum scsi_host_state shost_state;
701  
702  	/* ldm bits */
703  	struct device		shost_gendev, shost_dev;
704  
705  	/*
706  	 * Points to the transport data (if any) which is allocated
707  	 * separately
708  	 */
709  	void *shost_data;
710  
711  	/*
712  	 * Points to the physical bus device we'd use to do DMA.
713  	 * Needed just in case we have virtual hosts.
714  	 */
715  	struct device *dma_dev;
716  
717  	/* Delay for runtime autosuspend */
718  	int rpm_autosuspend_delay;
719  
720  	ANDROID_KABI_RESERVE(1);
721  
722  	/*
723  	 * We should ensure that this is aligned, both for better performance
724  	 * and also because some compilers (m68k) don't automatically force
725  	 * alignment to a long boundary.
726  	 */
727  	unsigned long hostdata[]  /* Used for storage of host specific stuff */
728  		__attribute__ ((aligned (sizeof(unsigned long))));
729  };
730  
731  #define		class_to_shost(d)	\
732  	container_of(d, struct Scsi_Host, shost_dev)
733  
734  #define shost_printk(prefix, shost, fmt, a...)	\
735  	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
736  
737  static inline void *shost_priv(struct Scsi_Host *shost)
738  {
739  	return (void *)shost->hostdata;
740  }
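/*
 * Illustrative sketch (not part of this header): shost_priv() returns the
 * driver-private area stored in hostdata[] at the end of struct Scsi_Host,
 * sized by the second argument of scsi_host_alloc() (declared below).
 * struct foo_hba is a hypothetical driver structure.
 */
struct foo_hba {
	struct Scsi_Host *shost;
	void __iomem *regs;
};

static void foo_init_priv(struct Scsi_Host *shost)
{
	struct foo_hba *hba = shost_priv(shost);

	hba->shost = shost;	/* back-pointer kept for convenience */
}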
741  
742  int scsi_is_host_device(const struct device *);
743  
744  static inline struct Scsi_Host *dev_to_shost(struct device *dev)
745  {
746  	while (!scsi_is_host_device(dev)) {
747  		if (!dev->parent)
748  			return NULL;
749  		dev = dev->parent;
750  	}
751  	return container_of(dev, struct Scsi_Host, shost_gendev);
752  }
753  
754  static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
755  {
756  	return shost->shost_state == SHOST_RECOVERY ||
757  		shost->shost_state == SHOST_CANCEL_RECOVERY ||
758  		shost->shost_state == SHOST_DEL_RECOVERY ||
759  		shost->tmf_in_progress;
760  }
761  
762  extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
763  extern void scsi_flush_work(struct Scsi_Host *);
764  
765  extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
766  extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
767  					       struct device *,
768  					       struct device *);
769  #if defined(CONFIG_SCSI_PROC_FS)
770  struct proc_dir_entry *
771  scsi_template_proc_dir(const struct scsi_host_template *sht);
772  #else
773  #define scsi_template_proc_dir(sht) NULL
774  #endif
775  extern void scsi_scan_host(struct Scsi_Host *);
776  extern int scsi_resume_device(struct scsi_device *sdev);
777  extern int scsi_rescan_device(struct scsi_device *sdev);
778  extern void scsi_remove_host(struct Scsi_Host *);
779  extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
780  extern int scsi_host_busy(struct Scsi_Host *shost);
781  extern void scsi_host_put(struct Scsi_Host *t);
782  extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
783  extern const char *scsi_host_state_name(enum scsi_host_state);
784  extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
785  					    enum scsi_host_status status);
786  
787  static inline int __must_check scsi_add_host(struct Scsi_Host *host,
788  					     struct device *dev)
789  {
790  	return scsi_add_host_with_dma(host, dev, dev);
791  }
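/*
 * Illustrative sketch (not part of this header): the common probe/remove
 * sequence built from the functions declared above.  foo_sht and
 * foo_hw_init() are hypothetical; error handling is trimmed to the essentials.
 */
static int foo_probe(struct device *dev)
{
	struct Scsi_Host *shost;
	struct foo_hba *hba;
	int ret;

	shost = scsi_host_alloc(&foo_sht, sizeof(*hba));
	if (!shost)
		return -ENOMEM;

	hba = shost_priv(shost);
	ret = foo_hw_init(hba);
	if (ret)
		goto put_host;

	ret = scsi_add_host(shost, dev);	/* register with the midlayer */
	if (ret)
		goto put_host;

	scsi_scan_host(shost);			/* kick off device discovery */
	return 0;

put_host:
	scsi_host_put(shost);
	return ret;
}

static void foo_remove(struct Scsi_Host *shost)
{
	scsi_remove_host(shost);		/* quiesce and detach devices */
	scsi_host_put(shost);			/* drop the final reference */
}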
792  
793  static inline struct device *scsi_get_device(struct Scsi_Host *shost)
794  {
795          return shost->shost_gendev.parent;
796  }
797  
798  /**
799   * scsi_host_scan_allowed - Is scanning of this host allowed
800   * @shost:	Pointer to Scsi_Host.
801   **/
802  static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
803  {
804  	return shost->shost_state == SHOST_RUNNING ||
805  	       shost->shost_state == SHOST_RECOVERY;
806  }
807  
808  extern void scsi_unblock_requests(struct Scsi_Host *);
809  extern void scsi_block_requests(struct Scsi_Host *);
810  extern int scsi_host_block(struct Scsi_Host *shost);
811  extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
812  
813  void scsi_host_busy_iter(struct Scsi_Host *,
814  			 bool (*fn)(struct scsi_cmnd *, void *), void *priv);
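/*
 * Illustrative sketch (not part of this header): counting in-flight commands
 * with scsi_host_busy_iter().  The callback is invoked for each busy command;
 * returning true keeps the iteration going.
 */
static bool foo_count_one(struct scsi_cmnd *scmd, void *priv)
{
	unsigned int *count = priv;

	(*count)++;
	return true;	/* continue iterating */
}

static unsigned int foo_count_busy(struct Scsi_Host *shost)
{
	unsigned int count = 0;

	scsi_host_busy_iter(shost, foo_count_one, &count);
	return count;
}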
815  
816  struct class_container;
817  
818  /*
819   * DIF defines the exchange of protection information between
820   * initiator and SBC block device.
821   *
822   * DIX defines the exchange of protection information between OS and
823   * initiator.
824   */
825  enum scsi_host_prot_capabilities {
826  	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
827  	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
828  	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
829  
830  	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
831  	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
832  	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
833  	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
834  };
835  
836  /*
837   * SCSI hosts which support the Data Integrity Extensions must
838   * indicate their capabilities by setting the prot_capabilities using
839   * this call.
840   */
841  static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
842  {
843  	shost->prot_capabilities = mask;
844  }
845  
846  static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
847  {
848  	return shost->prot_capabilities;
849  }
850  
851  static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
852  {
853  	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
854  }
855  
856  static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
857  {
858  	static unsigned char cap[] = { 0,
859  				       SHOST_DIF_TYPE1_PROTECTION,
860  				       SHOST_DIF_TYPE2_PROTECTION,
861  				       SHOST_DIF_TYPE3_PROTECTION };
862  
863  	if (target_type >= ARRAY_SIZE(cap))
864  		return 0;
865  
866  	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
867  }
868  
869  static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
870  {
871  #if defined(CONFIG_BLK_DEV_INTEGRITY)
872  	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
873  				       SHOST_DIX_TYPE1_PROTECTION,
874  				       SHOST_DIX_TYPE2_PROTECTION,
875  				       SHOST_DIX_TYPE3_PROTECTION };
876  
877  	if (target_type >= ARRAY_SIZE(cap))
878  		return 0;
879  
880  	return shost->prot_capabilities & cap[target_type];
881  #endif
882  	return 0;
883  }
884  
885  /*
886   * All DIX-capable initiators must support the T10-mandated CRC
887   * checksum.  Controllers can optionally implement the IP checksum
888   * scheme which has much lower impact on system performance.  Note
889   * that the main rationale for the checksum is to match integrity
890  	 * metadata with data.  Detecting bit errors is a job for ECC memory
891   * and buses.
892   */
893  
894  enum scsi_host_guard_type {
895  	SHOST_DIX_GUARD_CRC = 1 << 0,
896  	SHOST_DIX_GUARD_IP  = 1 << 1,
897  };
898  
899  static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
900  {
901  	shost->prot_guard_type = type;
902  }
903  
904  static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
905  {
906  	return shost->prot_guard_type;
907  }
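/*
 * Illustrative sketch (not part of this header): a DIX-capable driver
 * advertising T10 protection information support during host setup, using
 * the helpers above.  foo_hw_supports_dif() is a hypothetical capability
 * check.
 */
static void foo_setup_protection(struct Scsi_Host *shost, struct foo_hba *hba)
{
	if (!foo_hw_supports_dif(hba))
		return;

	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIF_TYPE3_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE3_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}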
908  
909  extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
910  
911  #endif /* _SCSI_SCSI_HOST_H */
912