/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <linux/android_kabi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;


/*
 * The various choices mean:
 * NONE: Self evident.	Host adapter is not capable of scatter-gather.
 * ALL:	 Means that the host adapter module can do scatter-gather,
 *	 and that there is no limit to the size of the table to which
 *	 we scatter/gather data.  The value we set here is the maximum
 *	 single element sglist.  To use chained sglists, the adapter
 *	 has to set a value beyond ALL (and correctly use the chain
 *	 handling API).
 * Anything else:  Indicates the maximum number of chains that can be
 *	 used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command.  It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs.  The done() function must be called on the command
	 * when the driver has finished with it. (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_change_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	int (* map_queues)(struct Scsi_Host *shost);

	/*
	 * This function determines the BIOS parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other information to
	 * the world outside the kernel, i.e. userspace, and it also provides
	 * an interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

	/* This is an optional routine that allows transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */

	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/* The tag allocation policy if the block layer is used to manage tags */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the low-level driver supports blk-mq only */
	unsigned force_blk_mq:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;
	struct scsi_host_cmd_pool *cmd_pool;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
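
/*
 * Illustrative sketch (not part of this header): a minimal host template
 * for a hypothetical "foo" driver.  The foo_* names are invented for the
 * example; fields that are not listed keep their default (zero) values.
 */
#if 0	/* example only, not compiled */
static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* hand the command to the (imaginary) hardware here */
	return 0;
}

static struct scsi_host_template foo_template = {
	.module		= THIS_MODULE,
	.name		= "foo",
	.proc_name	= "foo",
	.queuecommand	= foo_queuecommand,
	.can_queue	= 32,
	.this_id	= -1,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= SCSI_DEFAULT_MAX_SECTORS,
	.cmd_per_lun	= 2,
};
#endif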

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck (cmd, cmd->scsi_done);			\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
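
/*
 * Illustrative sketch (hypothetical "foo" driver): a legacy driver keeps its
 * serialized entry point as foo_queuecommand_lck() and lets DEF_SCSI_QCMD
 * generate the foo_queuecommand() wrapper that takes and releases the host
 * lock around it; the host template then points at the generated wrapper.
 */
#if 0	/* example only, not compiled */
static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	/* start the I/O here; runs with shost->host_lock held */
	return 0;
}

static DEF_SCSI_QCMD(foo_queuecommand)

/* in the host template:  .queuecommand = foo_queuecommand, */
#endif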


/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;  /* Error recovery thread. */
	struct completion     * eh_action; /* Wait for specific actions on the
					      host. */
	wait_queue_head_t       host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/* Area to keep a shared tag map */
	struct blk_mq_tag_set	tag_set;

	atomic_t host_busy;		   /* commands actually active on low-level */
	atomic_t host_blocked;

	unsigned int host_failed;	   /* commands that failed.
					      protected by host_lock */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	unsigned use_cmd_list:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
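
/*
 * Illustrative sketch: hostdata[] is sized by the privsize argument a driver
 * passes to scsi_host_alloc() and is reached through shost_priv().  struct
 * foo_hba and foo_slave_configure() are invented names for the example.
 */
#if 0	/* example only, not compiled */
struct foo_hba {
	void __iomem *regs;
	unsigned int max_tags;
};

static int foo_slave_configure(struct scsi_device *sdev)
{
	struct foo_hba *hba = shost_priv(sdev->host);

	/* clamp the queue depth to what the example hardware supports */
	scsi_change_queue_depth(sdev, hba->max_tags);
	return 0;
}
#endif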

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}
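
/*
 * Illustrative sketch of the usual registration sequence: a hypothetical PCI
 * driver allocates the host with room for its private data, registers it
 * below the PCI device, and only then lets the midlayer scan for devices.
 */
#if 0	/* example only, not compiled */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	int error;

	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
	if (!shost)
		return -ENOMEM;

	/* ... map registers, set shost->irq, shost->max_id, etc. ... */

	error = scsi_add_host(shost, &pdev->dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}

	scsi_scan_host(shost);
	pci_set_drvdata(pdev, shost);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
#endif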

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
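
/*
 * Illustrative sketch: a driver or transport that needs to address the
 * adapter itself (rather than a real target) can use a temporary pseudo
 * device.  foo_ping_adapter() is an invented name for the example.
 */
#if 0	/* example only, not compiled */
static void foo_ping_adapter(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = scsi_get_host_dev(shost);

	if (!sdev)
		return;

	/* ... issue the internal command through sdev ... */

	scsi_free_host_dev(sdev);
}
#endif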

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
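
/*
 * Illustrative sketch: a DIF/DIX capable driver would typically advertise its
 * protection capabilities and guard type after allocating the host and before
 * calling scsi_add_host().  The mask below is just an example combination.
 */
#if 0	/* example only, not compiled */
static void foo_setup_protection(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIF_TYPE3_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE3_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
}
#endif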

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */