#ifndef _SCSI_SCSI_DEVICE_H
#define _SCSI_SCSI_DEVICE_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <asm/atomic.h>

struct request_queue;
struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;

struct scsi_mode_data {
        __u32   length;
        __u16   block_descriptor_length;
        __u8    medium_type;
        __u8    device_specific;
        __u8    header_length;
        __u8    longlba:1;
};

/*
 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_lib:scsi_device_set_state().
 */
enum scsi_device_state {
        SDEV_CREATED = 1,       /* device created but not added to sysfs
                                 * Only internal commands allowed (for inq) */
        SDEV_RUNNING,           /* device properly configured
                                 * All commands allowed */
        SDEV_CANCEL,            /* beginning to delete device
                                 * Only error handler commands allowed */
        SDEV_DEL,               /* device deleted
                                 * no commands allowed */
        SDEV_QUIESCE,           /* Device quiescent. No block commands
                                 * will be accepted, only specials (which
                                 * originate in the mid-layer) */
        SDEV_OFFLINE,           /* Device offlined (by error handling or
                                 * user request) */
        SDEV_BLOCK,             /* Device blocked by scsi lld. No
                                 * scsi commands from user or midlayer
                                 * should be issued to the scsi
                                 * lld. */
        SDEV_CREATED_BLOCK,     /* same as above but for created devices */
};

enum scsi_device_event {
        SDEV_EVT_MEDIA_CHANGE = 1,      /* media has changed */

        SDEV_EVT_LAST = SDEV_EVT_MEDIA_CHANGE,
        SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
};

struct scsi_event {
        enum scsi_device_event evt_type;
        struct list_head node;

        /* put union of data structures, for non-simple event types,
         * here
         */
};

struct scsi_device {
        struct Scsi_Host *host;
        struct request_queue *request_queue;

        /* the next two are protected by the host->host_lock */
        struct list_head siblings;      /* list of all devices on this host */
        struct list_head same_target_siblings;  /* just the devices sharing same target id */

        /* this is now protected by the request_queue->queue_lock */
        unsigned int device_busy;       /* commands actually active on
                                         * low-level. protected by queue_lock. */
        spinlock_t list_lock;
        struct list_head cmd_list;      /* queue of in use SCSI Command structures */
        struct list_head starved_entry;
        struct scsi_cmnd *current_cmnd; /* currently active command */
        unsigned short queue_depth;     /* How deep of a queue we want */
        unsigned short last_queue_full_depth;  /* These two are used by */
        unsigned short last_queue_full_count;  /* scsi_track_queue_full() */
        unsigned long last_queue_full_time;    /* don't let QUEUE_FULLs on the same
                                                  jiffie count on our counter, they
                                                  could all be from the same event. */

        unsigned int id, lun, channel;

        unsigned int manufacturer;      /* Manufacturer of device, for using
                                         * vendor-specific cmd's */
        unsigned sector_size;           /* size in bytes */

        void *hostdata;                 /* available to low-level driver */
        char type;
        char scsi_level;
        char inq_periph_qual;           /* PQ from INQUIRY data */
        unsigned char inquiry_len;      /* valid bytes in 'inquiry' */
        unsigned char *inquiry;         /* INQUIRY response data */
        const char *vendor;             /* [back_compat] point into 'inquiry' ... */
        const char *model;              /* ... after scan; point to static string */
        const char *rev;                /* ... "nullnullnullnull" before scan */
        unsigned char current_tag;      /* current tag */
        struct scsi_target *sdev_target;        /* used only for single_lun */

        unsigned int sdev_bflags;       /* black/white flags as also found in
                                         * scsi_devinfo.[hc]. For now used only to
                                         * pass settings from slave_alloc to scsi
                                         * core. */
        unsigned writeable:1;
        unsigned removable:1;
        unsigned changed:1;             /* Data invalid due to media change */
        unsigned busy:1;                /* Used to prevent races */
        unsigned lockable:1;            /* Able to prevent media removal */
        unsigned locked:1;              /* Media removal disabled */
        unsigned borken:1;              /* Tell the Seagate driver to be
                                         * painfully slow on this device */
        unsigned disconnect:1;          /* can disconnect */
        unsigned soft_reset:1;          /* Uses soft reset option */
        unsigned sdtr:1;                /* Device supports SDTR messages */
        unsigned wdtr:1;                /* Device supports WDTR messages */
        unsigned ppr:1;                 /* Device supports PPR messages */
        unsigned tagged_supported:1;    /* Supports SCSI-II tagged queuing */
        unsigned simple_tags:1;         /* simple queue tag messages are enabled */
        unsigned ordered_tags:1;        /* ordered queue tag messages are enabled */
        unsigned was_reset:1;           /* There was a bus reset on the bus for
                                         * this device */
        unsigned expecting_cc_ua:1;     /* Expecting a CHECK_CONDITION/UNIT_ATTN
                                         * because we did a bus reset. */
        unsigned use_10_for_rw:1;       /* first try 10-byte read / write */
        unsigned use_10_for_ms:1;       /* first try 10-byte mode sense/select */
        unsigned skip_ms_page_8:1;      /* do not use MODE SENSE page 0x08 */
        unsigned skip_ms_page_3f:1;     /* do not use MODE SENSE page 0x3f */
        unsigned use_192_bytes_for_3f:1;        /* ask for 192 bytes from page 0x3f */
        unsigned no_start_on_add:1;     /* do not issue start on add */
        unsigned allow_restart:1;       /* issue START_UNIT in error handler */
        unsigned manage_start_stop:1;   /* Let HLD (sd) manage start/stop */
        unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
        unsigned no_uld_attach:1;       /* disable connecting to upper level drivers */
        unsigned select_no_atn:1;
        unsigned fix_capacity:1;        /* READ_CAPACITY is too high by 1 */
        unsigned guess_capacity:1;      /* READ_CAPACITY might be too high by 1 */
        unsigned retry_hwerror:1;       /* Retry HARDWARE_ERROR */
        unsigned last_sector_bug:1;     /* do not use multisector accesses on
                                           SD_LAST_BUGGY_SECTORS */

        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS);  /* supported events */
        struct list_head event_list;    /* asserted events */
        struct work_struct event_work;

        unsigned int device_blocked;    /* Device returned QUEUE_FULL. */

        unsigned int max_device_blocked;        /* what device_blocked counts down from */
#define SCSI_DEFAULT_DEVICE_BLOCKED     3

        atomic_t iorequest_cnt;
        atomic_t iodone_cnt;
        atomic_t ioerr_cnt;

        struct device sdev_gendev,
                      sdev_dev;

        struct execute_work ew;         /* used to get process context on put */

        struct scsi_dh_data *scsi_dh_data;
        enum scsi_device_state sdev_state;
        unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));

struct scsi_dh_devlist {
        char *vendor;
        char *model;
};

struct scsi_device_handler {
        /* Used by the infrastructure */
        struct list_head list;          /* list of scsi_device_handlers */

        /* Filled by the hardware handler */
        struct module *module;
        const char *name;
        const struct scsi_dh_devlist *devlist;
        int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
        int (*attach)(struct scsi_device *);
        void (*detach)(struct scsi_device *);
        int (*activate)(struct scsi_device *);
        int (*prep_fn)(struct scsi_device *, struct request *);
};

struct scsi_dh_data {
        struct scsi_device_handler *scsi_dh;
        char buf[0];
};
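
/*
 * Illustrative sketch only: a hardware handler fills in the callback
 * table above and registers it with scsi_register_device_handler()
 * (declared later in this header). The handler name, device list and
 * callbacks below are hypothetical placeholders, not an existing handler.
 *
 *      static const struct scsi_dh_devlist example_dh_list[] = {
 *              {"VENDOR", "MODEL"},
 *              {NULL, NULL},
 *      };
 *
 *      static int example_dh_attach(struct scsi_device *sdev)
 *      {
 *              return 0;
 *      }
 *
 *      static void example_dh_detach(struct scsi_device *sdev)
 *      {
 *      }
 *
 *      static struct scsi_device_handler example_dh = {
 *              .name    = "example_dh",
 *              .module  = THIS_MODULE,
 *              .devlist = example_dh_list,
 *              .attach  = example_dh_attach,
 *              .detach  = example_dh_detach,
 *      };
 *
 *      err = scsi_register_device_handler(&example_dh);
 */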

#define to_scsi_device(d)       \
        container_of(d, struct scsi_device, sdev_gendev)
#define class_to_sdev(d)        \
        container_of(d, struct scsi_device, sdev_dev)
#define transport_class_to_sdev(class_dev) \
        to_scsi_device(class_dev->parent)

#define sdev_printk(prefix, sdev, fmt, a...)    \
        dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)

#define scmd_printk(prefix, scmd, fmt, a...)                            \
        (scmd)->request->rq_disk ?                                      \
        sdev_printk(prefix, (scmd)->device, "[%s] " fmt,                \
                    (scmd)->request->rq_disk->disk_name, ##a) :         \
        sdev_printk(prefix, (scmd)->device, fmt, ##a)

enum scsi_target_state {
        STARGET_CREATED = 1,
        STARGET_RUNNING,
        STARGET_DEL,
};

/*
 * scsi_target: representation of a scsi target, for now, this is only
 * used for single_lun devices. If no one has active IO to the target,
 * starget_sdev_user is NULL, else it points to the active sdev.
 */
struct scsi_target {
        struct scsi_device *starget_sdev_user;
        struct list_head siblings;
        struct list_head devices;
        struct device dev;
        unsigned int reap_ref;          /* protected by the host lock */
        unsigned int channel;
        unsigned int id;                /* target id ... replace
                                         * scsi_device.id eventually */
        unsigned int create:1;          /* signal that it needs to be added */
        unsigned int single_lun:1;      /* Indicates we should only
                                         * allow I/O to one of the luns
                                         * for the device at a time. */
        unsigned int pdt_1f_for_no_lun; /* PDT = 0x1f means no lun present */
        /* commands actually active on LLD. protected by host lock. */
        unsigned int target_busy;
        /*
         * LLDs should set this in the slave_alloc host template callout.
         * If set to zero then there is no limit.
         */
        unsigned int can_queue;
        unsigned int target_blocked;
        unsigned int max_target_blocked;
#define SCSI_DEFAULT_TARGET_BLOCKED     3

        char scsi_level;
        struct execute_work ew;
        enum scsi_target_state state;
        void *hostdata;                 /* available to low-level driver */
        unsigned long starget_data[0];  /* for the transport */
        /* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));

#define to_scsi_target(d)       container_of(d, struct scsi_target, dev)
static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
{
        return to_scsi_target(sdev->sdev_gendev.parent);
}
#define transport_class_to_starget(class_dev) \
        to_scsi_target(class_dev->parent)

#define starget_printk(prefix, starget, fmt, a...)      \
        dev_printk(prefix, &(starget)->dev, fmt, ##a)

extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
                uint, uint, uint, void *hostdata);
extern int scsi_add_device(struct Scsi_Host *host, uint channel,
                           uint target, uint lun);
extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);

extern int scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
                                              uint, uint, uint);
extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *,
                                                uint, uint, uint);
extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *,
                                                        uint);
extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
                                                          uint);
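
/*
 * Example (illustrative sketch): looking up a device by its address.
 * On success scsi_device_lookup() returns with a reference held, which
 * must be released with scsi_device_put(); the channel/id/lun values
 * here are arbitrary.
 *
 *      struct scsi_device *sdev;
 *
 *      sdev = scsi_device_lookup(shost, 0, 1, 0);
 *      if (sdev) {
 *              sdev_printk(KERN_INFO, sdev, "found\n");
 *              scsi_device_put(sdev);
 *      }
 */
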
extern void starget_for_each_device(struct scsi_target *, void *,
                     void (*fn)(struct scsi_device *, void *));
extern void __starget_for_each_device(struct scsi_target *, void *,
                                      void (*fn)(struct scsi_device *,
                                                 void *));

/* only exposed to implement shost_for_each_device */
extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
                                                  struct scsi_device *);

/**
 * shost_for_each_device - iterate over all devices of a host
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct Scsi_Host to iterate over
 *
 * Iterator that returns each device attached to @shost. This loop
 * takes a reference on each device and releases it at the end. If
 * you break out of the loop, you must call scsi_device_put(sdev).
 */
#define shost_for_each_device(sdev, shost) \
        for ((sdev) = __scsi_iterate_devices((shost), NULL); \
             (sdev); \
             (sdev) = __scsi_iterate_devices((shost), (sdev)))
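
/*
 * Example (illustrative sketch): find one device and keep using it after
 * the loop. Because we break out early, the reference the iterator took
 * on the current device is still held and must be dropped explicitly.
 * "wanted_lun" and "do_something" are hypothetical.
 *
 *      struct scsi_device *sdev, *found = NULL;
 *
 *      shost_for_each_device(sdev, shost) {
 *              if (sdev->lun == wanted_lun) {
 *                      found = sdev;
 *                      break;
 *              }
 *      }
 *      if (found) {
 *              do_something(found);
 *              scsi_device_put(found);
 *      }
 */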

/**
 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
 * @sdev: the &struct scsi_device to use as a cursor
 * @shost: the &struct Scsi_Host to iterate over
 *
 * Iterator that returns each device attached to @shost. It does _not_
 * take a reference on the scsi_device, so the whole loop must be
 * protected by shost->host_lock.
 *
 * Note: The only reason to use this is because you need to access the
 * device list in interrupt context. Otherwise you really want to use
 * shost_for_each_device instead.
 */
#define __shost_for_each_device(sdev, shost) \
        list_for_each_entry((sdev), &((shost)->__devices), siblings)
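
/*
 * Example (illustrative sketch): walking the list with the host lock held,
 * e.g. from a context that must not sleep. No references are taken, so
 * the devices may only be touched while the lock is held.
 *
 *      unsigned long flags;
 *      struct scsi_device *sdev;
 *
 *      spin_lock_irqsave(shost->host_lock, flags);
 *      __shost_for_each_device(sdev, shost)
 *              sdev_printk(KERN_INFO, sdev, "device present\n");
 *      spin_unlock_irqrestore(shost->host_lock, flags);
 */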

extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
extern int scsi_track_queue_full(struct scsi_device *, int);
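
/*
 * Illustrative sketch only: an LLD's slave_configure() callout might set
 * up tagged queueing with scsi_adjust_queue_depth() like this; the depth
 * of 64 is an arbitrary example and "example_slave_configure" is
 * hypothetical. scsi_track_queue_full() is the companion helper for
 * ramping the depth back down when the device reports QUEUE_FULL.
 *
 *      static int example_slave_configure(struct scsi_device *sdev)
 *      {
 *              if (sdev->tagged_supported)
 *                      scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *              else
 *                      scsi_adjust_queue_depth(sdev, 0, 1);
 *              return 0;
 *      }
 */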

extern int scsi_set_medium_removal(struct scsi_device *, char);

extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
                           unsigned char *buffer, int len, int timeout,
                           int retries, struct scsi_mode_data *data,
                           struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
                            int modepage, unsigned char *buffer, int len,
                            int timeout, int retries,
                            struct scsi_mode_data *data,
                            struct scsi_sense_hdr *);
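
/*
 * Example (illustrative sketch): reading the caching mode page (0x08)
 * and letting scsi_mode_sense() fill in a struct scsi_mode_data; the
 * buffer size, timeout and retry count are arbitrary example values.
 *
 *      unsigned char buffer[512];
 *      struct scsi_mode_data data;
 *      struct scsi_sense_hdr sshdr;
 *      int res;
 *
 *      res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *                            30 * HZ, 3, &data, &sshdr);
 */
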
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
                                int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_device_set_state(struct scsi_device *sdev,
                                 enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
                                         gfp_t gfpflags);
extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
extern void sdev_evt_send_simple(struct scsi_device *sdev,
                                 enum scsi_device_event evt_type, gfp_t gfpflags);
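
/*
 * Example (illustrative): a driver that detects a media change could
 * notify user space through the event interface with
 *
 *      sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 *
 * GFP_ATOMIC can be passed instead when sleeping is not allowed.
 */
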
extern int scsi_device_quiesce(struct scsi_device *sdev);
extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
                             unsigned int id, unsigned int lun, int rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *);
extern void scsi_remove_target(struct device *);
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
extern int scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,
                        int flag, int *resid);
extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                            int data_direction, void *buffer, unsigned bufflen,
                            struct scsi_sense_hdr *, int timeout, int retries,
                            int *resid);
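
/*
 * Illustrative sketch only: issuing a TEST UNIT READY through
 * scsi_execute_req(); the timeout and retry count are arbitrary example
 * values (the scsi_test_unit_ready() helper above wraps this kind of call).
 *
 *      unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      struct scsi_sense_hdr sshdr;
 *      int result;
 *
 *      result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *                                30 * HZ, 3, NULL);
 */
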
extern int scsi_execute_async(struct scsi_device *sdev,
                              const unsigned char *cmd, int cmd_len, int data_direction,
                              void *buffer, unsigned bufflen, int use_sg,
                              int timeout, int retries, void *privdata,
                              void (*done)(void *, char *, int, int),
                              gfp_t gfp);

static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
{
        return device_reprobe(&sdev->sdev_gendev);
}

static inline unsigned int sdev_channel(struct scsi_device *sdev)
{
        return sdev->channel;
}

static inline unsigned int sdev_id(struct scsi_device *sdev)
{
        return sdev->id;
}

#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)

/*
 * checks for positions of the SCSI state machine
 */
static inline int scsi_device_online(struct scsi_device *sdev)
{
        return sdev->sdev_state != SDEV_OFFLINE;
}
static inline int scsi_device_blocked(struct scsi_device *sdev)
{
        return sdev->sdev_state == SDEV_BLOCK ||
                sdev->sdev_state == SDEV_CREATED_BLOCK;
}
static inline int scsi_device_created(struct scsi_device *sdev)
{
        return sdev->sdev_state == SDEV_CREATED ||
                sdev->sdev_state == SDEV_CREATED_BLOCK;
}

/* accessor functions for the SCSI parameters */
static inline int scsi_device_sync(struct scsi_device *sdev)
{
        return sdev->sdtr;
}
static inline int scsi_device_wide(struct scsi_device *sdev)
{
        return sdev->wdtr;
}
static inline int scsi_device_dt(struct scsi_device *sdev)
{
        return sdev->ppr;
}
static inline int scsi_device_dt_only(struct scsi_device *sdev)
{
        if (sdev->inquiry_len < 57)
                return 0;
        return (sdev->inquiry[56] & 0x0c) == 0x04;
}
static inline int scsi_device_ius(struct scsi_device *sdev)
{
        if (sdev->inquiry_len < 57)
                return 0;
        return sdev->inquiry[56] & 0x01;
}
static inline int scsi_device_qas(struct scsi_device *sdev)
{
        if (sdev->inquiry_len < 57)
                return 0;
        return sdev->inquiry[56] & 0x02;
}
static inline int scsi_device_enclosure(struct scsi_device *sdev)
{
        return sdev->inquiry[6] & (1<<6);
}

static inline int scsi_device_protection(struct scsi_device *sdev)
{
        return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
}

#define MODULE_ALIAS_SCSI_DEVICE(type) \
        MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
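
/*
 * Example: an upper level driver that binds to direct-access devices
 * might declare
 *
 *      MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
 *
 * so that it can be loaded automatically when such a device is found.
 */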

#endif /* _SCSI_SCSI_DEVICE_H */