1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /************************************************************************
3  * Linux driver for                                                     *
4  * ICP vortex GmbH:    GDT PCI Disk Array Controllers                   *
5  * Intel Corporation:  Storage RAID Controllers                         *
6  *                                                                      *
7  * gdth.c                                                               *
8  * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner                 *
9  * Copyright (C) 2002-04 Intel Corporation                              *
10  * Copyright (C) 2003-06 Adaptec Inc.                                   *
11  * <achim_leubner@adaptec.com>                                          *
12  *                                                                      *
13  * Additions/Fixes:                                                     *
14  * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>               *
15  * Johannes Dinner <johannes_dinner@adaptec.com>                        *
16  *                                                                      *
17  *                                                                      *
18  * Linux kernel 2.6.x supported						*
19  *                                                                      *
20  ************************************************************************/
21 
22 /* All GDT Disk Array Controllers are fully supported by this driver.
23  * This includes the PCI SCSI Disk Array Controllers and the
24  * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
25  * list of all controller types.
26  *
27  * After the optional list of IRQ values, other possible
28  * command line options are:
29  * disable:Y                    disable driver
30  * disable:N                    enable driver
31  * reserve_mode:0               reserve no drives for the raw service
32  * reserve_mode:1               reserve all not initialized, removable drives
33  * reserve_mode:2               reserve all not initialized drives
34  * reserve_list:h,b,t,l,h,b,t,l,...     reserve particular drive(s) with
35  *                              h- controller no., b- channel no.,
36  *                              t- target ID, l- LUN
37  * reverse_scan:Y               reverse scan order for PCI controllers
38  * reverse_scan:N               scan PCI controllers like BIOS
39  * max_ids:x                    x - target ID count per channel (1..MAXID)
40  * rescan:Y                     rescan all channels/IDs
41  * rescan:N                     use all devices found until now
42  * hdr_channel:x                x - number of virtual bus for host drives
43  * shared_access:Y              disable driver reserve/release protocol to
44  *                              access a shared resource from several nodes,
45  *                              appropriate controller firmware required
46  * shared_access:N              enable driver reserve/release protocol
47  * force_dma32:Y                use only 32 bit DMA mode
48  * force_dma32:N                use 64 bit DMA mode, if supported
49  *
50  * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
51  *                          max_ids:127,rescan:N,hdr_channel:0,
52  *                          shared_access:Y,force_dma32:N".
53  * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
54  *
55  * When loading the gdth driver as a module, the same options are available.
56  * You can set the IRQs with "IRQ=...". However, the syntax to specify the
57  * options changes slightly. You must replace all ',' between options
58  * with ' ' and all ':' with '=' and you must use
59  * '1' in place of 'Y' and '0' in place of 'N'.
60  *
61  * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
62  *           max_ids=127 rescan=0 hdr_channel=0 shared_access=1
63  *           force_dma32=0"
64  * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
65  */
66 
67 /* The meaning of the Scsi_Pointer members in this driver is as follows:
68  * ptr:                     Chaining
69  * this_residual:           unused
70  * buffer:                  unused
71  * dma_handle:              unused
72  * buffers_residual:        unused
73  * Status:                  unused
74  * Message:                 unused
75  * have_data_in:            unused
76  * sent_command:            unused
77  * phase:                   unused
78  */
79 
80 /* statistics */
81 #define GDTH_STATISTICS
82 
83 #include <linux/module.h>
84 
85 #include <linux/version.h>
86 #include <linux/kernel.h>
87 #include <linux/types.h>
88 #include <linux/pci.h>
89 #include <linux/string.h>
90 #include <linux/ctype.h>
91 #include <linux/ioport.h>
92 #include <linux/delay.h>
93 #include <linux/interrupt.h>
94 #include <linux/in.h>
95 #include <linux/proc_fs.h>
96 #include <linux/time.h>
97 #include <linux/timer.h>
98 #include <linux/dma-mapping.h>
99 #include <linux/list.h>
100 #include <linux/mutex.h>
101 #include <linux/slab.h>
102 #include <linux/reboot.h>
103 
104 #include <asm/dma.h>
105 #include <asm/io.h>
106 #include <linux/uaccess.h>
107 #include <linux/spinlock.h>
108 #include <linux/blkdev.h>
109 #include <linux/scatterlist.h>
110 
111 #include "scsi.h"
112 #include <scsi/scsi_host.h>
113 #include "gdth.h"
114 
115 static DEFINE_MUTEX(gdth_mutex);
116 static void gdth_delay(int milliseconds);
117 static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
118 static irqreturn_t gdth_interrupt(int irq, void *dev_id);
119 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
120                                     int gdth_from_wait, int* pIndex);
121 static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
122                                                                struct scsi_cmnd *scp);
123 static int gdth_async_event(gdth_ha_str *ha);
124 static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
125 
126 static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
127 static void gdth_next(gdth_ha_str *ha);
128 static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
129 static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
130 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
131                                       u16 idx, gdth_evt_data *evt);
132 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
133 static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
134                                gdth_evt_str *estr);
135 static void gdth_clear_events(void);
136 
137 static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
138                                     char *buffer, u16 count);
139 static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
140 static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
141 			       u16 hdrive);
142 
143 static void gdth_enable_int(gdth_ha_str *ha);
144 static int gdth_test_busy(gdth_ha_str *ha);
145 static int gdth_get_cmd_index(gdth_ha_str *ha);
146 static void gdth_release_event(gdth_ha_str *ha);
147 static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
148 static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
149                                              u32 p1, u64 p2,u64 p3);
150 static int gdth_search_drives(gdth_ha_str *ha);
151 static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
152 
153 static const char *gdth_ctr_name(gdth_ha_str *ha);
154 
155 static int gdth_open(struct inode *inode, struct file *filep);
156 static int gdth_close(struct inode *inode, struct file *filep);
157 static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
158 			        unsigned long arg);
159 
160 static void gdth_flush(gdth_ha_str *ha);
161 static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
162 static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
163 				struct gdth_cmndinfo *cmndinfo);
164 static void gdth_scsi_done(struct scsi_cmnd *scp);
165 
166 #ifdef DEBUG_GDTH
167 static u8   DebugState = DEBUG_GDTH;
168 #define TRACE(a)    {if (DebugState==1) {printk a;}}
169 #define TRACE2(a)   {if (DebugState==1 || DebugState==2) {printk a;}}
170 #define TRACE3(a)   {if (DebugState!=0) {printk a;}}
171 #else /* !DEBUG */
172 #define TRACE(a)
173 #define TRACE2(a)
174 #define TRACE3(a)
175 #endif
176 
177 #ifdef GDTH_STATISTICS
178 static u32 max_rq=0, max_index=0, max_sg=0;
179 static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
180 static struct timer_list gdth_timer;
181 #endif
182 
183 #define PTR2USHORT(a)   (u16)(unsigned long)(a)
184 #define GDTOFFSOF(a,b)  (size_t)&(((a*)0)->b)
185 #define INDEX_OK(i,t)   ((i)<ARRAY_SIZE(t))
186 
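/* BUS_L2P() below maps a logical (SCSI midlayer) bus number to the
 * physical channel index: buses above the virtual host-drive bus
 * (ha->virt_bus) are shifted down by one. */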
187 #define BUS_L2P(a,b)    ((b)>(a)->virt_bus ? (b-1):(b))
188 
189 static u8   gdth_polling;                           /* polling if TRUE */
190 static int      gdth_ctr_count  = 0;                    /* controller count */
191 static LIST_HEAD(gdth_instances);                       /* controller list */
192 static u8   gdth_write_through = FALSE;             /* write through */
193 static gdth_evt_str ebuffer[MAX_EVENTS];                /* event buffer */
194 static int elastidx;
195 static int eoldidx;
196 static int major;
197 
198 #define DIN     1                               /* IN data direction */
199 #define DOU     2                               /* OUT data direction */
200 #define DNO     DIN                             /* no data transfer */
201 #define DUN     DIN                             /* unknown data direction */
202 static u8 gdth_direction_tab[0x100] = {
203     DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
204     DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
205     DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
206     DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
207     DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
208     DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
209     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
210     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
211     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
212     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
213     DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
214     DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
215     DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
216     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
217     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
218     DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
219 };
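
/* Illustrative sketch (not part of gdth.c): the table above maps each of
 * the 256 possible SCSI opcodes, indexed by the first CDB byte, to one of
 * the DIN/DOU/DNO/DUN direction codes defined a few lines earlier. A
 * minimal lookup helper might look like this; the helper name is
 * hypothetical. */
static inline u8 gdth_cmd_direction(const struct scsi_cmnd *scp)
{
    /* scp->cmnd[0] is the SCSI opcode byte */
    return gdth_direction_tab[scp->cmnd[0]];
}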
220 
221 /* LILO and modprobe/insmod parameters */
222 /* disable driver flag */
223 static int disable __initdata = 0;
224 /* reserve flag */
225 static int reserve_mode = 1;
226 /* reserve list */
227 static int reserve_list[MAX_RES_ARGS] =
228 {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
229  0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
230  0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
231 /* scan order for PCI controllers */
232 static int reverse_scan = 0;
233 /* virtual channel for the host drives */
234 static int hdr_channel = 0;
235 /* max. IDs per channel */
236 static int max_ids = MAXID;
237 /* rescan all IDs */
238 static int rescan = 0;
239 /* shared access */
240 static int shared_access = 1;
241 /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
242 static int force_dma32 = 0;
243 
244 /* parameters for modprobe/insmod */
245 module_param(disable, int, 0);
246 module_param(reserve_mode, int, 0);
247 module_param_array(reserve_list, int, NULL, 0);
248 module_param(reverse_scan, int, 0);
249 module_param(hdr_channel, int, 0);
250 module_param(max_ids, int, 0);
251 module_param(rescan, int, 0);
252 module_param(shared_access, int, 0);
253 module_param(force_dma32, int, 0);
254 MODULE_AUTHOR("Achim Leubner");
255 MODULE_LICENSE("GPL");
256 
257 /* ioctl interface */
258 static const struct file_operations gdth_fops = {
259     .unlocked_ioctl   = gdth_unlocked_ioctl,
260     .open    = gdth_open,
261     .release = gdth_close,
262     .llseek = noop_llseek,
263 };
264 
265 #include "gdth_proc.h"
266 #include "gdth_proc.c"
267 
268 static gdth_ha_str *gdth_find_ha(int hanum)
269 {
270 	gdth_ha_str *ha;
271 
272 	list_for_each_entry(ha, &gdth_instances, list)
273 		if (hanum == ha->hanum)
274 			return ha;
275 
276 	return NULL;
277 }
278 
279 static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
280 {
281 	struct gdth_cmndinfo *priv = NULL;
282 	unsigned long flags;
283 	int i;
284 
285 	spin_lock_irqsave(&ha->smp_lock, flags);
286 
287 	for (i=0; i<GDTH_MAXCMDS; ++i) {
288 		if (ha->cmndinfo[i].index == 0) {
289 			priv = &ha->cmndinfo[i];
290 			memset(priv, 0, sizeof(*priv));
291 			priv->index = i+1;
292 			break;
293 		}
294 	}
295 
296 	spin_unlock_irqrestore(&ha->smp_lock, flags);
297 
298 	return priv;
299 }
300 
301 static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
302 {
303 	BUG_ON(!priv);
304 	priv->index = 0;
305 }
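
/* Illustrative sketch (not part of the driver): gdth_get_cmndinfo() and
 * gdth_put_cmndinfo() implement a small slot pool -- a cmndinfo entry is
 * free while its index field is 0 and is claimed under ha->smp_lock by
 * storing i+1 in it. The function name below is hypothetical. */
static int gdth_cmndinfo_usage_example(gdth_ha_str *ha)
{
    struct gdth_cmndinfo *ci = gdth_get_cmndinfo(ha);

    if (!ci)
        return -EBUSY;          /* all GDTH_MAXCMDS slots are in use */

    /* ... fill in the command, submit it, wait for completion ... */

    gdth_put_cmndinfo(ci);      /* index = 0 marks the slot free again */
    return 0;
}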
306 
307 static void gdth_delay(int milliseconds)
308 {
309     if (milliseconds == 0) {
310         udelay(1);
311     } else {
312         mdelay(milliseconds);
313     }
314 }
315 
316 static void gdth_scsi_done(struct scsi_cmnd *scp)
317 {
318 	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
319 	int internal_command = cmndinfo->internal_command;
320 
321 	TRACE2(("gdth_scsi_done()\n"));
322 
323 	gdth_put_cmndinfo(cmndinfo);
324 	scp->host_scribble = NULL;
325 
326 	if (internal_command)
327 		complete((struct completion *)scp->request);
328 	else
329 		scp->scsi_done(scp);
330 }
331 
332 int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
333                    int timeout, u32 *info)
334 {
335     gdth_ha_str *ha = shost_priv(sdev->host);
336     struct scsi_cmnd *scp;
337     struct gdth_cmndinfo cmndinfo;
338     DECLARE_COMPLETION_ONSTACK(wait);
339     int rval;
340 
341     scp = kzalloc(sizeof(*scp), GFP_KERNEL);
342     if (!scp)
343         return -ENOMEM;
344 
345     scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
346     if (!scp->sense_buffer) {
347 	kfree(scp);
348 	return -ENOMEM;
349     }
350 
351     scp->device = sdev;
352     memset(&cmndinfo, 0, sizeof(cmndinfo));
353 
354     /* use request field to save the ptr. to completion struct. */
355     scp->request = (struct request *)&wait;
356     scp->cmd_len = 12;
357     scp->cmnd = cmnd;
358     cmndinfo.priority = IOCTL_PRI;
359     cmndinfo.internal_cmd_str = gdtcmd;
360     cmndinfo.internal_command = 1;
361 
362     TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
363     __gdth_queuecommand(ha, scp, &cmndinfo);
364 
365     wait_for_completion(&wait);
366 
367     rval = cmndinfo.status;
368     if (info)
369         *info = cmndinfo.info;
370     kfree(scp->sense_buffer);
371     kfree(scp);
372     return rval;
373 }
374 
375 int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
376                  int timeout, u32 *info)
377 {
378     struct scsi_device *sdev = scsi_get_host_dev(shost);
379     int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
380 
381     scsi_free_host_dev(sdev);
382     return rval;
383 }
384 
385 static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
386 {
387     *cyls = size /HEADS/SECS;
388     if (*cyls <= MAXCYLS) {
389         *heads = HEADS;
390         *secs = SECS;
391     } else {                                        /* too high for 64*32 */
392         *cyls = size /MEDHEADS/MEDSECS;
393         if (*cyls <= MAXCYLS) {
394             *heads = MEDHEADS;
395             *secs = MEDSECS;
396         } else {                                    /* too high for 127*63 */
397             *cyls = size /BIGHEADS/BIGSECS;
398             *heads = BIGHEADS;
399             *secs = BIGSECS;
400         }
401     }
402 }
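
/* Illustrative worked example (not part of the driver): gdth_eval_mapping()
 * falls back to progressively larger head/sector layouts until the cylinder
 * count fits under MAXCYLS. The figures below assume the conventional values
 * implied by the comments above (HEADS=64, SECS=32, MEDHEADS=127, MEDSECS=63,
 * BIGHEADS=255, BIGSECS=63, MAXCYLS=1024); the wrapper is hypothetical. */
static void gdth_eval_mapping_example(void)
{
    u32 cyls;
    int heads, secs;

    /* 8388608 sectors (~4 GiB at 512 bytes/sector):
     *   8388608 / 64  / 32 = 4096 cylinders -> exceeds MAXCYLS
     *   8388608 / 127 / 63 = 1048 cylinders -> still exceeds MAXCYLS
     *   8388608 / 255 / 63 =  522 cylinders -> accepted
     */
    gdth_eval_mapping(8388608, &cyls, &heads, &secs);
    /* expected: cyls == 522, heads == 255, secs == 63 */
}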
403 
404 static bool gdth_search_vortex(u16 device)
405 {
406 	if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
407 		return true;
408 	if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
409 	    device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
410 		return true;
411 	if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
412 	    device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
413 		return true;
414 	return false;
415 }
416 
417 static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
418 static int gdth_pci_init_one(struct pci_dev *pdev,
419 			     const struct pci_device_id *ent);
420 static void gdth_pci_remove_one(struct pci_dev *pdev);
421 static void gdth_remove_one(gdth_ha_str *ha);
422 
423 /* Vortex only makes RAID controllers.
424  * We do not really want to specify all 550 ids here, so wildcard match.
425  */
426 static const struct pci_device_id gdthtable[] = {
427 	{ PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
428 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
429 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
430 	{ }	/* terminate list */
431 };
432 MODULE_DEVICE_TABLE(pci, gdthtable);
433 
434 static struct pci_driver gdth_pci_driver = {
435 	.name		= "gdth",
436 	.id_table	= gdthtable,
437 	.probe		= gdth_pci_init_one,
438 	.remove		= gdth_pci_remove_one,
439 };
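
/* Illustrative sketch (not part of this excerpt): gdth_pci_driver is
 * presumably handed to the PCI core from the module init path, roughly as
 * below; the init function shown here is hypothetical. */
static int __init gdth_pci_register_example(void)
{
    /* matches gdthtable entries and calls gdth_pci_init_one() per device */
    return pci_register_driver(&gdth_pci_driver);
}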
440 
441 static void gdth_pci_remove_one(struct pci_dev *pdev)
442 {
443 	gdth_ha_str *ha = pci_get_drvdata(pdev);
444 
445 	list_del(&ha->list);
446 	gdth_remove_one(ha);
447 
448 	pci_disable_device(pdev);
449 }
450 
451 static int gdth_pci_init_one(struct pci_dev *pdev,
452 			     const struct pci_device_id *ent)
453 {
454 	u16 vendor = pdev->vendor;
455 	u16 device = pdev->device;
456 	unsigned long base0, base1, base2;
457 	int rc;
458 	gdth_pci_str gdth_pcistr;
459 	gdth_ha_str *ha = NULL;
460 
461 	TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
462 	       gdth_ctr_count, vendor, device));
463 
464 	memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
465 
466 	if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
467 		return -ENODEV;
468 
469 	rc = pci_enable_device(pdev);
470 	if (rc)
471 		return rc;
472 
473 	if (gdth_ctr_count >= MAXHA)
474 		return -EBUSY;
475 
476         /* GDT PCI controller found, resources are already in pdev */
477 	gdth_pcistr.pdev = pdev;
478         base0 = pci_resource_flags(pdev, 0);
479         base1 = pci_resource_flags(pdev, 1);
480         base2 = pci_resource_flags(pdev, 2);
481         if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B ||   /* GDT6000/B */
482             device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) {  /* MPR */
483             if (!(base0 & IORESOURCE_MEM))
484 		return -ENODEV;
485 	    gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
486         } else {                                  /* GDT6110, GDT6120, .. */
487             if (!(base0 & IORESOURCE_MEM) ||
488                 !(base2 & IORESOURCE_MEM) ||
489                 !(base1 & IORESOURCE_IO))
490 		return -ENODEV;
491 	    gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
492 	    gdth_pcistr.io    = pci_resource_start(pdev, 1);
493         }
494         TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
495 		gdth_pcistr.pdev->bus->number,
496 		PCI_SLOT(gdth_pcistr.pdev->devfn),
497 		gdth_pcistr.irq,
498 		gdth_pcistr.dpmem));
499 
500 	rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
501 	if (rc)
502 		return rc;
503 
504 	return 0;
505 }
506 
507 static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
508 			 gdth_ha_str *ha)
509 {
510     register gdt6_dpram_str __iomem *dp6_ptr;
511     register gdt6c_dpram_str __iomem *dp6c_ptr;
512     register gdt6m_dpram_str __iomem *dp6m_ptr;
513     u32 retries;
514     u8 prot_ver;
515     u16 command;
516     int i, found = FALSE;
517 
518     TRACE(("gdth_init_pci()\n"));
519 
520     if (pdev->vendor == PCI_VENDOR_ID_INTEL)
521         ha->oem_id = OEM_ID_INTEL;
522     else
523         ha->oem_id = OEM_ID_ICP;
524     ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
525     ha->stype = (u32)pdev->device;
526     ha->irq = pdev->irq;
527     ha->pdev = pdev;
528 
529     if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
530         TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
531         ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
532         if (ha->brd == NULL) {
533             printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
534             return 0;
535         }
536         /* check and reset interface area */
537         dp6_ptr = ha->brd;
538         writel(DPMEM_MAGIC, &dp6_ptr->u);
539         if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
540             printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
541                    pcistr->dpmem);
542             found = FALSE;
543             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
544                 iounmap(ha->brd);
545                 ha->brd = ioremap(i, sizeof(u16));
546                 if (ha->brd == NULL) {
547                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
548                     return 0;
549                 }
550                 if (readw(ha->brd) != 0xffff) {
551                     TRACE2(("init_pci_old() address 0x%x busy\n", i));
552                     continue;
553                 }
554                 iounmap(ha->brd);
555 		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
556                 ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
557                 if (ha->brd == NULL) {
558                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
559                     return 0;
560                 }
561                 dp6_ptr = ha->brd;
562                 writel(DPMEM_MAGIC, &dp6_ptr->u);
563                 if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
564                     printk("GDT-PCI: Use free address at 0x%x\n", i);
565                     found = TRUE;
566                     break;
567                 }
568             }
569             if (!found) {
570                 printk("GDT-PCI: No free address found!\n");
571                 iounmap(ha->brd);
572                 return 0;
573             }
574         }
575         memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
576         if (readl(&dp6_ptr->u) != 0) {
577             printk("GDT-PCI: Initialization error (DPMEM write error)\n");
578             iounmap(ha->brd);
579             return 0;
580         }
581 
582         /* disable board interrupts, deinit services */
583         writeb(0xff, &dp6_ptr->io.irqdel);
584         writeb(0x00, &dp6_ptr->io.irqen);
585         writeb(0x00, &dp6_ptr->u.ic.S_Status);
586         writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
587 
588         writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
589         writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
590         writeb(0, &dp6_ptr->io.event);
591         retries = INIT_RETRIES;
592         gdth_delay(20);
593         while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
594             if (--retries == 0) {
595                 printk("GDT-PCI: Initialization error (DEINIT failed)\n");
596                 iounmap(ha->brd);
597                 return 0;
598             }
599             gdth_delay(1);
600         }
601         prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
602         writeb(0, &dp6_ptr->u.ic.S_Status);
603         writeb(0xff, &dp6_ptr->io.irqdel);
604         if (prot_ver != PROTOCOL_VERSION) {
605             printk("GDT-PCI: Illegal protocol version\n");
606             iounmap(ha->brd);
607             return 0;
608         }
609 
610         ha->type = GDT_PCI;
611         ha->ic_all_size = sizeof(dp6_ptr->u);
612 
613         /* special command to controller BIOS */
614         writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
615         writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
616         writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
617         writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
618         writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
619         writeb(0, &dp6_ptr->io.event);
620         retries = INIT_RETRIES;
621         gdth_delay(20);
622         while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
623             if (--retries == 0) {
624                 printk("GDT-PCI: Initialization error\n");
625                 iounmap(ha->brd);
626                 return 0;
627             }
628             gdth_delay(1);
629         }
630         writeb(0, &dp6_ptr->u.ic.S_Status);
631         writeb(0xff, &dp6_ptr->io.irqdel);
632 
633         ha->dma64_support = 0;
634 
635     } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
636         ha->plx = (gdt6c_plx_regs *)pcistr->io;
637         TRACE2(("init_pci_new() dpmem %lx irq %d\n",
638             pcistr->dpmem,ha->irq));
639         ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
640         if (ha->brd == NULL) {
641             printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
642             iounmap(ha->brd);
643             return 0;
644         }
645         /* check and reset interface area */
646         dp6c_ptr = ha->brd;
647         writel(DPMEM_MAGIC, &dp6c_ptr->u);
648         if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
649             printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
650                    pcistr->dpmem);
651             found = FALSE;
652             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
653                 iounmap(ha->brd);
654                 ha->brd = ioremap(i, sizeof(u16));
655                 if (ha->brd == NULL) {
656                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
657                     return 0;
658                 }
659                 if (readw(ha->brd) != 0xffff) {
660                     TRACE2(("init_pci_plx() address 0x%x busy\n", i));
661                     continue;
662                 }
663                 iounmap(ha->brd);
664 		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
665                 ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
666                 if (ha->brd == NULL) {
667                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
668                     return 0;
669                 }
670                 dp6c_ptr = ha->brd;
671                 writel(DPMEM_MAGIC, &dp6c_ptr->u);
672                 if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
673                     printk("GDT-PCI: Use free address at 0x%x\n", i);
674                     found = TRUE;
675                     break;
676                 }
677             }
678             if (!found) {
679                 printk("GDT-PCI: No free address found!\n");
680                 iounmap(ha->brd);
681                 return 0;
682             }
683         }
684         memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
685         if (readl(&dp6c_ptr->u) != 0) {
686             printk("GDT-PCI: Initialization error (DPMEM write error)\n");
687             iounmap(ha->brd);
688             return 0;
689         }
690 
691         /* disable board interrupts, deinit services */
692         outb(0x00,PTR2USHORT(&ha->plx->control1));
693         outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
694 
695         writeb(0x00, &dp6c_ptr->u.ic.S_Status);
696         writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
697 
698         writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
699         writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
700 
701         outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
702 
703         retries = INIT_RETRIES;
704         gdth_delay(20);
705         while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
706             if (--retries == 0) {
707                 printk("GDT-PCI: Initialization error (DEINIT failed)\n");
708                 iounmap(ha->brd);
709                 return 0;
710             }
711             gdth_delay(1);
712         }
713         prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
714         writeb(0, &dp6c_ptr->u.ic.Status);
715         if (prot_ver != PROTOCOL_VERSION) {
716             printk("GDT-PCI: Illegal protocol version\n");
717             iounmap(ha->brd);
718             return 0;
719         }
720 
721         ha->type = GDT_PCINEW;
722         ha->ic_all_size = sizeof(dp6c_ptr->u);
723 
724         /* special command to controller BIOS */
725         writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
726         writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
727         writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
728         writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
729         writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
730 
731         outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
732 
733         retries = INIT_RETRIES;
734         gdth_delay(20);
735         while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
736             if (--retries == 0) {
737                 printk("GDT-PCI: Initialization error\n");
738                 iounmap(ha->brd);
739                 return 0;
740             }
741             gdth_delay(1);
742         }
743         writeb(0, &dp6c_ptr->u.ic.S_Status);
744 
745         ha->dma64_support = 0;
746 
747     } else {                                            /* MPR */
748         TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
749         ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
750         if (ha->brd == NULL) {
751             printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
752             return 0;
753         }
754 
755         /* manipulate config. space to enable DPMEM, start RP controller */
756 	pci_read_config_word(pdev, PCI_COMMAND, &command);
757         command |= 6;
758 	pci_write_config_word(pdev, PCI_COMMAND, command);
759 	gdth_delay(1);
760 
761         dp6m_ptr = ha->brd;
762 
763         /* Ensure that it is safe to access the non-HW portions of DPMEM.
764          * Additional check needed for XScale-based RAID controllers */
765         while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
766             gdth_delay(1);
767 
768         /* check and reset interface area */
769         writel(DPMEM_MAGIC, &dp6m_ptr->u);
770         if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
771             printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
772                    pcistr->dpmem);
773             found = FALSE;
774             for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
775                 iounmap(ha->brd);
776                 ha->brd = ioremap(i, sizeof(u16));
777                 if (ha->brd == NULL) {
778                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
779                     return 0;
780                 }
781                 if (readw(ha->brd) != 0xffff) {
782                     TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
783                     continue;
784                 }
785                 iounmap(ha->brd);
786 		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
787                 ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
788                 if (ha->brd == NULL) {
789                     printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
790                     return 0;
791                 }
792                 dp6m_ptr = ha->brd;
793                 writel(DPMEM_MAGIC, &dp6m_ptr->u);
794                 if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
795                     printk("GDT-PCI: Use free address at 0x%x\n", i);
796                     found = TRUE;
797                     break;
798                 }
799             }
800             if (!found) {
801                 printk("GDT-PCI: No free address found!\n");
802                 iounmap(ha->brd);
803                 return 0;
804             }
805         }
806         memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));
807 
808         /* disable board interrupts, deinit services */
809         writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
810                     &dp6m_ptr->i960r.edoor_en_reg);
811         writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
812         writeb(0x00, &dp6m_ptr->u.ic.S_Status);
813         writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
814 
815         writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
816         writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
817         writeb(1, &dp6m_ptr->i960r.ldoor_reg);
818         retries = INIT_RETRIES;
819         gdth_delay(20);
820         while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
821             if (--retries == 0) {
822                 printk("GDT-PCI: Initialization error (DEINIT failed)\n");
823                 iounmap(ha->brd);
824                 return 0;
825             }
826             gdth_delay(1);
827         }
828         prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
829         writeb(0, &dp6m_ptr->u.ic.S_Status);
830         if (prot_ver != PROTOCOL_VERSION) {
831             printk("GDT-PCI: Illegal protocol version\n");
832             iounmap(ha->brd);
833             return 0;
834         }
835 
836         ha->type = GDT_PCIMPR;
837         ha->ic_all_size = sizeof(dp6m_ptr->u);
838 
839         /* special command to controller BIOS */
840         writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
841         writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
842         writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
843         writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
844         writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
845         writeb(1, &dp6m_ptr->i960r.ldoor_reg);
846         retries = INIT_RETRIES;
847         gdth_delay(20);
848         while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
849             if (--retries == 0) {
850                 printk("GDT-PCI: Initialization error\n");
851                 iounmap(ha->brd);
852                 return 0;
853             }
854             gdth_delay(1);
855         }
856         writeb(0, &dp6m_ptr->u.ic.S_Status);
857 
858         /* read FW version to detect 64-bit DMA support */
859         writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
860         writeb(1, &dp6m_ptr->i960r.ldoor_reg);
861         retries = INIT_RETRIES;
862         gdth_delay(20);
863         while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
864             if (--retries == 0) {
865                 printk("GDT-PCI: Initialization error (DEINIT failed)\n");
866                 iounmap(ha->brd);
867                 return 0;
868             }
869             gdth_delay(1);
870         }
871         prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
872         writeb(0, &dp6m_ptr->u.ic.S_Status);
873         if (prot_ver < 0x2b)      /* FW < x.43: no 64-bit DMA support */
874             ha->dma64_support = 0;
875         else
876             ha->dma64_support = 1;
877     }
878 
879     return 1;
880 }
881 
882 /* controller protocol functions */
883 
884 static void gdth_enable_int(gdth_ha_str *ha)
885 {
886     unsigned long flags;
887     gdt6_dpram_str __iomem *dp6_ptr;
888     gdt6m_dpram_str __iomem *dp6m_ptr;
889 
890     TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
891     spin_lock_irqsave(&ha->smp_lock, flags);
892 
893     if (ha->type == GDT_PCI) {
894         dp6_ptr = ha->brd;
895         writeb(1, &dp6_ptr->io.irqdel);
896         writeb(0, &dp6_ptr->u.ic.Cmd_Index);
897         writeb(1, &dp6_ptr->io.irqen);
898     } else if (ha->type == GDT_PCINEW) {
899         outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
900         outb(0x03, PTR2USHORT(&ha->plx->control1));
901     } else if (ha->type == GDT_PCIMPR) {
902         dp6m_ptr = ha->brd;
903         writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
904         writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
905                     &dp6m_ptr->i960r.edoor_en_reg);
906     }
907     spin_unlock_irqrestore(&ha->smp_lock, flags);
908 }
909 
910 /* return IStatus if interrupt was from this card else 0 */
911 static u8 gdth_get_status(gdth_ha_str *ha)
912 {
913     u8 IStatus = 0;
914 
915     TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
916 
917         if (ha->type == GDT_PCI)
918             IStatus =
919                 readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
920         else if (ha->type == GDT_PCINEW)
921             IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
922         else if (ha->type == GDT_PCIMPR)
923             IStatus =
924                 readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
925 
926         return IStatus;
927 }
928 
929 static int gdth_test_busy(gdth_ha_str *ha)
930 {
931     register int gdtsema0 = 0;
932 
933     TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
934 
935     if (ha->type == GDT_PCI)
936         gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
937     else if (ha->type == GDT_PCINEW)
938         gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
939     else if (ha->type == GDT_PCIMPR)
940         gdtsema0 =
941             (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
942 
943     return (gdtsema0 & 1);
944 }
945 
946 
947 static int gdth_get_cmd_index(gdth_ha_str *ha)
948 {
949     int i;
950 
951     TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
952 
953     for (i=0; i<GDTH_MAXCMDS; ++i) {
954         if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
955             ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
956             ha->cmd_tab[i].service = ha->pccb->Service;
957             ha->pccb->CommandIndex = (u32)i+2;
958             return (i+2);
959         }
960     }
961     return 0;
962 }
963 
964 
965 static void gdth_set_sema0(gdth_ha_str *ha)
966 {
967     TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
968 
969     if (ha->type == GDT_PCI) {
970         writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
971     } else if (ha->type == GDT_PCINEW) {
972         outb(1, PTR2USHORT(&ha->plx->sema0_reg));
973     } else if (ha->type == GDT_PCIMPR) {
974         writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
975     }
976 }
977 
978 
979 static void gdth_copy_command(gdth_ha_str *ha)
980 {
981     register gdth_cmd_str *cmd_ptr;
982     register gdt6m_dpram_str __iomem *dp6m_ptr;
983     register gdt6c_dpram_str __iomem *dp6c_ptr;
984     gdt6_dpram_str __iomem *dp6_ptr;
985     u16 cp_count,dp_offset,cmd_no;
986 
987     TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
988 
989     cp_count = ha->cmd_len;
990     dp_offset= ha->cmd_offs_dpmem;
991     cmd_no   = ha->cmd_cnt;
992     cmd_ptr  = ha->pccb;
993 
994     ++ha->cmd_cnt;
995 
996     /* set cpcount dword aligned */
997     if (cp_count & 3)
998         cp_count += (4 - (cp_count & 3));
999 
1000     ha->cmd_offs_dpmem += cp_count;
1001 
1002     /* set offset and service, copy command to DPMEM */
1003     if (ha->type == GDT_PCI) {
1004         dp6_ptr = ha->brd;
1005         writew(dp_offset + DPMEM_COMMAND_OFFSET,
1006                     &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
1007         writew((u16)cmd_ptr->Service,
1008                     &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
1009         memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1010     } else if (ha->type == GDT_PCINEW) {
1011         dp6c_ptr = ha->brd;
1012         writew(dp_offset + DPMEM_COMMAND_OFFSET,
1013                     &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
1014         writew((u16)cmd_ptr->Service,
1015                     &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
1016         memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1017     } else if (ha->type == GDT_PCIMPR) {
1018         dp6m_ptr = ha->brd;
1019         writew(dp_offset + DPMEM_COMMAND_OFFSET,
1020                     &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
1021         writew((u16)cmd_ptr->Service,
1022                     &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
1023         memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
1024     }
1025 }
1026 
1027 
1028 static void gdth_release_event(gdth_ha_str *ha)
1029 {
1030     TRACE(("gdth_release_event() hanum %d\n", ha->hanum));
1031 
1032 #ifdef GDTH_STATISTICS
1033     {
1034         u32 i,j;
1035         for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
1036             if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
1037                 ++i;
1038         }
1039         if (max_index < i) {
1040             max_index = i;
1041             TRACE3(("GDT: max_index = %d\n",(u16)i));
1042         }
1043     }
1044 #endif
1045 
1046     if (ha->pccb->OpCode == GDT_INIT)
1047         ha->pccb->Service |= 0x80;
1048 
1049     if (ha->type == GDT_PCI) {
1050         writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
1051     } else if (ha->type == GDT_PCINEW) {
1052         outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
1053     } else if (ha->type == GDT_PCIMPR) {
1054         writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
1055     }
1056 }
1057 
1058 static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
1059 {
1060     int answer_found = FALSE;
1061     int wait_index = 0;
1062 
1063     TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
1064 
1065     if (index == 0)
1066         return 1;                               /* no wait required */
1067 
1068     do {
1069 	__gdth_interrupt(ha, true, &wait_index);
1070         if (wait_index == index) {
1071             answer_found = TRUE;
1072             break;
1073         }
1074         gdth_delay(1);
1075     } while (--time);
1076 
1077     while (gdth_test_busy(ha))
1078         gdth_delay(0);
1079 
1080     return (answer_found);
1081 }
1082 
1083 
1084 static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
1085                                             u32 p1, u64 p2, u64 p3)
1086 {
1087     register gdth_cmd_str *cmd_ptr;
1088     int retries,index;
1089 
1090     TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
1091 
1092     cmd_ptr = ha->pccb;
1093     memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
1094 
1095     /* make command  */
1096     for (retries = INIT_RETRIES;;) {
1097         cmd_ptr->Service          = service;
1098         cmd_ptr->RequestBuffer    = INTERNAL_CMND;
1099         if (!(index=gdth_get_cmd_index(ha))) {
1100             TRACE(("GDT: No free command index found\n"));
1101             return 0;
1102         }
1103         gdth_set_sema0(ha);
1104         cmd_ptr->OpCode           = opcode;
1105         cmd_ptr->BoardNode        = LOCALBOARD;
1106         if (service == CACHESERVICE) {
1107             if (opcode == GDT_IOCTL) {
1108                 cmd_ptr->u.ioctl.subfunc = p1;
1109                 cmd_ptr->u.ioctl.channel = (u32)p2;
1110                 cmd_ptr->u.ioctl.param_size = (u16)p3;
1111                 cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
1112             } else {
1113                 if (ha->cache_feat & GDT_64BIT) {
1114                     cmd_ptr->u.cache64.DeviceNo = (u16)p1;
1115                     cmd_ptr->u.cache64.BlockNo  = p2;
1116                 } else {
1117                     cmd_ptr->u.cache.DeviceNo = (u16)p1;
1118                     cmd_ptr->u.cache.BlockNo  = (u32)p2;
1119                 }
1120             }
1121         } else if (service == SCSIRAWSERVICE) {
1122             if (ha->raw_feat & GDT_64BIT) {
1123                 cmd_ptr->u.raw64.direction  = p1;
1124                 cmd_ptr->u.raw64.bus        = (u8)p2;
1125                 cmd_ptr->u.raw64.target     = (u8)p3;
1126                 cmd_ptr->u.raw64.lun        = (u8)(p3 >> 8);
1127             } else {
1128                 cmd_ptr->u.raw.direction  = p1;
1129                 cmd_ptr->u.raw.bus        = (u8)p2;
1130                 cmd_ptr->u.raw.target     = (u8)p3;
1131                 cmd_ptr->u.raw.lun        = (u8)(p3 >> 8);
1132             }
1133         } else if (service == SCREENSERVICE) {
1134             if (opcode == GDT_REALTIME) {
1135                 *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
1136                 *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
1137                 *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
1138             }
1139         }
1140         ha->cmd_len          = sizeof(gdth_cmd_str);
1141         ha->cmd_offs_dpmem   = 0;
1142         ha->cmd_cnt          = 0;
1143         gdth_copy_command(ha);
1144         gdth_release_event(ha);
1145         gdth_delay(20);
1146         if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
1147             printk("GDT: Initialization error (timeout service %d)\n",service);
1148             return 0;
1149         }
1150         if (ha->status != S_BSY || --retries == 0)
1151             break;
1152         gdth_delay(1);
1153     }
1154 
1155     return (ha->status != S_OK ? 0:1);
1156 }
1157 
1158 
1159 /* search for devices */
1160 
1161 static int gdth_search_drives(gdth_ha_str *ha)
1162 {
1163     u16 cdev_cnt, i;
1164     int ok;
1165     u32 bus_no, drv_cnt, drv_no, j;
1166     gdth_getch_str *chn;
1167     gdth_drlist_str *drl;
1168     gdth_iochan_str *ioc;
1169     gdth_raw_iochan_str *iocr;
1170     gdth_arcdl_str *alst;
1171     gdth_alist_str *alst2;
1172     gdth_oem_str_ioctl *oemstr;
1173 
1174     TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
1175     ok = 0;
1176 
1177     /* initialize controller services, at first: screen service */
1178     ha->screen_feat = 0;
1179     if (!force_dma32) {
1180         ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
1181         if (ok)
1182             ha->screen_feat = GDT_64BIT;
1183     }
1184     if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1185         ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
1186     if (!ok) {
1187         printk("GDT-HA %d: Initialization error screen service (code %d)\n",
1188                ha->hanum, ha->status);
1189         return 0;
1190     }
1191     TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
1192 
1193     /* unfreeze all IOs */
1194     gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
1195 
1196     /* initialize cache service */
1197     ha->cache_feat = 0;
1198     if (!force_dma32) {
1199         ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
1200                                                                          0, 0);
1201         if (ok)
1202             ha->cache_feat = GDT_64BIT;
1203     }
1204     if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1205         ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
1206     if (!ok) {
1207         printk("GDT-HA %d: Initialization error cache service (code %d)\n",
1208                ha->hanum, ha->status);
1209         return 0;
1210     }
1211     TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
1212     cdev_cnt = (u16)ha->info;
1213     ha->fw_vers = ha->service;
1214 
1215     /* detect number of buses - try new IOCTL */
1216     iocr = (gdth_raw_iochan_str *)ha->pscratch;
1217     iocr->hdr.version        = 0xffffffff;
1218     iocr->hdr.list_entries   = MAXBUS;
1219     iocr->hdr.first_chan     = 0;
1220     iocr->hdr.last_chan      = MAXBUS-1;
1221     iocr->hdr.list_offset    = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
1222     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
1223                           INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
1224         TRACE2(("IOCHAN_RAW_DESC supported!\n"));
1225         ha->bus_cnt = iocr->hdr.chan_count;
1226         for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
1227             if (iocr->list[bus_no].proc_id < MAXID)
1228                 ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
1229             else
1230                 ha->bus_id[bus_no] = 0xff;
1231         }
1232     } else {
1233         /* old method */
1234         chn = (gdth_getch_str *)ha->pscratch;
1235         for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
1236             chn->channel_no = bus_no;
1237             if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1238                                    SCSI_CHAN_CNT | L_CTRL_PATTERN,
1239                                    IO_CHANNEL | INVALID_CHANNEL,
1240                                    sizeof(gdth_getch_str))) {
1241                 if (bus_no == 0) {
1242                     printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
1243                            ha->hanum, ha->status);
1244                     return 0;
1245                 }
1246                 break;
1247             }
1248             if (chn->siop_id < MAXID)
1249                 ha->bus_id[bus_no] = chn->siop_id;
1250             else
1251                 ha->bus_id[bus_no] = 0xff;
1252         }
1253         ha->bus_cnt = (u8)bus_no;
1254     }
1255     TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
1256 
1257     /* read cache configuration */
1258     if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
1259                            INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
1260         printk("GDT-HA %d: Initialization error cache service (code %d)\n",
1261                ha->hanum, ha->status);
1262         return 0;
1263     }
1264     ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
1265     TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
1266             ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
1267             ha->cpar.write_back,ha->cpar.block_size));
1268 
1269     /* read board info and features */
1270     ha->more_proc = FALSE;
1271     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
1272                           INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
1273         memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
1274                sizeof(gdth_binfo_str));
1275         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
1276                               INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
1277             TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
1278             ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
1279             ha->more_proc = TRUE;
1280         }
1281     } else {
1282         TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
1283         strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
1284     }
1285     TRACE2(("Controller name: %s\n",ha->binfo.type_string));
1286 
1287     /* read more information */
1288     if (ha->more_proc) {
1289         /* physical drives, channel addresses */
1290         ioc = (gdth_iochan_str *)ha->pscratch;
1291         ioc->hdr.version        = 0xffffffff;
1292         ioc->hdr.list_entries   = MAXBUS;
1293         ioc->hdr.first_chan     = 0;
1294         ioc->hdr.last_chan      = MAXBUS-1;
1295         ioc->hdr.list_offset    = GDTOFFSOF(gdth_iochan_str, list[0]);
1296         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
1297                               INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
1298             for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
1299                 ha->raw[bus_no].address = ioc->list[bus_no].address;
1300                 ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
1301             }
1302         } else {
1303             for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
1304                 ha->raw[bus_no].address = IO_CHANNEL;
1305                 ha->raw[bus_no].local_no = bus_no;
1306             }
1307         }
1308         for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
1309             chn = (gdth_getch_str *)ha->pscratch;
1310             chn->channel_no = ha->raw[bus_no].local_no;
1311             if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1312                                   SCSI_CHAN_CNT | L_CTRL_PATTERN,
1313                                   ha->raw[bus_no].address | INVALID_CHANNEL,
1314                                   sizeof(gdth_getch_str))) {
1315                 ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
1316                 TRACE2(("Channel %d: %d phys. drives\n",
1317                         bus_no,chn->drive_cnt));
1318             }
1319             if (ha->raw[bus_no].pdev_cnt > 0) {
1320                 drl = (gdth_drlist_str *)ha->pscratch;
1321                 drl->sc_no = ha->raw[bus_no].local_no;
1322                 drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
1323                 if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1324                                       SCSI_DR_LIST | L_CTRL_PATTERN,
1325                                       ha->raw[bus_no].address | INVALID_CHANNEL,
1326                                       sizeof(gdth_drlist_str))) {
1327                     for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
1328                         ha->raw[bus_no].id_list[j] = drl->sc_list[j];
1329                 } else {
1330                     ha->raw[bus_no].pdev_cnt = 0;
1331                 }
1332             }
1333         }
1334 
1335         /* logical drives */
1336         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
1337                               INVALID_CHANNEL,sizeof(u32))) {
1338             drv_cnt = *(u32 *)ha->pscratch;
1339             if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
1340                                   INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
1341                 for (j = 0; j < drv_cnt; ++j) {
1342                     drv_no = ((u32 *)ha->pscratch)[j];
1343                     if (drv_no < MAX_LDRIVES) {
1344                         ha->hdr[drv_no].is_logdrv = TRUE;
1345                         TRACE2(("Drive %d is log. drive\n",drv_no));
1346                     }
1347                 }
1348             }
1349             alst = (gdth_arcdl_str *)ha->pscratch;
1350             alst->entries_avail = MAX_LDRIVES;
1351             alst->first_entry = 0;
1352             alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
1353             if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1354                                   ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
1355                                   INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
1356                                   (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
1357                 for (j = 0; j < alst->entries_init; ++j) {
1358                     ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
1359                     ha->hdr[j].is_master = alst->list[j].is_master;
1360                     ha->hdr[j].is_parity = alst->list[j].is_parity;
1361                     ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
1362                     ha->hdr[j].master_no = alst->list[j].cd_handle;
1363                 }
1364             } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1365                                          ARRAY_DRV_LIST | LA_CTRL_PATTERN,
1366                                          0, 35 * sizeof(gdth_alist_str))) {
1367                 for (j = 0; j < 35; ++j) {
1368                     alst2 = &((gdth_alist_str *)ha->pscratch)[j];
1369                     ha->hdr[j].is_arraydrv = alst2->is_arrayd;
1370                     ha->hdr[j].is_master = alst2->is_master;
1371                     ha->hdr[j].is_parity = alst2->is_parity;
1372                     ha->hdr[j].is_hotfix = alst2->is_hotfix;
1373                     ha->hdr[j].master_no = alst2->cd_handle;
1374                 }
1375             }
1376         }
1377     }
1378 
1379     /* initialize raw service */
1380     ha->raw_feat = 0;
1381     if (!force_dma32) {
1382         ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
1383         if (ok)
1384             ha->raw_feat = GDT_64BIT;
1385     }
1386     if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
1387         ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
1388     if (!ok) {
1389         printk("GDT-HA %d: Initialization error raw service (code %d)\n",
1390                ha->hanum, ha->status);
1391         return 0;
1392     }
1393     TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
1394 
1395     /* set/get features raw service (scatter/gather) */
1396     if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
1397                           0, 0)) {
1398         TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
1399         if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
1400             TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
1401                     ha->info));
1402             ha->raw_feat |= (u16)ha->info;
1403         }
1404     }
1405 
1406     /* set/get features cache service (equal to raw service) */
1407     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
1408                           SCATTER_GATHER,0)) {
1409         TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
1410         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
1411             TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
1412                     ha->info));
1413             ha->cache_feat |= (u16)ha->info;
1414         }
1415     }
1416 
1417     /* reserve drives for raw service */
1418     if (reserve_mode != 0) {
1419         gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
1420                           reserve_mode == 1 ? 1 : 3, 0, 0);
1421         TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
1422                 ha->status));
1423     }
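    /*
     * Note: reserve_list is consumed in groups of four values: controller
     * number, channel, target ID and LUN.  For each matching entry a raw
     * service GDT_RESERVE request is issued with the target in the low byte
     * of the last parameter and the LUN shifted into bits 8..15, e.g.
     * reserve_list = 0,1,2,0 reserves channel 1, ID 2, LUN 0 on controller 0.
     */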
1424     for (i = 0; i < MAX_RES_ARGS; i += 4) {
1425         if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
1426             reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
1427             TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
1428                     reserve_list[i], reserve_list[i+1],
1429                     reserve_list[i+2], reserve_list[i+3]));
1430             if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
1431                                    reserve_list[i+1], reserve_list[i+2] |
1432                                    (reserve_list[i+3] << 8))) {
1433                 printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
1434                        ha->hanum, ha->status);
1435              }
1436         }
1437     }
1438 
1439     /* Determine OEM string using IOCTL */
1440     oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
1441     oemstr->params.ctl_version = 0x01;
1442     oemstr->params.buffer_size = sizeof(oemstr->text);
1443     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
1444                           CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
1445                           sizeof(gdth_oem_str_ioctl))) {
1446         TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
1447         printk("GDT-HA %d: Vendor: %s Name: %s\n",
1448                ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
1449         /* Save the Host Drive inquiry data */
1450         strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
1451                 sizeof(ha->oem_name));
1452     } else {
1453         /* Old method, based on PCI ID */
1454         TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
1455         printk("GDT-HA %d: Name: %s\n",
1456                ha->hanum, ha->binfo.type_string);
1457         if (ha->oem_id == OEM_ID_INTEL)
1458             strlcpy(ha->oem_name,"Intel  ", sizeof(ha->oem_name));
1459         else
1460             strlcpy(ha->oem_name,"ICP    ", sizeof(ha->oem_name));
1461     }
1462 
1463     /* scanning for host drives */
1464     for (i = 0; i < cdev_cnt; ++i)
1465         gdth_analyse_hdrive(ha, i);
1466 
1467     TRACE(("gdth_search_drives() OK\n"));
1468     return 1;
1469 }
1470 
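/*
 * gdth_analyse_hdrive() - query the cache service for one host drive:
 * GDT_INFO returns the size; ha->info2, when non-zero, encodes the geometry
 * (low byte = heads, next byte = sectors per track; e.g. info2 == 0x2040
 * means 64 heads and 32 sectors, so cylinders = size / 64 / 32), otherwise
 * gdth_eval_mapping() picks a default mapping.  With a 64-bit capable cache
 * service, GDT_X_INFO supplies the full 64-bit size; further commands fetch
 * the device type, cluster info and R/W attributes.
 */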
1471 static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
1472 {
1473     u32 drv_cyls;
1474     int drv_hds, drv_secs;
1475 
1476     TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
1477     if (hdrive >= MAX_HDRIVES)
1478         return 0;
1479 
1480     if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
1481         return 0;
1482     ha->hdr[hdrive].present = TRUE;
1483     ha->hdr[hdrive].size = ha->info;
1484 
1485     /* evaluate mapping (heads per cylinder, sectors per track) */
1486     ha->hdr[hdrive].size &= ~SECS32;
1487     if (ha->info2 == 0) {
1488         gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
1489     } else {
1490         drv_hds = ha->info2 & 0xff;
1491         drv_secs = (ha->info2 >> 8) & 0xff;
1492         drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
1493     }
1494     ha->hdr[hdrive].heads = (u8)drv_hds;
1495     ha->hdr[hdrive].secs  = (u8)drv_secs;
1496     /* round size */
1497     ha->hdr[hdrive].size  = drv_cyls * drv_hds * drv_secs;
1498 
1499     if (ha->cache_feat & GDT_64BIT) {
1500         if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
1501             && ha->info2 != 0) {
1502             ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
1503         }
1504     }
1505     TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
1506             hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
1507 
1508     /* get information about the device */
1509     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
1510         TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
1511                 hdrive,ha->info));
1512         ha->hdr[hdrive].devtype = (u16)ha->info;
1513     }
1514 
1515     /* cluster info */
1516     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
1517         TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
1518                 hdrive,ha->info));
1519         if (!shared_access)
1520             ha->hdr[hdrive].cluster_type = (u8)ha->info;
1521     }
1522 
1523     /* R/W attributes */
1524     if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
1525         TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
1526                 hdrive,ha->info));
1527         ha->hdr[hdrive].rw_attribs = (u8)ha->info;
1528     }
1529 
1530     return 1;
1531 }
1532 
1533 
1534 /* command queueing/sending functions */
1535 
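/*
 * gdth_putq() - insert a command into the per-adapter request queue, a
 * singly linked list threaded through scp->SCp.ptr and ordered by priority
 * (0 = highest, 0xff = lowest).  Because the scan below advances past all
 * entries whose priority value is <= the new one, commands of equal
 * priority keep FIFO order: e.g. inserting priority 20 into a queue of
 * 10,20,20,40 places the new command between the last 20 and the 40.
 */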
1536 static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
1537 {
1538     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1539     register struct scsi_cmnd *pscp;
1540     register struct scsi_cmnd *nscp;
1541     unsigned long flags;
1542 
1543     TRACE(("gdth_putq() priority %d\n",priority));
1544     spin_lock_irqsave(&ha->smp_lock, flags);
1545 
1546     if (!cmndinfo->internal_command)
1547         cmndinfo->priority = priority;
1548 
1549     if (ha->req_first==NULL) {
1550         ha->req_first = scp;                    /* queue was empty */
1551         scp->SCp.ptr = NULL;
1552     } else {                                    /* queue not empty */
1553         pscp = ha->req_first;
1554         nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
1555         /* priority: 0-highest,..,0xff-lowest */
1556         while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
1557             pscp = nscp;
1558             nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
1559         }
1560         pscp->SCp.ptr = (char *)scp;
1561         scp->SCp.ptr  = (char *)nscp;
1562     }
1563     spin_unlock_irqrestore(&ha->smp_lock, flags);
1564 
1565 #ifdef GDTH_STATISTICS
1566     flags = 0;
1567     for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
1568         ++flags;
1569     if (max_rq < flags) {
1570         max_rq = flags;
1571         TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
1572     }
1573 #endif
1574 }
1575 
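/*
 * gdth_next() - walk the request queue and hand commands to the firmware.
 * Host-drive commands on the virtual bus go to the cache service (some are
 * answered locally via gdth_internal_cache_cmd()), commands for physical
 * devices go to the raw service, and internal commands use the prepared
 * command string.  Commands whose priority value is DEFAULT_PRI or higher
 * are skipped while their bus or host drive is locked.  In polling mode
 * only one command is issued per call and gdth_wait() waits for it.
 */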
1576 static void gdth_next(gdth_ha_str *ha)
1577 {
1578     register struct scsi_cmnd *pscp;
1579     register struct scsi_cmnd *nscp;
1580     u8 b, t, l, firsttime;
1581     u8 this_cmd, next_cmd;
1582     unsigned long flags = 0;
1583     int cmd_index;
1584 
1585     TRACE(("gdth_next() hanum %d\n", ha->hanum));
1586     if (!gdth_polling)
1587         spin_lock_irqsave(&ha->smp_lock, flags);
1588 
1589     ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
1590     this_cmd = firsttime = TRUE;
1591     next_cmd = gdth_polling ? FALSE:TRUE;
1592     cmd_index = 0;
1593 
1594     for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
1595         struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
1596         if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
1597             pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
1598         if (!nscp_cmndinfo->internal_command) {
1599             b = nscp->device->channel;
1600             t = nscp->device->id;
1601             l = nscp->device->lun;
1602             if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
1603                 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
1604                     (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
1605                     continue;
1606             }
1607         } else
1608             b = t = l = 0;
1609 
1610         if (firsttime) {
1611             if (gdth_test_busy(ha)) {        /* controller busy ? */
1612                 TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
1613                 if (!gdth_polling) {
1614                     spin_unlock_irqrestore(&ha->smp_lock, flags);
1615                     return;
1616                 }
1617                 while (gdth_test_busy(ha))
1618                     gdth_delay(1);
1619             }
1620             firsttime = FALSE;
1621         }
1622 
1623         if (!nscp_cmndinfo->internal_command) {
1624         if (nscp_cmndinfo->phase == -1) {
1625             nscp_cmndinfo->phase = CACHESERVICE;           /* default: cache svc. */
1626             if (nscp->cmnd[0] == TEST_UNIT_READY) {
1627                 TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
1628                         b, t, l));
1629                 /* TEST_UNIT_READY -> set scan mode */
1630                 if ((ha->scan_mode & 0x0f) == 0) {
1631                     if (b == 0 && t == 0 && l == 0) {
1632                         ha->scan_mode |= 1;
1633                         TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
1634                     }
1635                 } else if ((ha->scan_mode & 0x0f) == 1) {
1636                     if (b == 0 && ((t == 0 && l == 1) ||
1637                          (t == 1 && l == 0))) {
1638                         nscp_cmndinfo->OpCode = GDT_SCAN_START;
1639                         nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
1640                             | SCSIRAWSERVICE;
1641                         ha->scan_mode = 0x12;
1642                         TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
1643                                 ha->scan_mode));
1644                     } else {
1645                         ha->scan_mode &= 0x10;
1646                         TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
1647                     }
1648                 } else if (ha->scan_mode == 0x12) {
1649                     if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
1650                         nscp_cmndinfo->phase = SCSIRAWSERVICE;
1651                         nscp_cmndinfo->OpCode = GDT_SCAN_END;
1652                         ha->scan_mode &= 0x10;
1653                         TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
1654                                 ha->scan_mode));
1655                     }
1656                 }
1657             }
1658             if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
1659                 nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
1660                 (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
1661                 /* always GDT_CLUST_INFO! */
1662                 nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
1663             }
1664         }
1665         }
1666 
1667         if (nscp_cmndinfo->OpCode != -1) {
1668             if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
1669                 if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
1670                     this_cmd = FALSE;
1671                 next_cmd = FALSE;
1672             } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
1673                 if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
1674                     this_cmd = FALSE;
1675                 next_cmd = FALSE;
1676             } else {
1677                 memset((char*)nscp->sense_buffer,0,16);
1678                 nscp->sense_buffer[0] = 0x70;
1679                 nscp->sense_buffer[2] = NOT_READY;
1680                 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1681                 if (!nscp_cmndinfo->wait_for_completion)
1682                     nscp_cmndinfo->wait_for_completion++;
1683                 else
1684                     gdth_scsi_done(nscp);
1685             }
1686         } else if (gdth_cmnd_priv(nscp)->internal_command) {
1687             if (!(cmd_index=gdth_special_cmd(ha, nscp)))
1688                 this_cmd = FALSE;
1689             next_cmd = FALSE;
1690         } else if (b != ha->virt_bus) {
1691             if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
1692                 !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
1693                 this_cmd = FALSE;
1694             else
1695                 ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
1696         } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
1697             TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
1698                     nscp->cmnd[0], b, t, l));
1699             nscp->result = DID_BAD_TARGET << 16;
1700             if (!nscp_cmndinfo->wait_for_completion)
1701                 nscp_cmndinfo->wait_for_completion++;
1702             else
1703                 gdth_scsi_done(nscp);
1704         } else {
1705             switch (nscp->cmnd[0]) {
1706               case TEST_UNIT_READY:
1707               case INQUIRY:
1708               case REQUEST_SENSE:
1709               case READ_CAPACITY:
1710               case VERIFY:
1711               case START_STOP:
1712               case MODE_SENSE:
1713               case SERVICE_ACTION_IN_16:
1714                 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
1715                        nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
1716                        nscp->cmnd[4],nscp->cmnd[5]));
1717                 if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
1718                     /* return UNIT_ATTENTION */
1719                     TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
1720                              nscp->cmnd[0], t));
1721                     ha->hdr[t].media_changed = FALSE;
1722                     memset((char*)nscp->sense_buffer,0,16);
1723                     nscp->sense_buffer[0] = 0x70;
1724                     nscp->sense_buffer[2] = UNIT_ATTENTION;
1725                     nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1726                     if (!nscp_cmndinfo->wait_for_completion)
1727                         nscp_cmndinfo->wait_for_completion++;
1728                     else
1729                         gdth_scsi_done(nscp);
1730                 } else if (gdth_internal_cache_cmd(ha, nscp))
1731                     gdth_scsi_done(nscp);
1732                 break;
1733 
1734               case ALLOW_MEDIUM_REMOVAL:
1735                 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
1736                        nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
1737                        nscp->cmnd[4],nscp->cmnd[5]));
1738                 if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
1739                     TRACE(("Prevent r. nonremov. drive->do nothing\n"));
1740                     nscp->result = DID_OK << 16;
1741                     nscp->sense_buffer[0] = 0;
1742                     if (!nscp_cmndinfo->wait_for_completion)
1743                         nscp_cmndinfo->wait_for_completion++;
1744                     else
1745                         gdth_scsi_done(nscp);
1746                 } else {
1747                     nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
1748                     TRACE(("Prevent/allow r. %d rem. drive %d\n",
1749                            nscp->cmnd[4],nscp->cmnd[3]));
1750                     if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
1751                         this_cmd = FALSE;
1752                 }
1753                 break;
1754 
1755               case RESERVE:
1756               case RELEASE:
1757                 TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
1758                         "RESERVE" : "RELEASE"));
1759                 if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
1760                     this_cmd = FALSE;
1761                 break;
1762 
1763               case READ_6:
1764               case WRITE_6:
1765               case READ_10:
1766               case WRITE_10:
1767               case READ_16:
1768               case WRITE_16:
1769                 if (ha->hdr[t].media_changed) {
1770                     /* return UNIT_ATTENTION */
1771                     TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
1772                              nscp->cmnd[0], t));
1773                     ha->hdr[t].media_changed = FALSE;
1774                     memset((char*)nscp->sense_buffer,0,16);
1775                     nscp->sense_buffer[0] = 0x70;
1776                     nscp->sense_buffer[2] = UNIT_ATTENTION;
1777                     nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1778                     if (!nscp_cmndinfo->wait_for_completion)
1779                         nscp_cmndinfo->wait_for_completion++;
1780                     else
1781                         gdth_scsi_done(nscp);
1782                 } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
1783                     this_cmd = FALSE;
1784                 break;
1785 
1786               default:
1787                 TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
1788                         nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
1789                         nscp->cmnd[4],nscp->cmnd[5]));
1790                 printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
1791                        ha->hanum, nscp->cmnd[0]);
1792                 nscp->result = DID_ABORT << 16;
1793                 if (!nscp_cmndinfo->wait_for_completion)
1794                     nscp_cmndinfo->wait_for_completion++;
1795                 else
1796                     gdth_scsi_done(nscp);
1797                 break;
1798             }
1799         }
1800 
1801         if (!this_cmd)
1802             break;
1803         if (nscp == ha->req_first)
1804             ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
1805         else
1806             pscp->SCp.ptr = nscp->SCp.ptr;
1807         if (!next_cmd)
1808             break;
1809     }
1810 
1811     if (ha->cmd_cnt > 0) {
1812         gdth_release_event(ha);
1813     }
1814 
1815     if (!gdth_polling)
1816         spin_unlock_irqrestore(&ha->smp_lock, flags);
1817 
1818     if (gdth_polling && ha->cmd_cnt > 0) {
1819         if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
1820             printk("GDT-HA %d: Command %d timed out !\n",
1821                    ha->hanum, cmd_index);
1822     }
1823 }
1824 
1825 /*
1826  * gdth_copy_internal_data() - copy data from an internal buffer into a
1827  * scsi_cmnd's scatter-gather buffers, using kmap_atomic() as needed.
1828  */
1829 static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
1830                                     char *buffer, u16 count)
1831 {
1832     u16 cpcount,i, max_sg = scsi_sg_count(scp);
1833     u16 cpsum,cpnow;
1834     struct scatterlist *sl;
1835     char *address;
1836 
1837     cpcount = min_t(u16, count, scsi_bufflen(scp));
1838 
1839     if (cpcount) {
1840         cpsum=0;
1841         scsi_for_each_sg(scp, sl, max_sg, i) {
1842             unsigned long flags;
1843             cpnow = (u16)sl->length;
1844             TRACE(("copy_internal() now %d sum %d count %d %d\n",
1845                           cpnow, cpsum, cpcount, scsi_bufflen(scp)));
1846             if (cpsum+cpnow > cpcount)
1847                 cpnow = cpcount - cpsum;
1848             cpsum += cpnow;
1849             if (!sg_page(sl)) {
1850                 printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
1851                        ha->hanum);
1852                 return;
1853             }
1854             local_irq_save(flags);
1855             address = kmap_atomic(sg_page(sl)) + sl->offset;
1856             memcpy(address, buffer, cpnow);
1857             flush_dcache_page(sg_page(sl));
1858             kunmap_atomic(address);
1859             local_irq_restore(flags);
1860             if (cpsum == cpcount)
1861                 break;
1862             buffer += cpnow;
1863         }
1864     } else if (count) {
1865         printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
1866                ha->hanum);
1867         WARN_ON(1);
1868     }
1869 }
1870 
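/*
 * gdth_internal_cache_cmd() - emulate a few SCSI commands for host drives
 * without involving the controller: TEST_UNIT_READY/VERIFY/START_STOP are
 * simply acknowledged, while INQUIRY, REQUEST_SENSE, MODE_SENSE,
 * READ_CAPACITY and READ CAPACITY(16) build their response in a local
 * structure and copy it into the command's buffers via
 * gdth_copy_internal_data().  Returns 1 if the caller should complete the
 * command immediately, 0 if completion is deferred.
 */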
1871 static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
1872 {
1873     u8 t;
1874     gdth_inq_data inq;
1875     gdth_rdcap_data rdc;
1876     gdth_sense_data sd;
1877     gdth_modep_data mpd;
1878     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1879 
1880     t  = scp->device->id;
1881     TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
1882            scp->cmnd[0],t));
1883 
1884     scp->result = DID_OK << 16;
1885     scp->sense_buffer[0] = 0;
1886 
1887     switch (scp->cmnd[0]) {
1888       case TEST_UNIT_READY:
1889       case VERIFY:
1890       case START_STOP:
1891         TRACE2(("Test/Verify/Start hdrive %d\n",t));
1892         break;
1893 
1894       case INQUIRY:
1895         TRACE2(("Inquiry hdrive %d devtype %d\n",
1896                 t,ha->hdr[t].devtype));
1897         inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
1898         /* all disks could be reported as removable here, if you want
1899            to trigger a flush via the ALLOW_MEDIUM_REMOVAL command */
1900         inq.modif_rmb = 0x00;
1901         if ((ha->hdr[t].devtype & 1) ||
1902             (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
1903             inq.modif_rmb = 0x80;
1904         inq.version   = 2;
1905         inq.resp_aenc = 2;
1906         inq.add_length= 32;
1907         strcpy(inq.vendor,ha->oem_name);
1908         snprintf(inq.product, sizeof(inq.product), "Host Drive  #%02d",t);
1909         strcpy(inq.revision,"   ");
1910         gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
1911         break;
1912 
1913       case REQUEST_SENSE:
1914         TRACE2(("Request sense hdrive %d\n",t));
1915         sd.errorcode = 0x70;
1916         sd.segno     = 0x00;
1917         sd.key       = NO_SENSE;
1918         sd.info      = 0;
1919         sd.add_length= 0;
1920         gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
1921         break;
1922 
1923       case MODE_SENSE:
1924         TRACE2(("Mode sense hdrive %d\n",t));
1925         memset((char*)&mpd,0,sizeof(gdth_modep_data));
1926         mpd.hd.data_length = sizeof(gdth_modep_data);
1927         mpd.hd.dev_par     = (ha->hdr[t].devtype&2) ? 0x80:0;
1928         mpd.hd.bd_length   = sizeof(mpd.bd);
1929         mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
1930         mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
1931         mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
1932         gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
1933         break;
1934 
1935       case READ_CAPACITY:
1936         TRACE2(("Read capacity hdrive %d\n",t));
1937         if (ha->hdr[t].size > (u64)0xffffffff)
1938             rdc.last_block_no = 0xffffffff;
1939         else
1940             rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
1941         rdc.block_length  = cpu_to_be32(SECTOR_SIZE);
1942         gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
1943         break;
1944 
1945       case SERVICE_ACTION_IN_16:
1946         if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
1947             (ha->cache_feat & GDT_64BIT)) {
1948             gdth_rdcap16_data rdc16;
1949 
1950             TRACE2(("Read capacity (16) hdrive %d\n",t));
1951             rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
1952             rdc16.block_length  = cpu_to_be32(SECTOR_SIZE);
1953             gdth_copy_internal_data(ha, scp, (char*)&rdc16,
1954                                                  sizeof(gdth_rdcap16_data));
1955         } else {
1956             scp->result = DID_ABORT << 16;
1957         }
1958         break;
1959 
1960       default:
1961         TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
1962         break;
1963     }
1964 
1965     if (!cmndinfo->wait_for_completion)
1966         cmndinfo->wait_for_completion++;
1967     else
1968         return 1;
1969 
1970     return 0;
1971 }
1972 
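/*
 * gdth_fill_cache_cmd() - build a cache-service request for a host drive.
 * The opcode is derived from the SCSI command (GDT_READ, GDT_WRITE or
 * GDT_WRITE_THR for write-through; GDT_MOUNT/GDT_UNMOUNT/GDT_FLUSH for
 * ALLOW_MEDIUM_REMOVAL; GDT_RESERVE_DRV/GDT_RELEASE_DRV for RESERVE and
 * RELEASE).  For reads and writes the big-endian block number and count
 * are decoded from the 16-, 10- or 6-byte CDB, the scatter-gather list is
 * mapped with dma_map_sg() and copied into the 64-bit or 32-bit SG
 * descriptor format, depending on the GDT_64BIT cache feature.  Returns
 * the command index, or 0 if no index is free or the command would
 * overflow the DPMEM window.
 */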
1973 static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
1974                                u16 hdrive)
1975 {
1976     register gdth_cmd_str *cmdp;
1977     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1978     u32 cnt, blockcnt;
1979     u64 no, blockno;
1980     int i, cmd_index, read_write, sgcnt, mode64;
1981 
1982     cmdp = ha->pccb;
1983     TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
1984                  scp->cmnd[0],scp->cmd_len,hdrive));
1985 
1986     mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
1987     /* no test for READ_16/WRITE_16 in the !mode64 case is needed:
1988        those commands cannot occur, because READ CAPACITY(16) already
1989        returns an error when the cache service is not 64-bit capable */
1990 
1991     cmdp->Service = CACHESERVICE;
1992     cmdp->RequestBuffer = scp;
1993     /* search free command index */
1994     if (!(cmd_index=gdth_get_cmd_index(ha))) {
1995         TRACE(("GDT: No free command index found\n"));
1996         return 0;
1997     }
1998     /* if it's the first command, set command semaphore */
1999     if (ha->cmd_cnt == 0)
2000         gdth_set_sema0(ha);
2001 
2002     /* fill command */
2003     read_write = 0;
2004     if (cmndinfo->OpCode != -1)
2005         cmdp->OpCode = cmndinfo->OpCode;   /* special cache cmd. */
2006     else if (scp->cmnd[0] == RESERVE)
2007         cmdp->OpCode = GDT_RESERVE_DRV;
2008     else if (scp->cmnd[0] == RELEASE)
2009         cmdp->OpCode = GDT_RELEASE_DRV;
2010     else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
2011         if (scp->cmnd[4] & 1)                   /* prevent ? */
2012             cmdp->OpCode = GDT_MOUNT;
2013         else if (scp->cmnd[3] & 1)              /* removable drive ? */
2014             cmdp->OpCode = GDT_UNMOUNT;
2015         else
2016             cmdp->OpCode = GDT_FLUSH;
2017     } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
2018                scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
2019     ) {
2020         read_write = 1;
2021         if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
2022                                    (ha->cache_feat & GDT_WR_THROUGH)))
2023             cmdp->OpCode = GDT_WRITE_THR;
2024         else
2025             cmdp->OpCode = GDT_WRITE;
2026     } else {
2027         read_write = 2;
2028         cmdp->OpCode = GDT_READ;
2029     }
2030 
2031     cmdp->BoardNode = LOCALBOARD;
2032     if (mode64) {
2033         cmdp->u.cache64.DeviceNo = hdrive;
2034         cmdp->u.cache64.BlockNo  = 1;
2035         cmdp->u.cache64.sg_canz  = 0;
2036     } else {
2037         cmdp->u.cache.DeviceNo = hdrive;
2038         cmdp->u.cache.BlockNo  = 1;
2039         cmdp->u.cache.sg_canz  = 0;
2040     }
2041 
2042     if (read_write) {
2043         if (scp->cmd_len == 16) {
2044             memcpy(&no, &scp->cmnd[2], sizeof(u64));
2045             blockno = be64_to_cpu(no);
2046             memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
2047             blockcnt = be32_to_cpu(cnt);
2048         } else if (scp->cmd_len == 10) {
2049             memcpy(&no, &scp->cmnd[2], sizeof(u32));
2050             blockno = be32_to_cpu(no);
2051             memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
2052             blockcnt = be16_to_cpu(cnt);
2053         } else {
2054             memcpy(&no, &scp->cmnd[0], sizeof(u32));
2055             blockno = be32_to_cpu(no) & 0x001fffffUL;
2056             blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
2057         }
2058         if (mode64) {
2059             cmdp->u.cache64.BlockNo = blockno;
2060             cmdp->u.cache64.BlockCnt = blockcnt;
2061         } else {
2062             cmdp->u.cache.BlockNo = (u32)blockno;
2063             cmdp->u.cache.BlockCnt = blockcnt;
2064         }
2065 
2066         if (scsi_bufflen(scp)) {
2067             cmndinfo->dma_dir = (read_write == 1 ?
2068                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2069             sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
2070 			       scsi_sg_count(scp), cmndinfo->dma_dir);
2071             if (mode64) {
2072                 struct scatterlist *sl;
2073 
2074                 cmdp->u.cache64.DestAddr= (u64)-1;
2075                 cmdp->u.cache64.sg_canz = sgcnt;
2076                 scsi_for_each_sg(scp, sl, sgcnt, i) {
2077                     cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
2078                     cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
2079                 }
2080             } else {
2081                 struct scatterlist *sl;
2082 
2083                 cmdp->u.cache.DestAddr= 0xffffffff;
2084                 cmdp->u.cache.sg_canz = sgcnt;
2085                 scsi_for_each_sg(scp, sl, sgcnt, i) {
2086                     cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
2087                     cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
2088                 }
2089             }
2090 
2091 #ifdef GDTH_STATISTICS
2092             if (max_sg < (u32)sgcnt) {
2093                 max_sg = (u32)sgcnt;
2094                 TRACE3(("GDT: max_sg = %d\n",max_sg));
2095             }
2096 #endif
2097 
2098         }
2099     }
2100     /* evaluate command size, check space */
2101     if (mode64) {
2102         TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2103                cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
2104                cmdp->u.cache64.sg_lst[0].sg_ptr,
2105                cmdp->u.cache64.sg_lst[0].sg_len));
2106         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
2107                cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
2108         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
2109             (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
2110     } else {
2111         TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2112                cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
2113                cmdp->u.cache.sg_lst[0].sg_ptr,
2114                cmdp->u.cache.sg_lst[0].sg_len));
2115         TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
2116                cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
2117         ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
2118             (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
2119     }
2120     if (ha->cmd_len & 3)
2121         ha->cmd_len += (4 - (ha->cmd_len & 3));
2122 
2123     if (ha->cmd_cnt > 0) {
2124         if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
2125             ha->ic_all_size) {
2126             TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
2127             ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
2128             return 0;
2129         }
2130     }
2131 
2132     /* copy command */
2133     gdth_copy_command(ha);
2134     return cmd_index;
2135 }
2136 
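/*
 * gdth_fill_raw_cmd() - build a raw-service request for a physical device.
 * Special raw commands (e.g. GDT_SCAN_START/GDT_SCAN_END) only carry a
 * direction taken from cmndinfo->phase.  Regular commands map the sense
 * buffer with dma_map_single(), copy the CDB (16 bytes in 64-bit mode,
 * 12 otherwise), set the data direction from gdth_direction_tab and fill
 * the scatter-gather list the same way as the cache path.
 */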
2137 static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
2138 {
2139     register gdth_cmd_str *cmdp;
2140     u16 i;
2141     dma_addr_t sense_paddr;
2142     int cmd_index, sgcnt, mode64;
2143     u8 t,l;
2144     struct gdth_cmndinfo *cmndinfo;
2145 
2146     t = scp->device->id;
2147     l = scp->device->lun;
2148     cmdp = ha->pccb;
2149     TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
2150            scp->cmnd[0],b,t,l));
2151 
2152     mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
2153 
2154     cmdp->Service = SCSIRAWSERVICE;
2155     cmdp->RequestBuffer = scp;
2156     /* search free command index */
2157     if (!(cmd_index=gdth_get_cmd_index(ha))) {
2158         TRACE(("GDT: No free command index found\n"));
2159         return 0;
2160     }
2161     /* if it's the first command, set command semaphore */
2162     if (ha->cmd_cnt == 0)
2163         gdth_set_sema0(ha);
2164 
2165     cmndinfo = gdth_cmnd_priv(scp);
2166     /* fill command */
2167     if (cmndinfo->OpCode != -1) {
2168         cmdp->OpCode           = cmndinfo->OpCode; /* special raw cmd. */
2169         cmdp->BoardNode        = LOCALBOARD;
2170         if (mode64) {
2171             cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
2172             TRACE2(("special raw cmd 0x%x param 0x%x\n",
2173                     cmdp->OpCode, cmdp->u.raw64.direction));
2174             /* evaluate command size */
2175             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
2176         } else {
2177             cmdp->u.raw.direction  = (cmndinfo->phase >> 8);
2178             TRACE2(("special raw cmd 0x%x param 0x%x\n",
2179                     cmdp->OpCode, cmdp->u.raw.direction));
2180             /* evaluate command size */
2181             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
2182         }
2183 
2184     } else {
2185         sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
2186 				     DMA_FROM_DEVICE);
2187 
2188 	cmndinfo->sense_paddr  = sense_paddr;
2189         cmdp->OpCode           = GDT_WRITE;             /* always */
2190         cmdp->BoardNode        = LOCALBOARD;
2191         if (mode64) {
2192             cmdp->u.raw64.reserved   = 0;
2193             cmdp->u.raw64.mdisc_time = 0;
2194             cmdp->u.raw64.mcon_time  = 0;
2195             cmdp->u.raw64.clen       = scp->cmd_len;
2196             cmdp->u.raw64.target     = t;
2197             cmdp->u.raw64.lun        = l;
2198             cmdp->u.raw64.bus        = b;
2199             cmdp->u.raw64.priority   = 0;
2200             cmdp->u.raw64.sdlen      = scsi_bufflen(scp);
2201             cmdp->u.raw64.sense_len  = 16;
2202             cmdp->u.raw64.sense_data = sense_paddr;
2203             cmdp->u.raw64.direction  =
2204                 gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
2205             memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
2206             cmdp->u.raw64.sg_ranz    = 0;
2207         } else {
2208             cmdp->u.raw.reserved   = 0;
2209             cmdp->u.raw.mdisc_time = 0;
2210             cmdp->u.raw.mcon_time  = 0;
2211             cmdp->u.raw.clen       = scp->cmd_len;
2212             cmdp->u.raw.target     = t;
2213             cmdp->u.raw.lun        = l;
2214             cmdp->u.raw.bus        = b;
2215             cmdp->u.raw.priority   = 0;
2216             cmdp->u.raw.link_p     = 0;
2217             cmdp->u.raw.sdlen      = scsi_bufflen(scp);
2218             cmdp->u.raw.sense_len  = 16;
2219             cmdp->u.raw.sense_data = sense_paddr;
2220             cmdp->u.raw.direction  =
2221                 gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
2222             memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
2223             cmdp->u.raw.sg_ranz    = 0;
2224         }
2225 
2226         if (scsi_bufflen(scp)) {
2227             cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
2228             sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
2229 			       scsi_sg_count(scp), cmndinfo->dma_dir);
2230             if (mode64) {
2231                 struct scatterlist *sl;
2232 
2233                 cmdp->u.raw64.sdata = (u64)-1;
2234                 cmdp->u.raw64.sg_ranz = sgcnt;
2235                 scsi_for_each_sg(scp, sl, sgcnt, i) {
2236                     cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
2237                     cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
2238                 }
2239             } else {
2240                 struct scatterlist *sl;
2241 
2242                 cmdp->u.raw.sdata = 0xffffffff;
2243                 cmdp->u.raw.sg_ranz = sgcnt;
2244                 scsi_for_each_sg(scp, sl, sgcnt, i) {
2245                     cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
2246                     cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
2247                 }
2248             }
2249 
2250 #ifdef GDTH_STATISTICS
2251             if (max_sg < sgcnt) {
2252                 max_sg = sgcnt;
2253                 TRACE3(("GDT: max_sg = %d\n",sgcnt));
2254             }
2255 #endif
2256 
2257         }
2258         if (mode64) {
2259             TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2260                    cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
2261                    cmdp->u.raw64.sg_lst[0].sg_ptr,
2262                    cmdp->u.raw64.sg_lst[0].sg_len));
2263             /* evaluate command size */
2264             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
2265                 (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
2266         } else {
2267             TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2268                    cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
2269                    cmdp->u.raw.sg_lst[0].sg_ptr,
2270                    cmdp->u.raw.sg_lst[0].sg_len));
2271             /* evaluate command size */
2272             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
2273                 (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
2274         }
2275     }
2276     /* check space */
2277     if (ha->cmd_len & 3)
2278         ha->cmd_len += (4 - (ha->cmd_len & 3));
2279 
2280     if (ha->cmd_cnt > 0) {
2281         if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
2282             ha->ic_all_size) {
2283             TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
2284             ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
2285             return 0;
2286         }
2287     }
2288 
2289     /* copy command */
2290     gdth_copy_command(ha);
2291     return cmd_index;
2292 }
2293 
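/*
 * gdth_special_cmd() - send an internal command that was prepared in
 * cmndinfo->internal_cmd_str.  The command is copied into the shared
 * command buffer and its length is derived from the opcode and service:
 * IOCTLs end after the parameter pointer, cache and raw commands include
 * one SG descriptor in the 32-bit or 64-bit format.
 */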
2294 static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
2295 {
2296     register gdth_cmd_str *cmdp;
2297     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2298     int cmd_index;
2299 
2300     cmdp= ha->pccb;
2301     TRACE2(("gdth_special_cmd(): "));
2302 
2303     *cmdp = *cmndinfo->internal_cmd_str;
2304     cmdp->RequestBuffer = scp;
2305 
2306     /* search free command index */
2307     if (!(cmd_index=gdth_get_cmd_index(ha))) {
2308         TRACE(("GDT: No free command index found\n"));
2309         return 0;
2310     }
2311 
2312     /* if it's the first command, set command semaphore */
2313     if (ha->cmd_cnt == 0)
2314        gdth_set_sema0(ha);
2315 
2316     /* evaluate command size, check space */
2317     if (cmdp->OpCode == GDT_IOCTL) {
2318         TRACE2(("IOCTL\n"));
2319         ha->cmd_len =
2320             GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
2321     } else if (cmdp->Service == CACHESERVICE) {
2322         TRACE2(("cache command %d\n",cmdp->OpCode));
2323         if (ha->cache_feat & GDT_64BIT)
2324             ha->cmd_len =
2325                 GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
2326         else
2327             ha->cmd_len =
2328                 GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
2329     } else if (cmdp->Service == SCSIRAWSERVICE) {
2330         TRACE2(("raw command %d\n",cmdp->OpCode));
2331         if (ha->raw_feat & GDT_64BIT)
2332             ha->cmd_len =
2333                 GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
2334         else
2335             ha->cmd_len =
2336                 GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
2337     }
2338 
2339     if (ha->cmd_len & 3)
2340         ha->cmd_len += (4 - (ha->cmd_len & 3));
2341 
2342     if (ha->cmd_cnt > 0) {
2343         if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
2344             ha->ic_all_size) {
2345             TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
2346             ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
2347             return 0;
2348         }
2349     }
2350 
2351     /* copy command */
2352     gdth_copy_command(ha);
2353     return cmd_index;
2354 }
2355 
2356 
2357 /* Controller event handling functions */
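/*
 * Events are kept in ebuffer[], a ring of MAX_EVENTS entries indexed by
 * eoldidx (oldest) and elastidx (newest).  An event identical to the
 * newest entry only bumps same_count and last_stamp; otherwise a new slot
 * is used, and when the ring wraps onto eoldidx the oldest entry is
 * dropped.
 */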
2358 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
2359                                       u16 idx, gdth_evt_data *evt)
2360 {
2361     gdth_evt_str *e;
2362 
2363     /* no GDTH_LOCK_HA() ! */
2364     TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
2365     if (source == 0)                        /* no source -> no event */
2366         return NULL;
2367 
2368     if (ebuffer[elastidx].event_source == source &&
2369         ebuffer[elastidx].event_idx == idx &&
2370         ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
2371             !memcmp((char *)&ebuffer[elastidx].event_data.eu,
2372             (char *)&evt->eu, evt->size)) ||
2373         (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
2374             !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
2375             (char *)&evt->event_string)))) {
2376         e = &ebuffer[elastidx];
2377 	e->last_stamp = (u32)ktime_get_real_seconds();
2378         ++e->same_count;
2379     } else {
2380         if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
2381             ++elastidx;
2382             if (elastidx == MAX_EVENTS)
2383                 elastidx = 0;
2384             if (elastidx == eoldidx) {              /* reached mark ? */
2385                 ++eoldidx;
2386                 if (eoldidx == MAX_EVENTS)
2387                     eoldidx = 0;
2388             }
2389         }
2390         e = &ebuffer[elastidx];
2391         e->event_source = source;
2392         e->event_idx = idx;
2393 	e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
2394         e->same_count = 1;
2395         e->event_data = *evt;
2396         e->application = 0;
2397     }
2398     return e;
2399 }
2400 
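/*
 * gdth_read_event() - copy one stored event to *estr.  handle == -1 starts
 * at the oldest entry; the return value is the handle of the next event,
 * or -1 once the newest entry has been delivered.  gdth_readapp_event()
 * below instead scans for the first event not yet seen by the given
 * application and marks it with that application's bit.
 */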
2401 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2402 {
2403     gdth_evt_str *e;
2404     int eindex;
2405     unsigned long flags;
2406 
2407     TRACE2(("gdth_read_event() handle %d\n", handle));
2408     spin_lock_irqsave(&ha->smp_lock, flags);
2409     if (handle == -1)
2410         eindex = eoldidx;
2411     else
2412         eindex = handle;
2413     estr->event_source = 0;
2414 
2415     if (eindex < 0 || eindex >= MAX_EVENTS) {
2416         spin_unlock_irqrestore(&ha->smp_lock, flags);
2417         return eindex;
2418     }
2419     e = &ebuffer[eindex];
2420     if (e->event_source != 0) {
2421         if (eindex != elastidx) {
2422             if (++eindex == MAX_EVENTS)
2423                 eindex = 0;
2424         } else {
2425             eindex = -1;
2426         }
2427         memcpy(estr, e, sizeof(gdth_evt_str));
2428     }
2429     spin_unlock_irqrestore(&ha->smp_lock, flags);
2430     return eindex;
2431 }
2432 
2433 static void gdth_readapp_event(gdth_ha_str *ha,
2434                                u8 application, gdth_evt_str *estr)
2435 {
2436     gdth_evt_str *e;
2437     int eindex;
2438     unsigned long flags;
2439     u8 found = FALSE;
2440 
2441     TRACE2(("gdth_readapp_event() app. %d\n", application));
2442     spin_lock_irqsave(&ha->smp_lock, flags);
2443     eindex = eoldidx;
2444     for (;;) {
2445         e = &ebuffer[eindex];
2446         if (e->event_source == 0)
2447             break;
2448         if ((e->application & application) == 0) {
2449             e->application |= application;
2450             found = TRUE;
2451             break;
2452         }
2453         if (eindex == elastidx)
2454             break;
2455         if (++eindex == MAX_EVENTS)
2456             eindex = 0;
2457     }
2458     if (found)
2459         memcpy(estr, e, sizeof(gdth_evt_str));
2460     else
2461         estr->event_source = 0;
2462     spin_unlock_irqrestore(&ha->smp_lock, flags);
2463 }
2464 
2465 static void gdth_clear_events(void)
2466 {
2467     TRACE(("gdth_clear_events()"));
2468 
2469     eoldidx = elastidx = 0;
2470     ebuffer[0].event_source = 0;
2471 }
2472 
2473 
2474 /* SCSI interface functions */
2475 
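/*
 * __gdth_interrupt() - common interrupt handler.  Depending on the board
 * type (GDT_PCI, GDT_PCINEW, GDT_PCIMPR) the status, info and service
 * registers are read and the interrupt is acknowledged.  ASYNCINDEX
 * triggers asynchronous event handling, SPEZINDEX reports an unknown or
 * uninitialized service; any other index selects the originating command
 * in cmd_tab[IStatus-2], which is then finished via gdth_sync_event():
 * a return value of 2 requeues the command, 1 completes it.
 */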
2476 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
2477                                     int gdth_from_wait, int* pIndex)
2478 {
2479     gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
2480     gdt6_dpram_str __iomem *dp6_ptr;
2481     struct scsi_cmnd *scp;
2482     int rval, i;
2483     u8 IStatus;
2484     u16 Service;
2485     unsigned long flags = 0;
2486 
2487     TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
2488 
2489     /* if polling and not from gdth_wait() -> return */
2490     if (gdth_polling) {
2491         if (!gdth_from_wait) {
2492             return IRQ_HANDLED;
2493         }
2494     }
2495 
2496     if (!gdth_polling)
2497         spin_lock_irqsave(&ha->smp_lock, flags);
2498 
2499     /* read the interrupt status of this controller */
2500     IStatus = gdth_get_status(ha);
2501     if (IStatus == 0) {
2502         /* spurious interrupt */
2503         if (!gdth_polling)
2504             spin_unlock_irqrestore(&ha->smp_lock, flags);
2505         return IRQ_HANDLED;
2506     }
2507 
2508 #ifdef GDTH_STATISTICS
2509     ++act_ints;
2510 #endif
2511 
2512         if (ha->type == GDT_PCI) {
2513             dp6_ptr = ha->brd;
2514             if (IStatus & 0x80) {                       /* error flag */
2515                 IStatus &= ~0x80;
2516                 ha->status = readw(&dp6_ptr->u.ic.Status);
2517                 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
2518             } else                                      /* no error */
2519                 ha->status = S_OK;
2520             ha->info = readl(&dp6_ptr->u.ic.Info[0]);
2521             ha->service = readw(&dp6_ptr->u.ic.Service);
2522             ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);
2523 
2524             writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
2525             writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
2526             writeb(0, &dp6_ptr->io.Sema1);     /* reset status semaphore */
2527         } else if (ha->type == GDT_PCINEW) {
2528             if (IStatus & 0x80) {                       /* error flag */
2529                 IStatus &= ~0x80;
2530                 ha->status = inw(PTR2USHORT(&ha->plx->status));
2531                 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
2532             } else
2533                 ha->status = S_OK;
2534             ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
2535             ha->service = inw(PTR2USHORT(&ha->plx->service));
2536             ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
2537 
2538             outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
2539             outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
2540         } else if (ha->type == GDT_PCIMPR) {
2541             dp6m_ptr = ha->brd;
2542             if (IStatus & 0x80) {                       /* error flag */
2543                 IStatus &= ~0x80;
2544                 ha->status = readw(&dp6m_ptr->i960r.status);
2545                 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
2546             } else                                      /* no error */
2547                 ha->status = S_OK;
2548 
2549             ha->info = readl(&dp6m_ptr->i960r.info[0]);
2550             ha->service = readw(&dp6m_ptr->i960r.service);
2551             ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
2552 
2553             /* event string */
2554             if (IStatus == ASYNCINDEX) {
2555                 if (ha->service != SCREENSERVICE &&
2556                     (ha->fw_vers & 0xff) >= 0x1a) {
2557                     ha->dvr.severity = readb
2558                         (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
2559                     for (i = 0; i < 256; ++i) {
2560                         ha->dvr.event_string[i] = readb
2561                             (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
2562                         if (ha->dvr.event_string[i] == 0)
2563                             break;
2564                     }
2565                 }
2566             }
2567             writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
2568             writeb(0, &dp6m_ptr->i960r.sema1_reg);
2569         } else {
2570             TRACE2(("gdth_interrupt() unknown controller type\n"));
2571             if (!gdth_polling)
2572                 spin_unlock_irqrestore(&ha->smp_lock, flags);
2573             return IRQ_HANDLED;
2574         }
2575 
2576         TRACE(("gdth_interrupt() index %d stat %d info %d\n",
2577                IStatus,ha->status,ha->info));
2578 
2579         if (gdth_from_wait) {
2580             *pIndex = (int)IStatus;
2581         }
2582 
2583         if (IStatus == ASYNCINDEX) {
2584             TRACE2(("gdth_interrupt() async. event\n"));
2585             gdth_async_event(ha);
2586             if (!gdth_polling)
2587                 spin_unlock_irqrestore(&ha->smp_lock, flags);
2588             gdth_next(ha);
2589             return IRQ_HANDLED;
2590         }
2591 
2592         if (IStatus == SPEZINDEX) {
2593             TRACE2(("Service unknown or not initialized !\n"));
2594             ha->dvr.size = sizeof(ha->dvr.eu.driver);
2595             ha->dvr.eu.driver.ionode = ha->hanum;
2596             gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
2597             if (!gdth_polling)
2598                 spin_unlock_irqrestore(&ha->smp_lock, flags);
2599             return IRQ_HANDLED;
2600         }
2601         scp     = ha->cmd_tab[IStatus-2].cmnd;
2602         Service = ha->cmd_tab[IStatus-2].service;
2603         ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
2604         if (scp == UNUSED_CMND) {
2605             TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
2606             ha->dvr.size = sizeof(ha->dvr.eu.driver);
2607             ha->dvr.eu.driver.ionode = ha->hanum;
2608             ha->dvr.eu.driver.index = IStatus;
2609             gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
2610             if (!gdth_polling)
2611                 spin_unlock_irqrestore(&ha->smp_lock, flags);
2612             return IRQ_HANDLED;
2613         }
2614         if (scp == INTERNAL_CMND) {
2615             TRACE(("gdth_interrupt() answer to internal command\n"));
2616             if (!gdth_polling)
2617                 spin_unlock_irqrestore(&ha->smp_lock, flags);
2618             return IRQ_HANDLED;
2619         }
2620 
2621         TRACE(("gdth_interrupt() sync. status\n"));
2622         rval = gdth_sync_event(ha,Service,IStatus,scp);
2623         if (!gdth_polling)
2624             spin_unlock_irqrestore(&ha->smp_lock, flags);
2625         if (rval == 2) {
2626             gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
2627         } else if (rval == 1) {
2628             gdth_scsi_done(scp);
2629         }
2630 
2631     gdth_next(ha);
2632     return IRQ_HANDLED;
2633 }
2634 
2635 static irqreturn_t gdth_interrupt(int irq, void *dev_id)
2636 {
2637 	gdth_ha_str *ha = dev_id;
2638 
2639 	return __gdth_interrupt(ha, false, NULL);
2640 }
2641 
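/*
 * gdth_sync_event() - evaluate the completion status of one command.  For
 * SCREENSERVICE the message buffer is printed and, if the firmware expects
 * more data or an answer, a follow-up GDT_READ/GDT_WRITE screen command is
 * issued directly.  For cache/raw commands the SG and sense mappings are
 * released; S_BSY and the cluster mount/unmount special commands return 2
 * to request a retry, otherwise the SCSI result is filled in.
 */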
2642 static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
2643                                                               struct scsi_cmnd *scp)
2644 {
2645     gdth_msg_str *msg;
2646     gdth_cmd_str *cmdp;
2647     u8 b, t;
2648     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2649 
2650     cmdp = ha->pccb;
2651     TRACE(("gdth_sync_event() serv %d status %d\n",
2652            service,ha->status));
2653 
2654     if (service == SCREENSERVICE) {
2655         msg  = ha->pmsg;
2656         TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
2657                msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
2658         if (msg->msg_len > MSGLEN+1)
2659             msg->msg_len = MSGLEN+1;
2660         if (msg->msg_len)
2661             if (!(msg->msg_answer && msg->msg_ext)) {
2662                 msg->msg_text[msg->msg_len] = '\0';
2663                 printk("%s",msg->msg_text);
2664             }
2665 
2666         if (msg->msg_ext && !msg->msg_answer) {
2667             while (gdth_test_busy(ha))
2668                 gdth_delay(0);
2669             cmdp->Service       = SCREENSERVICE;
2670             cmdp->RequestBuffer = SCREEN_CMND;
2671             gdth_get_cmd_index(ha);
2672             gdth_set_sema0(ha);
2673             cmdp->OpCode        = GDT_READ;
2674             cmdp->BoardNode     = LOCALBOARD;
2675             cmdp->u.screen.reserved  = 0;
2676             cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
2677             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
2678             ha->cmd_offs_dpmem = 0;
2679             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
2680                 + sizeof(u64);
2681             ha->cmd_cnt = 0;
2682             gdth_copy_command(ha);
2683             gdth_release_event(ha);
2684             return 0;
2685         }
2686 
2687         if (msg->msg_answer && msg->msg_alen) {
2688             /* default answers (getchar() not possible) */
2689             if (msg->msg_alen == 1) {
2690                 msg->msg_alen = 0;
2691                 msg->msg_len = 1;
2692                 msg->msg_text[0] = 0;
2693             } else {
2694                 msg->msg_alen -= 2;
2695                 msg->msg_len = 2;
2696                 msg->msg_text[0] = 1;
2697                 msg->msg_text[1] = 0;
2698             }
2699             msg->msg_ext    = 0;
2700             msg->msg_answer = 0;
2701             while (gdth_test_busy(ha))
2702                 gdth_delay(0);
2703             cmdp->Service       = SCREENSERVICE;
2704             cmdp->RequestBuffer = SCREEN_CMND;
2705             gdth_get_cmd_index(ha);
2706             gdth_set_sema0(ha);
2707             cmdp->OpCode        = GDT_WRITE;
2708             cmdp->BoardNode     = LOCALBOARD;
2709             cmdp->u.screen.reserved  = 0;
2710             cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
2711             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
2712             ha->cmd_offs_dpmem = 0;
2713             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
2714                 + sizeof(u64);
2715             ha->cmd_cnt = 0;
2716             gdth_copy_command(ha);
2717             gdth_release_event(ha);
2718             return 0;
2719         }
2720         printk("\n");
2721 
2722     } else {
2723         b = scp->device->channel;
2724         t = scp->device->id;
2725         if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
2726             ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
2727         }
2728         /* cache or raw service */
2729         if (ha->status == S_BSY) {
2730             TRACE2(("Controller busy -> retry !\n"));
2731             if (cmndinfo->OpCode == GDT_MOUNT)
2732                 cmndinfo->OpCode = GDT_CLUST_INFO;
2733             /* retry */
2734             return 2;
2735         }
2736         if (scsi_bufflen(scp))
2737             dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
2738                          cmndinfo->dma_dir);
2739 
2740         if (cmndinfo->sense_paddr)
2741             dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
2742 			   DMA_FROM_DEVICE);
2743 
2744         if (ha->status == S_OK) {
2745             cmndinfo->status = S_OK;
2746             cmndinfo->info = ha->info;
2747             if (cmndinfo->OpCode != -1) {
2748                 TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
2749                         cmndinfo->OpCode));
2750                 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
2751                 if (cmndinfo->OpCode == GDT_CLUST_INFO) {
2752                     ha->hdr[t].cluster_type = (u8)ha->info;
2753                     if (!(ha->hdr[t].cluster_type &
2754                         CLUSTER_MOUNTED)) {
2755                         /* NOT MOUNTED -> MOUNT */
2756                         cmndinfo->OpCode = GDT_MOUNT;
2757                         if (ha->hdr[t].cluster_type &
2758                             CLUSTER_RESERVED) {
2759                             /* cluster drive RESERVED (on the other node) */
2760                             cmndinfo->phase = -2;      /* reservation conflict */
2761                         }
2762                     } else {
2763                         cmndinfo->OpCode = -1;
2764                     }
2765                 } else {
2766                     if (cmndinfo->OpCode == GDT_MOUNT) {
2767                         ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
2768                         ha->hdr[t].media_changed = TRUE;
2769                     } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
2770                         ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
2771                         ha->hdr[t].media_changed = TRUE;
2772                     }
2773                     cmndinfo->OpCode = -1;
2774                 }
2775                 /* retry */
2776                 cmndinfo->priority = HIGH_PRI;
2777                 return 2;
2778             } else {
2779                 /* RESERVE/RELEASE ? */
2780                 if (scp->cmnd[0] == RESERVE) {
2781                     ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
2782                 } else if (scp->cmnd[0] == RELEASE) {
2783                     ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
2784                 }
2785                 scp->result = DID_OK << 16;
2786                 scp->sense_buffer[0] = 0;
2787             }
2788         } else {
2789             cmndinfo->status = ha->status;
2790             cmndinfo->info = ha->info;
2791 
2792             if (cmndinfo->OpCode != -1) {
2793                 TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
2794                         cmndinfo->OpCode, ha->status));
2795                 if (cmndinfo->OpCode == GDT_SCAN_START ||
2796                     cmndinfo->OpCode == GDT_SCAN_END) {
2797                     cmndinfo->OpCode = -1;
2798                     /* retry */
2799                     cmndinfo->priority = HIGH_PRI;
2800                     return 2;
2801                 }
2802                 memset((char*)scp->sense_buffer,0,16);
2803                 scp->sense_buffer[0] = 0x70;
2804                 scp->sense_buffer[2] = NOT_READY;
2805                 scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
2806             } else if (service == CACHESERVICE) {
2807                 if (ha->status == S_CACHE_UNKNOWN &&
2808                     (ha->hdr[t].cluster_type &
2809                      CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
2810                     /* bus reset -> force GDT_CLUST_INFO */
2811                     ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
2812                 }
2813                 memset((char*)scp->sense_buffer,0,16);
2814                 if (ha->status == (u16)S_CACHE_RESERV) {
2815                     scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
2816                 } else {
2817                     scp->sense_buffer[0] = 0x70;
2818                     scp->sense_buffer[2] = NOT_READY;
2819                     scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
2820                 }
2821                 if (!cmndinfo->internal_command) {
2822                     ha->dvr.size = sizeof(ha->dvr.eu.sync);
2823                     ha->dvr.eu.sync.ionode  = ha->hanum;
2824                     ha->dvr.eu.sync.service = service;
2825                     ha->dvr.eu.sync.status  = ha->status;
2826                     ha->dvr.eu.sync.info    = ha->info;
2827                     ha->dvr.eu.sync.hostdrive = t;
2828                     if (ha->status >= 0x8000)
2829                         gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
2830                     else
2831                         gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
2832                 }
2833             } else {
2834                 /* sense buffer filled from controller firmware (DMA) */
2835                 if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
2836                     scp->result = DID_BAD_TARGET << 16;
2837                 } else {
2838                     scp->result = (DID_OK << 16) | ha->info;
2839                 }
2840             }
2841         }
2842         if (!cmndinfo->wait_for_completion)
2843             cmndinfo->wait_for_completion++;
2844         else
2845             return 1;
2846     }
2847 
2848     return 0;
2849 }
2850 
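/*
 * Format table for asynchronous cache service events, decoded by
 * gdth_log_event() below: byte 0 of each entry is the descriptor length,
 * followed by (offset into dvr->eu.stream, parameter size 1/2/4) pairs;
 * the printk format string starts at offset f[0].
 * Example: "\005\000\002\006\004" pushes a 2-byte value from offset 0
 * (the HA number) and a 4-byte value from offset 6 (the drive number)
 * for the format string that follows.
 */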
2851 static char *async_cache_tab[] = {
2852 /* 0*/  "\011\000\002\002\002\004\002\006\004"
2853         "GDT HA %u, service %u, async. status %u/%lu unknown",
2854 /* 1*/  "\011\000\002\002\002\004\002\006\004"
2855         "GDT HA %u, service %u, async. status %u/%lu unknown",
2856 /* 2*/  "\005\000\002\006\004"
2857         "GDT HA %u, Host Drive %lu not ready",
2858 /* 3*/  "\005\000\002\006\004"
2859         "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
2860 /* 4*/  "\005\000\002\006\004"
2861         "GDT HA %u, mirror update on Host Drive %lu failed",
2862 /* 5*/  "\005\000\002\006\004"
2863         "GDT HA %u, Mirror Drive %lu failed",
2864 /* 6*/  "\005\000\002\006\004"
2865         "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
2866 /* 7*/  "\005\000\002\006\004"
2867         "GDT HA %u, Host Drive %lu write protected",
2868 /* 8*/  "\005\000\002\006\004"
2869         "GDT HA %u, media changed in Host Drive %lu",
2870 /* 9*/  "\005\000\002\006\004"
2871         "GDT HA %u, Host Drive %lu is offline",
2872 /*10*/  "\005\000\002\006\004"
2873         "GDT HA %u, media change of Mirror Drive %lu",
2874 /*11*/  "\005\000\002\006\004"
2875         "GDT HA %u, Mirror Drive %lu is write protected",
2876 /*12*/  "\005\000\002\006\004"
2877         "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
2878 /*13*/  "\007\000\002\006\002\010\002"
2879         "GDT HA %u, Array Drive %u: Cache Drive %u failed",
2880 /*14*/  "\005\000\002\006\002"
2881         "GDT HA %u, Array Drive %u: FAIL state entered",
2882 /*15*/  "\005\000\002\006\002"
2883         "GDT HA %u, Array Drive %u: error",
2884 /*16*/  "\007\000\002\006\002\010\002"
2885         "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
2886 /*17*/  "\005\000\002\006\002"
2887         "GDT HA %u, Array Drive %u: parity build failed",
2888 /*18*/  "\005\000\002\006\002"
2889         "GDT HA %u, Array Drive %u: drive rebuild failed",
2890 /*19*/  "\005\000\002\010\002"
2891         "GDT HA %u, Test of Hot Fix %u failed",
2892 /*20*/  "\005\000\002\006\002"
2893         "GDT HA %u, Array Drive %u: drive build finished successfully",
2894 /*21*/  "\005\000\002\006\002"
2895         "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
2896 /*22*/  "\007\000\002\006\002\010\002"
2897         "GDT HA %u, Array Drive %u: Hot Fix %u activated",
2898 /*23*/  "\005\000\002\006\002"
2899         "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
2900 /*24*/  "\005\000\002\010\002"
2901         "GDT HA %u, mirror update on Cache Drive %u completed",
2902 /*25*/  "\005\000\002\010\002"
2903         "GDT HA %u, mirror update on Cache Drive %lu failed",
2904 /*26*/  "\005\000\002\006\002"
2905         "GDT HA %u, Array Drive %u: drive rebuild started",
2906 /*27*/  "\005\000\002\012\001"
2907         "GDT HA %u, Fault bus %u: SHELF OK detected",
2908 /*28*/  "\005\000\002\012\001"
2909         "GDT HA %u, Fault bus %u: SHELF not OK detected",
2910 /*29*/  "\007\000\002\012\001\013\001"
2911         "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
2912 /*30*/  "\007\000\002\012\001\013\001"
2913         "GDT HA %u, Fault bus %u, ID %u: new disk detected",
2914 /*31*/  "\007\000\002\012\001\013\001"
2915         "GDT HA %u, Fault bus %u, ID %u: old disk detected",
2916 /*32*/  "\007\000\002\012\001\013\001"
2917         "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
2918 /*33*/  "\007\000\002\012\001\013\001"
2919         "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
2920 /*34*/  "\011\000\002\012\001\013\001\006\004"
2921         "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
2922 /*35*/  "\007\000\002\012\001\013\001"
2923         "GDT HA %u, Fault bus %u, ID %u: disk write protected",
2924 /*36*/  "\007\000\002\012\001\013\001"
2925         "GDT HA %u, Fault bus %u, ID %u: disk not available",
2926 /*37*/  "\007\000\002\012\001\006\004"
2927         "GDT HA %u, Fault bus %u: swap detected (%lu)",
2928 /*38*/  "\007\000\002\012\001\013\001"
2929         "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
2930 /*39*/  "\007\000\002\012\001\013\001"
2931         "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
2932 /*40*/  "\007\000\002\012\001\013\001"
2933         "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
2934 /*41*/  "\007\000\002\012\001\013\001"
2935         "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
2936 /*42*/  "\005\000\002\006\002"
2937         "GDT HA %u, Array Drive %u: drive build started",
2938 /*43*/  "\003\000\002"
2939         "GDT HA %u, DRAM parity error detected",
2940 /*44*/  "\005\000\002\006\002"
2941         "GDT HA %u, Mirror Drive %u: update started",
2942 /*45*/  "\007\000\002\006\002\010\002"
2943         "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
2944 /*46*/  "\005\000\002\006\002"
2945         "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
2946 /*47*/  "\005\000\002\006\002"
2947         "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
2948 /*48*/  "\005\000\002\006\002"
2949         "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
2950 /*49*/  "\005\000\002\006\002"
2951         "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
2952 /*50*/  "\007\000\002\012\001\013\001"
2953         "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
2954 /*51*/  "\005\000\002\006\002"
2955         "GDT HA %u, Array Drive %u: expand started",
2956 /*52*/  "\005\000\002\006\002"
2957         "GDT HA %u, Array Drive %u: expand finished successfully",
2958 /*53*/  "\005\000\002\006\002"
2959         "GDT HA %u, Array Drive %u: expand failed",
2960 /*54*/  "\003\000\002"
2961         "GDT HA %u, CPU temperature critical",
2962 /*55*/  "\003\000\002"
2963         "GDT HA %u, CPU temperature OK",
2964 /*56*/  "\005\000\002\006\004"
2965         "GDT HA %u, Host drive %lu created",
2966 /*57*/  "\005\000\002\006\002"
2967         "GDT HA %u, Array Drive %u: expand restarted",
2968 /*58*/  "\005\000\002\006\002"
2969         "GDT HA %u, Array Drive %u: expand stopped",
2970 /*59*/  "\005\000\002\010\002"
2971         "GDT HA %u, Mirror Drive %u: drive build quitted",
2972 /*60*/  "\005\000\002\006\002"
2973         "GDT HA %u, Array Drive %u: parity build quitted",
2974 /*61*/  "\005\000\002\006\002"
2975         "GDT HA %u, Array Drive %u: drive rebuild quitted",
2976 /*62*/  "\005\000\002\006\002"
2977         "GDT HA %u, Array Drive %u: parity verify started",
2978 /*63*/  "\005\000\002\006\002"
2979         "GDT HA %u, Array Drive %u: parity verify done",
2980 /*64*/  "\005\000\002\006\002"
2981         "GDT HA %u, Array Drive %u: parity verify failed",
2982 /*65*/  "\005\000\002\006\002"
2983         "GDT HA %u, Array Drive %u: parity error detected",
2984 /*66*/  "\005\000\002\006\002"
2985         "GDT HA %u, Array Drive %u: parity verify quitted",
2986 /*67*/  "\005\000\002\006\002"
2987         "GDT HA %u, Host Drive %u reserved",
2988 /*68*/  "\005\000\002\006\002"
2989         "GDT HA %u, Host Drive %u mounted and released",
2990 /*69*/  "\005\000\002\006\002"
2991         "GDT HA %u, Host Drive %u released",
2992 /*70*/  "\003\000\002"
2993         "GDT HA %u, DRAM error detected and corrected with ECC",
2994 /*71*/  "\003\000\002"
2995         "GDT HA %u, Uncorrectable DRAM error detected with ECC",
2996 /*72*/  "\011\000\002\012\001\013\001\014\001"
2997         "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
2998 /*73*/  "\005\000\002\006\002"
2999         "GDT HA %u, Host drive %u reset locally",
3000 /*74*/  "\005\000\002\006\002"
3001         "GDT HA %u, Host drive %u reset remotely",
3002 /*75*/  "\003\000\002"
3003         "GDT HA %u, async. status 75 unknown",
3004 };
3005 
3006 
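/*
 * Asynchronous controller event: a screen service MSG_REQUEST triggers a
 * GDT_READ of the message buffer; every other service is turned into a
 * driver event via gdth_store_event() and logged with gdth_log_event().
 * PCIMPR boards with firmware revision (fw_vers & 0xff) >= 0x1a deliver
 * severity and event string themselves, so only the status is copied.
 */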
3007 static int gdth_async_event(gdth_ha_str *ha)
3008 {
3009     gdth_cmd_str *cmdp;
3010     int cmd_index;
3011 
3012     cmdp= ha->pccb;
3013     TRACE2(("gdth_async_event() ha %d serv %d\n",
3014             ha->hanum, ha->service));
3015 
3016     if (ha->service == SCREENSERVICE) {
3017         if (ha->status == MSG_REQUEST) {
3018             while (gdth_test_busy(ha))
3019                 gdth_delay(0);
3020             cmdp->Service       = SCREENSERVICE;
3021             cmdp->RequestBuffer = SCREEN_CMND;
3022             cmd_index = gdth_get_cmd_index(ha);
3023             gdth_set_sema0(ha);
3024             cmdp->OpCode        = GDT_READ;
3025             cmdp->BoardNode     = LOCALBOARD;
3026             cmdp->u.screen.reserved  = 0;
3027             cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
3028             cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
3029             ha->cmd_offs_dpmem = 0;
3030             ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3031                 + sizeof(u64);
3032             ha->cmd_cnt = 0;
3033             gdth_copy_command(ha);
3034             printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
3035                        (u16)((ha->brd_phys>>3)&0x1f));
3036             gdth_release_event(ha);
3037         }
3038 
3039     } else {
3040         if (ha->type == GDT_PCIMPR &&
3041             (ha->fw_vers & 0xff) >= 0x1a) {
3042             ha->dvr.size = 0;
3043             ha->dvr.eu.async.ionode = ha->hanum;
3044             ha->dvr.eu.async.status  = ha->status;
3045             /* severity and event_string already set! */
3046         } else {
3047             ha->dvr.size = sizeof(ha->dvr.eu.async);
3048             ha->dvr.eu.async.ionode   = ha->hanum;
3049             ha->dvr.eu.async.service = ha->service;
3050             ha->dvr.eu.async.status  = ha->status;
3051             ha->dvr.eu.async.info    = ha->info;
3052             *(u32 *)ha->dvr.eu.async.scsi_coord  = ha->info2;
3053         }
3054         gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
3055         gdth_log_event( &ha->dvr, NULL );
3056 
3057         /* new host drive from expand? */
3058         if (ha->service == CACHESERVICE && ha->status == 56) {
3059             TRACE2(("gdth_async_event(): new host drive %d created\n",
3060                     (u16)ha->info));
3061             /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
3062         }
3063     }
3064     return 1;
3065 }
3066 
3067 static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
3068 {
3069     gdth_stackframe stack;
3070     char *f = NULL;
3071     int i,j;
3072 
3073     TRACE2(("gdth_log_event()\n"));
3074     if (dvr->size == 0) {
3075         if (buffer == NULL) {
3076             printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
3077         } else {
3078             sprintf(buffer,"Adapter %d: %s\n",
3079                 dvr->eu.async.ionode,dvr->event_string);
3080         }
3081     } else if (dvr->eu.async.service == CACHESERVICE &&
3082         INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
3083         TRACE2(("GDT: Async. event cache service, event no.: %d\n",
3084                 dvr->eu.async.status));
3085 
3086         f = async_cache_tab[dvr->eu.async.status];
3087 
3088         /* i: parameter to push, j: stack element to fill */
3089         for (j=0,i=1; i < f[0]; i+=2) {
3090             switch (f[i+1]) {
3091               case 4:
3092                 stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
3093                 break;
3094               case 2:
3095                 stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
3096                 break;
3097               case 1:
3098                 stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
3099                 break;
3100               default:
3101                 break;
3102             }
3103         }
3104 
3105         if (buffer == NULL) {
3106             printk(&f[(int)f[0]],stack);
3107             printk("\n");
3108         } else {
3109             sprintf(buffer,&f[(int)f[0]],stack);
3110         }
3111 
3112     } else {
3113         if (buffer == NULL) {
3114             printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
3115                    dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
3116         } else {
3117             sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
3118                     dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
3119         }
3120     }
3121 }
3122 
3123 #ifdef GDTH_STATISTICS
3124 static u8	gdth_timer_running;
3125 
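/*
 * Statistics timer: counts the busy command slots (act_stats) and queued
 * requests (act_rq) of the first adapter instance, dumps the counters via
 * TRACE2 and re-arms itself every 30 seconds.  It stops re-arming (and
 * clears gdth_timer_running) once the instance list is empty.
 */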
3126 static void gdth_timeout(struct timer_list *unused)
3127 {
3128     u32 i;
3129     struct scsi_cmnd *nscp;
3130     gdth_ha_str *ha;
3131     unsigned long flags;
3132 
3133     if(unlikely(list_empty(&gdth_instances))) {
3134 	    gdth_timer_running = 0;
3135 	    return;
3136     }
3137 
3138     ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
3139     spin_lock_irqsave(&ha->smp_lock, flags);
3140 
3141     for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
3142         if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
3143             ++act_stats;
3144 
3145     for (act_rq=0,
3146          nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
3147         ++act_rq;
3148 
3149     TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
3150             act_ints, act_ios, act_stats, act_rq));
3151     act_ints = act_ios = 0;
3152 
3153     gdth_timer.expires = jiffies + 30 * HZ;
3154     add_timer(&gdth_timer);
3155     spin_unlock_irqrestore(&ha->smp_lock, flags);
3156 }
3157 
3158 static void gdth_timer_init(void)
3159 {
3160 	if (gdth_timer_running)
3161 		return;
3162 	gdth_timer_running = 1;
3163 	TRACE2(("gdth_detect(): Initializing timer !\n"));
3164 	gdth_timer.expires = jiffies + HZ;
3165 	add_timer(&gdth_timer);
3166 }
3167 #else
3168 static inline void gdth_timer_init(void)
3169 {
3170 }
3171 #endif
3172 
3173 static void __init internal_setup(char *str,int *ints)
3174 {
3175     int i;
3176     char *cur_str, *argv;
3177 
3178     TRACE2(("internal_setup() str %s ints[0] %d\n",
3179             str ? str:"NULL", ints ? ints[0]:0));
3180 
3181     /* analyse string */
3182     argv = str;
3183     while (argv && (cur_str = strchr(argv, ':'))) {
3184         int val = 0, c = *++cur_str;
3185 
3186         if (c == 'n' || c == 'N')
3187             val = 0;
3188         else if (c == 'y' || c == 'Y')
3189             val = 1;
3190         else
3191             val = (int)simple_strtoul(cur_str, NULL, 0);
3192 
3193         if (!strncmp(argv, "disable:", 8))
3194             disable = val;
3195         else if (!strncmp(argv, "reserve_mode:", 13))
3196             reserve_mode = val;
3197         else if (!strncmp(argv, "reverse_scan:", 13))
3198             reverse_scan = val;
3199         else if (!strncmp(argv, "hdr_channel:", 12))
3200             hdr_channel = val;
3201         else if (!strncmp(argv, "max_ids:", 8))
3202             max_ids = val;
3203         else if (!strncmp(argv, "rescan:", 7))
3204             rescan = val;
3205         else if (!strncmp(argv, "shared_access:", 14))
3206             shared_access = val;
3207         else if (!strncmp(argv, "reserve_list:", 13)) {
3208             reserve_list[0] = val;
3209             for (i = 1; i < MAX_RES_ARGS; i++) {
3210                 cur_str = strchr(cur_str, ',');
3211                 if (!cur_str)
3212                     break;
3213                 if (!isdigit((int)*++cur_str)) {
3214                     --cur_str;
3215                     break;
3216                 }
3217                 reserve_list[i] =
3218                     (int)simple_strtoul(cur_str, NULL, 0);
3219             }
3220             if (!cur_str)
3221                 break;
3222             argv = ++cur_str;
3223             continue;
3224         }
3225 
3226         if ((argv = strchr(argv, ',')))
3227             ++argv;
3228     }
3229 }
3230 
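/*
 * "gdth=" command line handling: option_setup() collects the leading
 * comma-separated numeric arguments into ints[] (ints[0] holds the count)
 * and hands the remaining string to internal_setup() above, which parses
 * the name:value options.
 */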
3231 int __init option_setup(char *str)
3232 {
3233     int ints[MAXHA];
3234     char *cur = str;
3235     int i = 1;
3236 
3237     TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
3238 
3239     while (cur && isdigit(*cur) && i < MAXHA) {
3240         ints[i++] = simple_strtoul(cur, NULL, 0);
3241         if ((cur = strchr(cur, ',')) != NULL) cur++;
3242     }
3243 
3244     ints[0] = i - 1;
3245     internal_setup(cur, ints);
3246     return 1;
3247 }
3248 
3249 static const char *gdth_ctr_name(gdth_ha_str *ha)
3250 {
3251     TRACE2(("gdth_ctr_name()\n"));
3252 
3253     if (ha->type == GDT_PCI) {
3254         switch (ha->pdev->device) {
3255           case PCI_DEVICE_ID_VORTEX_GDT60x0:
3256             return("GDT6000/6020/6050");
3257           case PCI_DEVICE_ID_VORTEX_GDT6000B:
3258             return("GDT6000B/6010");
3259         }
3260     }
3261     /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
3262 
3263     return("");
3264 }
3265 
3266 static const char *gdth_info(struct Scsi_Host *shp)
3267 {
3268     gdth_ha_str *ha = shost_priv(shp);
3269 
3270     TRACE2(("gdth_info()\n"));
3271     return ((const char *)ha->binfo.type_string);
3272 }
3273 
3274 static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3275 {
3276 	gdth_ha_str *ha = shost_priv(scp->device->host);
3277 	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3278 	u8 b, t;
3279 	unsigned long flags;
3280 	enum blk_eh_timer_return retval = BLK_EH_DONE;
3281 
3282 	TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
3283 	b = scp->device->channel;
3284 	t = scp->device->id;
3285 
3286 	/*
3287 	 * We don't really honor the command timeout; instead we allow up
3288 	 * to six times the actual command timeout. Reset the timer if
3289 	 * this command has timed out fewer than six times so far.
3290 	 */
3291 	if (++cmndinfo->timeout_count < 6)
3292 		retval = BLK_EH_RESET_TIMER;
3293 
3294 	/* Reset the timeout if it is locked IO */
3295 	spin_lock_irqsave(&ha->smp_lock, flags);
3296 	if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
3297 	    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
3298 		TRACE2(("%s(): locked IO, reset timeout\n", __func__));
3299 		retval = BLK_EH_RESET_TIMER;
3300 	}
3301 	spin_unlock_irqrestore(&ha->smp_lock, flags);
3302 
3303 	return retval;
3304 }
3305 
3306 
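/*
 * Bus reset handler: all commands queued for the affected channel are
 * removed from the command table.  On the virtual bus the cluster
 * reservation of every present host drive is cleared via GDT_CLUST_RESET;
 * on a physical channel the per-target I/O counters are reset and a
 * GDT_RESET_BUS is sent to the raw service.  Both paths run with
 * gdth_polling enabled while the internal commands are issued.
 */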
3307 static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
3308 {
3309     gdth_ha_str *ha = shost_priv(scp->device->host);
3310     int i;
3311     unsigned long flags;
3312     struct scsi_cmnd *cmnd;
3313     u8 b;
3314 
3315     TRACE2(("gdth_eh_bus_reset()\n"));
3316 
3317     b = scp->device->channel;
3318 
3319     /* clear command tab */
3320     spin_lock_irqsave(&ha->smp_lock, flags);
3321     for (i = 0; i < GDTH_MAXCMDS; ++i) {
3322         cmnd = ha->cmd_tab[i].cmnd;
3323         if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
3324             ha->cmd_tab[i].cmnd = UNUSED_CMND;
3325     }
3326     spin_unlock_irqrestore(&ha->smp_lock, flags);
3327 
3328     if (b == ha->virt_bus) {
3329         /* host drives */
3330         for (i = 0; i < MAX_HDRIVES; ++i) {
3331             if (ha->hdr[i].present) {
3332                 spin_lock_irqsave(&ha->smp_lock, flags);
3333                 gdth_polling = TRUE;
3334                 while (gdth_test_busy(ha))
3335                     gdth_delay(0);
3336                 if (gdth_internal_cmd(ha, CACHESERVICE,
3337                                       GDT_CLUST_RESET, i, 0, 0))
3338                     ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
3339                 gdth_polling = FALSE;
3340                 spin_unlock_irqrestore(&ha->smp_lock, flags);
3341             }
3342         }
3343     } else {
3344         /* raw devices */
3345         spin_lock_irqsave(&ha->smp_lock, flags);
3346         for (i = 0; i < MAXID; ++i)
3347             ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
3348         gdth_polling = TRUE;
3349         while (gdth_test_busy(ha))
3350             gdth_delay(0);
3351         gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
3352                           BUS_L2P(ha,b), 0, 0);
3353         gdth_polling = FALSE;
3354         spin_unlock_irqrestore(&ha->smp_lock, flags);
3355     }
3356     return SUCCESS;
3357 }
3358 
3359 static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
3360 {
3361     u8 b, t;
3362     gdth_ha_str *ha = shost_priv(sdev->host);
3363     struct scsi_device *sd;
3364     unsigned capacity;
3365 
3366     sd = sdev;
3367     capacity = cap;
3368     b = sd->channel;
3369     t = sd->id;
3370     TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
3371 
3372     if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
3373         /* raw device or host drive without mapping information */
3374         TRACE2(("Evaluate mapping\n"));
3375         gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
3376     } else {
3377         ip[0] = ha->hdr[t].heads;
3378         ip[1] = ha->hdr[t].secs;
3379         ip[2] = capacity / ip[0] / ip[1];
3380     }
3381 
3382     TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
3383             ip[0],ip[1],ip[2]));
3384     return 0;
3385 }
3386 
3387 
3388 static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
3389 				void (*done)(struct scsi_cmnd *))
3390 {
3391     gdth_ha_str *ha = shost_priv(scp->device->host);
3392     struct gdth_cmndinfo *cmndinfo;
3393 
3394     TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
3395 
3396     cmndinfo = gdth_get_cmndinfo(ha);
3397     BUG_ON(!cmndinfo);
3398 
3399     scp->scsi_done = done;
3400     cmndinfo->timeout_count = 0;
3401     cmndinfo->priority = DEFAULT_PRI;
3402 
3403     return __gdth_queuecommand(ha, scp, cmndinfo);
3404 }
3405 
3406 static DEF_SCSI_QCMD(gdth_queuecommand)
3407 
3408 static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
3409 				struct gdth_cmndinfo *cmndinfo)
3410 {
3411     scp->host_scribble = (unsigned char *)cmndinfo;
3412     cmndinfo->wait_for_completion = 1;
3413     cmndinfo->phase = -1;
3414     cmndinfo->OpCode = -1;
3415 
3416 #ifdef GDTH_STATISTICS
3417     ++act_ios;
3418 #endif
3419 
3420     gdth_putq(ha, scp, cmndinfo->priority);
3421     gdth_next(ha);
3422     return 0;
3423 }
3424 
3425 
3426 static int gdth_open(struct inode *inode, struct file *filep)
3427 {
3428     gdth_ha_str *ha;
3429 
3430     mutex_lock(&gdth_mutex);
3431     list_for_each_entry(ha, &gdth_instances, list) {
3432         if (!ha->sdev)
3433             ha->sdev = scsi_get_host_dev(ha->shost);
3434     }
3435     mutex_unlock(&gdth_mutex);
3436 
3437     TRACE(("gdth_open()\n"));
3438     return 0;
3439 }
3440 
3441 static int gdth_close(struct inode *inode, struct file *filep)
3442 {
3443     TRACE(("gdth_close()\n"));
3444     return 0;
3445 }
3446 
3447 static int ioc_event(void __user *arg)
3448 {
3449     gdth_ioctl_event evt;
3450     gdth_ha_str *ha;
3451     unsigned long flags;
3452 
3453     if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
3454         return -EFAULT;
3455     ha = gdth_find_ha(evt.ionode);
3456     if (!ha)
3457         return -EFAULT;
3458 
3459     if (evt.erase == 0xff) {
3460         if (evt.event.event_source == ES_TEST)
3461             evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
3462         else if (evt.event.event_source == ES_DRIVER)
3463             evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
3464         else if (evt.event.event_source == ES_SYNC)
3465             evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
3466         else
3467             evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
3468         spin_lock_irqsave(&ha->smp_lock, flags);
3469         gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
3470                          &evt.event.event_data);
3471         spin_unlock_irqrestore(&ha->smp_lock, flags);
3472     } else if (evt.erase == 0xfe) {
3473         gdth_clear_events();
3474     } else if (evt.erase == 0) {
3475         evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
3476     } else {
3477         gdth_readapp_event(ha, evt.erase, &evt.event);
3478     }
3479     if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
3480         return -EFAULT;
3481     return 0;
3482 }
3483 
3484 static int ioc_lockdrv(void __user *arg)
3485 {
3486     gdth_ioctl_lockdrv ldrv;
3487     u8 i, j;
3488     unsigned long flags;
3489     gdth_ha_str *ha;
3490 
3491     if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
3492         return -EFAULT;
3493     ha = gdth_find_ha(ldrv.ionode);
3494     if (!ha)
3495         return -EFAULT;
3496 
3497     for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
3498         j = ldrv.drives[i];
3499         if (j >= MAX_HDRIVES || !ha->hdr[j].present)
3500             continue;
3501         if (ldrv.lock) {
3502             spin_lock_irqsave(&ha->smp_lock, flags);
3503             ha->hdr[j].lock = 1;
3504             spin_unlock_irqrestore(&ha->smp_lock, flags);
3505             gdth_wait_completion(ha, ha->bus_cnt, j);
3506         } else {
3507             spin_lock_irqsave(&ha->smp_lock, flags);
3508             ha->hdr[j].lock = 0;
3509             spin_unlock_irqrestore(&ha->smp_lock, flags);
3510             gdth_next(ha);
3511         }
3512     }
3513     return 0;
3514 }
3515 
3516 static int ioc_resetdrv(void __user *arg, char *cmnd)
3517 {
3518     gdth_ioctl_reset res;
3519     gdth_cmd_str cmd;
3520     gdth_ha_str *ha;
3521     int rval;
3522 
3523     if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
3524         res.number >= MAX_HDRIVES)
3525         return -EFAULT;
3526     ha = gdth_find_ha(res.ionode);
3527     if (!ha)
3528         return -EFAULT;
3529 
3530     if (!ha->hdr[res.number].present)
3531         return 0;
3532     memset(&cmd, 0, sizeof(gdth_cmd_str));
3533     cmd.Service = CACHESERVICE;
3534     cmd.OpCode = GDT_CLUST_RESET;
3535     if (ha->cache_feat & GDT_64BIT)
3536         cmd.u.cache64.DeviceNo = res.number;
3537     else
3538         cmd.u.cache.DeviceNo = res.number;
3539 
3540     rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
3541     if (rval < 0)
3542         return rval;
3543     res.status = rval;
3544 
3545     if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
3546         return -EFAULT;
3547     return 0;
3548 }
3549 
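/*
 * The two helpers below set up the data buffer address for the cache or raw
 * service ioctl command; when the firmware reports GDT_64BIT the 32-bit
 * layout filled in by user space is first converted to the 64-bit variant.
 * With SCATTER_GATHER support a single-element SG list pointing at the
 * coherent ioctl buffer is used (DestAddr/sdata set to the magic
 * -1 / 0xffffffff value); otherwise the buffer address is passed directly.
 */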
3550 static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
3551 		u64 paddr)
3552 {
3553 	if (ha->cache_feat & GDT_64BIT) {
3554 		/* copy elements from 32-bit IOCTL structure */
3555 		gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
3556 		gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
3557 		gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
3558 
3559 		if (ha->cache_feat & SCATTER_GATHER) {
3560 			gen->command.u.cache64.DestAddr = (u64)-1;
3561 			gen->command.u.cache64.sg_canz = 1;
3562 			gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
3563 			gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
3564 			gen->command.u.cache64.sg_lst[1].sg_len = 0;
3565 		} else {
3566 			gen->command.u.cache64.DestAddr = paddr;
3567 			gen->command.u.cache64.sg_canz = 0;
3568 		}
3569 	} else {
3570 		if (ha->cache_feat & SCATTER_GATHER) {
3571 			gen->command.u.cache.DestAddr = 0xffffffff;
3572 			gen->command.u.cache.sg_canz = 1;
3573 			gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
3574 			gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
3575 			gen->command.u.cache.sg_lst[1].sg_len = 0;
3576 		} else {
3577 			gen->command.u.cache.DestAddr = paddr;
3578 			gen->command.u.cache.sg_canz = 0;
3579 		}
3580 	}
3581 }
3582 
3583 static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
3584 		u64 paddr)
3585 {
3586 	if (ha->raw_feat & GDT_64BIT) {
3587 		/* copy elements from 32-bit IOCTL structure */
3588 		char cmd[16];
3589 
3590 		gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
3591 		gen->command.u.raw64.bus = gen->command.u.raw.bus;
3592 		gen->command.u.raw64.lun = gen->command.u.raw.lun;
3593 		gen->command.u.raw64.target = gen->command.u.raw.target;
3594 		memcpy(cmd, gen->command.u.raw.cmd, 16);
3595 		memcpy(gen->command.u.raw64.cmd, cmd, 16);
3596 		gen->command.u.raw64.clen = gen->command.u.raw.clen;
3597 		gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
3598 		gen->command.u.raw64.direction = gen->command.u.raw.direction;
3599 
3600 		/* addresses */
3601 		if (ha->raw_feat & SCATTER_GATHER) {
3602 			gen->command.u.raw64.sdata = (u64)-1;
3603 			gen->command.u.raw64.sg_ranz = 1;
3604 			gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
3605 			gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
3606 			gen->command.u.raw64.sg_lst[1].sg_len = 0;
3607 		} else {
3608 			gen->command.u.raw64.sdata = paddr;
3609 			gen->command.u.raw64.sg_ranz = 0;
3610                 }
3611 
3612 		gen->command.u.raw64.sense_data = paddr + gen->data_len;
3613 	} else {
3614 		if (ha->raw_feat & SCATTER_GATHER) {
3615 			gen->command.u.raw.sdata = 0xffffffff;
3616 			gen->command.u.raw.sg_ranz = 1;
3617 			gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
3618 			gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
3619 			gen->command.u.raw.sg_lst[1].sg_len = 0;
3620 		} else {
3621 			gen->command.u.raw.sdata = paddr;
3622 			gen->command.u.raw.sg_ranz = 0;
3623                 }
3624 
3625 		gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
3626 	}
3627 }
3628 
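/*
 * GDTIOCTL_GENERAL: data and sense buffer are copied from user space into
 * one coherent DMA buffer, the embedded command is pointed at it (IOCTL
 * parameter block, cache service or raw service), executed through
 * __gdth_execute(), and the buffer plus the updated header are copied back.
 */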
3629 static int ioc_general(void __user *arg, char *cmnd)
3630 {
3631 	gdth_ioctl_general gen;
3632 	gdth_ha_str *ha;
3633 	char *buf = NULL;
3634 	dma_addr_t paddr;
3635 	int rval;
3636 
3637 	if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
3638 		return -EFAULT;
3639 	ha = gdth_find_ha(gen.ionode);
3640 	if (!ha)
3641 		return -EFAULT;
3642 
3643 	if (gen.data_len > INT_MAX)
3644 		return -EINVAL;
3645 	if (gen.sense_len > INT_MAX)
3646 		return -EINVAL;
3647 	if (gen.data_len + gen.sense_len > INT_MAX)
3648 		return -EINVAL;
3649 
3650 	if (gen.data_len + gen.sense_len > 0) {
3651 		buf = dma_alloc_coherent(&ha->pdev->dev,
3652 				gen.data_len + gen.sense_len, &paddr,
3653 				GFP_KERNEL);
3654 		if (!buf)
3655 			return -EFAULT;
3656 
3657 		rval = -EFAULT;
3658 		if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
3659 				   gen.data_len + gen.sense_len))
3660 			goto out_free_buf;
3661 
3662 		if (gen.command.OpCode == GDT_IOCTL)
3663 			gen.command.u.ioctl.p_param = paddr;
3664 		else if (gen.command.Service == CACHESERVICE)
3665 			gdth_ioc_cacheservice(ha, &gen, paddr);
3666 		else if (gen.command.Service == SCSIRAWSERVICE)
3667 			gdth_ioc_scsiraw(ha, &gen, paddr);
3668 		else
3669 			goto out_free_buf;
3670 	}
3671 
3672 	rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
3673 			&gen.info);
3674 	if (rval < 0)
3675 		goto out_free_buf;
3676 	gen.status = rval;
3677 
3678 	rval = -EFAULT;
3679 	if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
3680 			 gen.data_len + gen.sense_len))
3681 		goto out_free_buf;
3682 	if (copy_to_user(arg, &gen,
3683 			sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
3684 		goto out_free_buf;
3685 
3686 	rval = 0;
3687 out_free_buf:
3688 	if (buf)
3689 		dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
3690 				  buf, paddr);
3691 	return rval;
3692 }
3693 
3694 static int ioc_hdrlist(void __user *arg, char *cmnd)
3695 {
3696     gdth_ioctl_rescan *rsc;
3697     gdth_cmd_str *cmd;
3698     gdth_ha_str *ha;
3699     u8 i;
3700     int rc = -ENOMEM;
3701     u32 cluster_type = 0;
3702 
3703     rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
3704     cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
3705     if (!rsc || !cmd)
3706         goto free_fail;
3707 
3708     if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
3709         (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
3710         rc = -EFAULT;
3711         goto free_fail;
3712     }
3713     memset(cmd, 0, sizeof(gdth_cmd_str));
3714 
3715     for (i = 0; i < MAX_HDRIVES; ++i) {
3716         if (!ha->hdr[i].present) {
3717             rsc->hdr_list[i].bus = 0xff;
3718             continue;
3719         }
3720         rsc->hdr_list[i].bus = ha->virt_bus;
3721         rsc->hdr_list[i].target = i;
3722         rsc->hdr_list[i].lun = 0;
3723         rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
3724         if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
3725             cmd->Service = CACHESERVICE;
3726             cmd->OpCode = GDT_CLUST_INFO;
3727             if (ha->cache_feat & GDT_64BIT)
3728                 cmd->u.cache64.DeviceNo = i;
3729             else
3730                 cmd->u.cache.DeviceNo = i;
3731             if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
3732                 rsc->hdr_list[i].cluster_type = cluster_type;
3733         }
3734     }
3735 
3736     if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
3737         rc = -EFAULT;
3738     else
3739         rc = 0;
3740 
3741 free_fail:
3742     kfree(rsc);
3743     kfree(cmd);
3744     return rc;
3745 }
3746 
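/*
 * GDTIOCTL_RESCAN: with flag == 0 the cache service is re-initialized
 * (GDT_INIT or GDT_X_INIT_HOST) and all reported host drives are re-read;
 * otherwise only drive rsc->hdr_no is refreshed.  For each drive the size
 * and mapping (GDT_INFO), device type (GDT_DEVTYPE), cluster info
 * (GDT_CLUST_INFO) and R/W attributes (GDT_RW_ATTRIBS) are queried.
 */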
3747 static int ioc_rescan(void __user *arg, char *cmnd)
3748 {
3749     gdth_ioctl_rescan *rsc;
3750     gdth_cmd_str *cmd;
3751     u16 i, status, hdr_cnt;
3752     u32 info;
3753     int cyls, hds, secs;
3754     int rc = -ENOMEM;
3755     unsigned long flags;
3756     gdth_ha_str *ha;
3757 
3758     rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
3759     cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
3760     if (!cmd || !rsc)
3761         goto free_fail;
3762 
3763     if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
3764         (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
3765         rc = -EFAULT;
3766         goto free_fail;
3767     }
3768     memset(cmd, 0, sizeof(gdth_cmd_str));
3769 
3770     if (rsc->flag == 0) {
3771         /* old method: re-init. cache service */
3772         cmd->Service = CACHESERVICE;
3773         if (ha->cache_feat & GDT_64BIT) {
3774             cmd->OpCode = GDT_X_INIT_HOST;
3775             cmd->u.cache64.DeviceNo = LINUX_OS;
3776         } else {
3777             cmd->OpCode = GDT_INIT;
3778             cmd->u.cache.DeviceNo = LINUX_OS;
3779         }
3780 
3781         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
3782         i = 0;
3783         hdr_cnt = (status == S_OK ? (u16)info : 0);
3784     } else {
3785         i = rsc->hdr_no;
3786         hdr_cnt = i + 1;
3787     }
3788 
3789     for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
3790         cmd->Service = CACHESERVICE;
3791         cmd->OpCode = GDT_INFO;
3792         if (ha->cache_feat & GDT_64BIT)
3793             cmd->u.cache64.DeviceNo = i;
3794         else
3795             cmd->u.cache.DeviceNo = i;
3796 
3797         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
3798 
3799         spin_lock_irqsave(&ha->smp_lock, flags);
3800         rsc->hdr_list[i].bus = ha->virt_bus;
3801         rsc->hdr_list[i].target = i;
3802         rsc->hdr_list[i].lun = 0;
3803         if (status != S_OK) {
3804             ha->hdr[i].present = FALSE;
3805         } else {
3806             ha->hdr[i].present = TRUE;
3807             ha->hdr[i].size = info;
3808             /* evaluate mapping */
3809             ha->hdr[i].size &= ~SECS32;
3810             gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
3811             ha->hdr[i].heads = hds;
3812             ha->hdr[i].secs = secs;
3813             /* round size */
3814             ha->hdr[i].size = cyls * hds * secs;
3815         }
3816         spin_unlock_irqrestore(&ha->smp_lock, flags);
3817         if (status != S_OK)
3818             continue;
3819 
3820         /* extended info, if GDT_64BIT, for drives > 2 TB */
3821         /* but we need ha->info2, not yet stored in scp->SCp */
3822 
3823         /* devtype, cluster info, R/W attribs */
3824         cmd->Service = CACHESERVICE;
3825         cmd->OpCode = GDT_DEVTYPE;
3826         if (ha->cache_feat & GDT_64BIT)
3827             cmd->u.cache64.DeviceNo = i;
3828         else
3829             cmd->u.cache.DeviceNo = i;
3830 
3831         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
3832 
3833         spin_lock_irqsave(&ha->smp_lock, flags);
3834         ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
3835         spin_unlock_irqrestore(&ha->smp_lock, flags);
3836 
3837         cmd->Service = CACHESERVICE;
3838         cmd->OpCode = GDT_CLUST_INFO;
3839         if (ha->cache_feat & GDT_64BIT)
3840             cmd->u.cache64.DeviceNo = i;
3841         else
3842             cmd->u.cache.DeviceNo = i;
3843 
3844         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
3845 
3846         spin_lock_irqsave(&ha->smp_lock, flags);
3847         ha->hdr[i].cluster_type =
3848             ((status == S_OK && !shared_access) ? (u16)info : 0);
3849         spin_unlock_irqrestore(&ha->smp_lock, flags);
3850         rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
3851 
3852         cmd->Service = CACHESERVICE;
3853         cmd->OpCode = GDT_RW_ATTRIBS;
3854         if (ha->cache_feat & GDT_64BIT)
3855             cmd->u.cache64.DeviceNo = i;
3856         else
3857             cmd->u.cache.DeviceNo = i;
3858 
3859         status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
3860 
3861         spin_lock_irqsave(&ha->smp_lock, flags);
3862         ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
3863         spin_unlock_irqrestore(&ha->smp_lock, flags);
3864     }
3865 
3866     if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
3867         rc = -EFAULT;
3868     else
3869         rc = 0;
3870 
3871 free_fail:
3872     kfree(rsc);
3873     kfree(cmd);
3874     return rc;
3875 }
3876 
3877 static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3878 {
3879     gdth_ha_str *ha;
3880     struct scsi_cmnd *scp;
3881     unsigned long flags;
3882     char cmnd[MAX_COMMAND_SIZE];
3883     void __user *argp = (void __user *)arg;
3884 
3885     memset(cmnd, 0xff, 12);
3886 
3887     TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
3888 
3889     switch (cmd) {
3890       case GDTIOCTL_CTRCNT:
3891       {
3892         int cnt = gdth_ctr_count;
3893         if (put_user(cnt, (int __user *)argp))
3894                 return -EFAULT;
3895         break;
3896       }
3897 
3898       case GDTIOCTL_DRVERS:
3899       {
3900         int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
3901         if (put_user(ver, (int __user *)argp))
3902                 return -EFAULT;
3903         break;
3904       }
3905 
3906       case GDTIOCTL_OSVERS:
3907       {
3908         gdth_ioctl_osvers osv;
3909 
3910         osv.version = (u8)(LINUX_VERSION_CODE >> 16);
3911         osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
3912         osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
3913         if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
3914                 return -EFAULT;
3915         break;
3916       }
3917 
3918       case GDTIOCTL_CTRTYPE:
3919       {
3920         gdth_ioctl_ctrtype ctrt;
3921 
3922         if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
3923             (NULL == (ha = gdth_find_ha(ctrt.ionode))))
3924             return -EFAULT;
3925 
3926         if (ha->type != GDT_PCIMPR) {
3927 	    ctrt.type = (u8)((ha->stype<<4) + 6);
3928         } else {
3929             ctrt.type =  (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
3930             if (ha->stype >= 0x300)
3931                 ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
3932             else
3933                 ctrt.ext_type = 0x6000 | ha->stype;
3934         }
3935         ctrt.device_id = ha->pdev->device;
3936         ctrt.sub_device_id = ha->pdev->subsystem_device;
3937         ctrt.info = ha->brd_phys;
3938         ctrt.oem_id = ha->oem_id;
3939         if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
3940             return -EFAULT;
3941         break;
3942       }
3943 
3944       case GDTIOCTL_GENERAL:
3945         return ioc_general(argp, cmnd);
3946 
3947       case GDTIOCTL_EVENT:
3948         return ioc_event(argp);
3949 
3950       case GDTIOCTL_LOCKDRV:
3951         return ioc_lockdrv(argp);
3952 
3953       case GDTIOCTL_LOCKCHN:
3954       {
3955         gdth_ioctl_lockchn lchn;
3956         u8 i, j;
3957 
3958         if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
3959             (NULL == (ha = gdth_find_ha(lchn.ionode))))
3960             return -EFAULT;
3961 
3962         i = lchn.channel;
3963         if (i < ha->bus_cnt) {
3964             if (lchn.lock) {
3965                 spin_lock_irqsave(&ha->smp_lock, flags);
3966                 ha->raw[i].lock = 1;
3967                 spin_unlock_irqrestore(&ha->smp_lock, flags);
3968 		for (j = 0; j < ha->tid_cnt; ++j)
3969                     gdth_wait_completion(ha, i, j);
3970             } else {
3971                 spin_lock_irqsave(&ha->smp_lock, flags);
3972                 ha->raw[i].lock = 0;
3973                 spin_unlock_irqrestore(&ha->smp_lock, flags);
3974 		for (j = 0; j < ha->tid_cnt; ++j)
3975                     gdth_next(ha);
3976             }
3977         }
3978         break;
3979       }
3980 
3981       case GDTIOCTL_RESCAN:
3982         return ioc_rescan(argp, cmnd);
3983 
3984       case GDTIOCTL_HDRLIST:
3985         return ioc_hdrlist(argp, cmnd);
3986 
3987       case GDTIOCTL_RESET_BUS:
3988       {
3989         gdth_ioctl_reset res;
3990         int rval;
3991 
3992         if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
3993             (NULL == (ha = gdth_find_ha(res.ionode))))
3994             return -EFAULT;
3995 
3996         scp  = kzalloc(sizeof(*scp), GFP_KERNEL);
3997         if (!scp)
3998             return -ENOMEM;
3999         scp->device = ha->sdev;
4000         scp->cmd_len = 12;
4001         scp->device->channel = res.number;
4002         rval = gdth_eh_bus_reset(scp);
4003         res.status = (rval == SUCCESS ? S_OK : S_GENERR);
4004         kfree(scp);
4005 
4006         if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
4007             return -EFAULT;
4008         break;
4009       }
4010 
4011       case GDTIOCTL_RESET_DRV:
4012         return ioc_resetdrv(argp, cmnd);
4013 
4014       default:
4015         break;
4016     }
4017     return 0;
4018 }
4019 
4020 static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
4021 			        unsigned long arg)
4022 {
4023 	int ret;
4024 
4025 	mutex_lock(&gdth_mutex);
4026 	ret = gdth_ioctl(file, cmd, arg);
4027 	mutex_unlock(&gdth_mutex);
4028 
4029 	return ret;
4030 }
4031 
4032 /* flush routine */
4033 static void gdth_flush(gdth_ha_str *ha)
4034 {
4035     int             i;
4036     gdth_cmd_str    gdtcmd;
4037     char            cmnd[MAX_COMMAND_SIZE];
4038     memset(cmnd, 0xff, MAX_COMMAND_SIZE);
4039 
4040     TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
4041 
4042     for (i = 0; i < MAX_HDRIVES; ++i) {
4043         if (ha->hdr[i].present) {
4044             gdtcmd.BoardNode = LOCALBOARD;
4045             gdtcmd.Service = CACHESERVICE;
4046             gdtcmd.OpCode = GDT_FLUSH;
4047             if (ha->cache_feat & GDT_64BIT) {
4048                 gdtcmd.u.cache64.DeviceNo = i;
4049                 gdtcmd.u.cache64.BlockNo = 1;
4050                 gdtcmd.u.cache64.sg_canz = 0;
4051             } else {
4052                 gdtcmd.u.cache.DeviceNo = i;
4053                 gdtcmd.u.cache.BlockNo = 1;
4054                 gdtcmd.u.cache.sg_canz = 0;
4055             }
4056             TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
4057 
4058             gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
4059         }
4060     }
4061 }
4062 
4063 /* configure lun */
4064 static int gdth_slave_configure(struct scsi_device *sdev)
4065 {
4066     sdev->skip_ms_page_3f = 1;
4067     sdev->skip_ms_page_8 = 1;
4068     return 0;
4069 }
4070 
4071 static struct scsi_host_template gdth_template = {
4072         .name                   = "GDT SCSI Disk Array Controller",
4073         .info                   = gdth_info,
4074         .queuecommand           = gdth_queuecommand,
4075         .eh_bus_reset_handler   = gdth_eh_bus_reset,
4076         .slave_configure        = gdth_slave_configure,
4077         .bios_param             = gdth_bios_param,
4078         .show_info              = gdth_show_info,
4079         .write_info             = gdth_set_info,
4080 	.eh_timed_out		= gdth_timed_out,
4081         .proc_name              = "gdth",
4082         .can_queue              = GDTH_MAXCMDS,
4083         .this_id                = -1,
4084         .sg_tablesize           = GDTH_MAXSG,
4085         .cmd_per_lun            = GDTH_MAXC_P_L,
4086         .unchecked_isa_dma      = 1,
4087 	.no_write_same		= 1,
4088 };
4089 
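/*
 * PCI probe: allocate the Scsi_Host, initialize the board (gdth_init_pci()),
 * request the shared IRQ, set up the coherent scratch and message buffers,
 * scan the drives and select the DMA mask (64 bit only if the cache, raw
 * and screen services all report GDT_64BIT and the board supports 64-bit
 * DMA), then register and scan the SCSI host.
 */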
4090 static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
4091 {
4092 	struct Scsi_Host *shp;
4093 	gdth_ha_str *ha;
4094 	dma_addr_t scratch_dma_handle = 0;
4095 	int error, i;
4096 	struct pci_dev *pdev = pcistr->pdev;
4097 
4098 	*ha_out = NULL;
4099 
4100 	shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
4101 	if (!shp)
4102 		return -ENOMEM;
4103 	ha = shost_priv(shp);
4104 
4105 	error = -ENODEV;
4106 	if (!gdth_init_pci(pdev, pcistr, ha))
4107 		goto out_host_put;
4108 
4109 	/* controller found and initialized */
4110 	printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
4111 		pdev->bus->number,
4112 		PCI_SLOT(pdev->devfn),
4113 		ha->irq);
4114 
4115 	error = request_irq(ha->irq, gdth_interrupt,
4116 				IRQF_SHARED, "gdth", ha);
4117 	if (error) {
4118 		printk("GDT-PCI: Unable to allocate IRQ\n");
4119 		goto out_host_put;
4120 	}
4121 
4122 	shp->unchecked_isa_dma = 0;
4123 	shp->irq = ha->irq;
4124 	shp->dma_channel = 0xff;
4125 
4126 	ha->hanum = gdth_ctr_count++;
4127 	ha->shost = shp;
4128 
4129 	ha->pccb = &ha->cmdext;
4130 	ha->ccb_phys = 0L;
4131 
4132 	error = -ENOMEM;
4133 
4134 	ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
4135 				&scratch_dma_handle, GFP_KERNEL);
4136 	if (!ha->pscratch)
4137 		goto out_free_irq;
4138 	ha->scratch_phys = scratch_dma_handle;
4139 
4140 	ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
4141 				&scratch_dma_handle, GFP_KERNEL);
4142 	if (!ha->pmsg)
4143 		goto out_free_pscratch;
4144 	ha->msg_phys = scratch_dma_handle;
4145 
4146 	ha->scratch_busy = FALSE;
4147 	ha->req_first = NULL;
4148 	ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
4149 	if (max_ids > 0 && max_ids < ha->tid_cnt)
4150 		ha->tid_cnt = max_ids;
4151 	for (i = 0; i < GDTH_MAXCMDS; ++i)
4152 		ha->cmd_tab[i].cmnd = UNUSED_CMND;
4153 	ha->scan_mode = rescan ? 0x10 : 0;
4154 
4155 	error = -ENODEV;
4156 	if (!gdth_search_drives(ha)) {
4157 		printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
4158 		goto out_free_pmsg;
4159 	}
4160 
4161 	if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
4162 		hdr_channel = ha->bus_cnt;
4163 	ha->virt_bus = hdr_channel;
4164 
4165 	/* 64-bit DMA only supported from FW >= x.43 */
4166 	if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
4167 	    !ha->dma64_support) {
4168 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4169 			printk(KERN_WARNING "GDT-PCI %d: "
4170 				"Unable to set 32-bit DMA\n", ha->hanum);
4171 				goto out_free_pmsg;
4172 		}
4173 	} else {
4174 		shp->max_cmd_len = 16;
4175 		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4176 			printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
4177 		} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4178 			printk(KERN_WARNING "GDT-PCI %d: "
4179 				"Unable to set 64/32-bit DMA\n", ha->hanum);
4180 			goto out_free_pmsg;
4181 		}
4182 	}
4183 
4184 	shp->max_id      = ha->tid_cnt;
4185 	shp->max_lun     = MAXLUN;
4186 	shp->max_channel = ha->bus_cnt;
4187 
4188 	spin_lock_init(&ha->smp_lock);
4189 	gdth_enable_int(ha);
4190 
4191 	error = scsi_add_host(shp, &pdev->dev);
4192 	if (error)
4193 		goto out_free_pmsg;
4194 	list_add_tail(&ha->list, &gdth_instances);
4195 
4196 	pci_set_drvdata(ha->pdev, ha);
4197 	gdth_timer_init();
4198 
4199 	scsi_scan_host(shp);
4200 
4201 	*ha_out = ha;
4202 
4203 	return 0;
4204 
4205  out_free_pmsg:
4206 	dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
4207 				ha->pmsg, ha->msg_phys);
4208  out_free_pscratch:
4209 	dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
4210 				ha->pscratch, ha->scratch_phys);
4211  out_free_irq:
4212 	free_irq(ha->irq, ha);
4213 	gdth_ctr_count--;
4214  out_host_put:
4215 	scsi_host_put(shp);
4216 	return error;
4217 }
4218 
4219 static void gdth_remove_one(gdth_ha_str *ha)
4220 {
4221 	struct Scsi_Host *shp = ha->shost;
4222 
4223 	TRACE2(("gdth_remove_one()\n"));
4224 
4225 	scsi_remove_host(shp);
4226 
4227 	gdth_flush(ha);
4228 
4229 	if (ha->sdev) {
4230 		scsi_free_host_dev(ha->sdev);
4231 		ha->sdev = NULL;
4232 	}
4233 
4234 	if (shp->irq)
4235 		free_irq(shp->irq,ha);
4236 
4237 	if (ha->pscratch)
4238 		dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
4239 			ha->pscratch, ha->scratch_phys);
4240 	if (ha->pmsg)
4241 		dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
4242 			ha->pmsg, ha->msg_phys);
4243 	if (ha->ccb_phys)
4244 		dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
4245 			sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
4246 
4247 	scsi_host_put(shp);
4248 }
4249 
4250 static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
4251 {
4252 	gdth_ha_str *ha;
4253 
4254 	TRACE2(("gdth_halt() event %d\n", (int)event));
4255 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
4256 		return NOTIFY_DONE;
4257 
4258 	list_for_each_entry(ha, &gdth_instances, list)
4259 		gdth_flush(ha);
4260 
4261 	return NOTIFY_OK;
4262 }
4263 
4264 static struct notifier_block gdth_notifier = {
4265     gdth_halt, NULL, 0
4266 };
4267 
4268 static int __init gdth_init(void)
4269 {
4270 	if (disable) {
4271 		printk("GDT-HA: Controller driver disabled from"
4272                        " command line !\n");
4273 		return 0;
4274 	}
4275 
4276 	printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
4277 	       GDTH_VERSION_STR);
4278 
4279 	/* initializations */
4280 	gdth_polling = TRUE;
4281 	gdth_clear_events();
4282 	timer_setup(&gdth_timer, gdth_timeout, 0);
4283 
4284 	/* scanning for PCI controllers */
4285 	if (pci_register_driver(&gdth_pci_driver)) {
4286 		gdth_ha_str *ha;
4287 
4288 		list_for_each_entry(ha, &gdth_instances, list)
4289 			gdth_remove_one(ha);
4290 		return -ENODEV;
4291 	}
4292 
4293 	TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
4294 
4295 	major = register_chrdev(0,"gdth", &gdth_fops);
4296 	register_reboot_notifier(&gdth_notifier);
4297 	gdth_polling = FALSE;
4298 	return 0;
4299 }
4300 
4301 static void __exit gdth_exit(void)
4302 {
4303 	gdth_ha_str *ha;
4304 
4305 	unregister_chrdev(major, "gdth");
4306 	unregister_reboot_notifier(&gdth_notifier);
4307 
4308 #ifdef GDTH_STATISTICS
4309 	del_timer_sync(&gdth_timer);
4310 #endif
4311 
4312 	pci_unregister_driver(&gdth_pci_driver);
4313 
4314 	list_for_each_entry(ha, &gdth_instances, list)
4315 		gdth_remove_one(ha);
4316 }
4317 
4318 module_init(gdth_init);
4319 module_exit(gdth_exit);
4320 
4321 #ifndef MODULE
4322 __setup("gdth=", option_setup);
4323 #endif
4324