1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /***************************************************************************
3                           dpti.c  -  description
4                              -------------------
5     begin                : Thu Sep 7 2000
6     copyright            : (C) 2000 by Adaptec
7 
8 			   July 30, 2001 First version being submitted
9 			   for inclusion in the kernel.  V2.4
10 
11     See Documentation/scsi/dpti.txt for history, notes, license info
12     and credits
13  ***************************************************************************/
14 
15 /***************************************************************************
16  *                                                                         *
17  *                                                                         *
18  ***************************************************************************/
19 /***************************************************************************
20  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
21  - Support 2.6 kernel and DMA-mapping
22  - ioctl fix for raid tools
23  - use schedule_timeout in long loops
24  **************************************************************************/
25 
26 /*#define DEBUG 1 */
27 /*#define UARTDELAY 1 */
28 
29 #include <linux/module.h>
30 
31 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
32 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
33 
34 ////////////////////////////////////////////////////////////////
35 
36 #include <linux/ioctl.h>	/* For SCSI-Passthrough */
37 #include <linux/uaccess.h>
38 
39 #include <linux/stat.h>
40 #include <linux/slab.h>		/* for kmalloc() */
41 #include <linux/pci.h>		/* for PCI support */
42 #include <linux/proc_fs.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>	/* for udelay */
45 #include <linux/interrupt.h>
46 #include <linux/kernel.h>	/* for printk */
47 #include <linux/sched.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/dma-mapping.h>
51 
52 #include <linux/timer.h>
53 #include <linux/string.h>
54 #include <linux/ioport.h>
55 #include <linux/mutex.h>
56 
57 #include <asm/processor.h>	/* for boot_cpu_data */
58 #include <asm/pgtable.h>
59 #include <asm/io.h>		/* for virt_to_bus, etc. */
60 
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_device.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi_tcq.h>
66 
67 #include "dpt/dptsig.h"
68 #include "dpti.h"
69 
70 /*============================================================================
71  * Create a binary signature - this is read by dptsig
72  * Needed for our management apps
73  *============================================================================
74  */
75 static DEFINE_MUTEX(adpt_mutex);
76 static dpt_sig_S DPTI_sig = {
77 	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
78 #ifdef __i386__
79 	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
80 #elif defined(__ia64__)
81 	PROC_INTEL, PROC_IA64,
82 #elif defined(__sparc__)
83 	PROC_ULTRASPARC, PROC_ULTRASPARC,
84 #elif defined(__alpha__)
85 	PROC_ALPHA, PROC_ALPHA,
86 #else
87 	(-1),(-1),
88 #endif
89 	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
90 	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
91 	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
92 };
93 
94 
95 
96 
97 /*============================================================================
98  * Globals
99  *============================================================================
100  */
101 
102 static DEFINE_MUTEX(adpt_configuration_lock);
103 
104 static struct i2o_sys_tbl *sys_tbl;
105 static dma_addr_t sys_tbl_pa;
106 static int sys_tbl_ind;
107 static int sys_tbl_len;
108 
109 static adpt_hba* hba_chain = NULL;
110 static int hba_count = 0;
111 
112 static struct class *adpt_sysfs_class;
113 
114 static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
115 #ifdef CONFIG_COMPAT
116 static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
117 #endif
118 
119 static const struct file_operations adpt_fops = {
120 	.unlocked_ioctl	= adpt_unlocked_ioctl,
121 	.open		= adpt_open,
122 	.release	= adpt_close,
123 #ifdef CONFIG_COMPAT
124 	.compat_ioctl	= compat_adpt_ioctl,
125 #endif
126 	.llseek		= noop_llseek,
127 };
128 
129 /* Structures and definitions for synchronous message posting.
130  * See adpt_i2o_post_wait() for description
131  * */
132 struct adpt_i2o_post_wait_data
133 {
134 	int status;
135 	u32 id;
136 	adpt_wait_queue_head_t *wq;
137 	struct adpt_i2o_post_wait_data *next;
138 };
139 
140 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
141 static u32 adpt_post_wait_id = 0;
142 static DEFINE_SPINLOCK(adpt_post_wait_lock);
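/*
 * In brief: adpt_i2o_post_wait() stamps a 15-bit id into the request's
 * context word (msg[2] |= 0x80000000 | id), links a wait_data entry
 * into adpt_post_wait_queue, posts the frame and sleeps on the wait
 * queue.  When the reply comes back, the interrupt path calls
 * adpt_i2o_post_wait_complete(context, status), which looks the id up
 * in the queue, records the status and wakes the sleeper.
 */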
143 
144 
145 /*============================================================================
146  * 				Functions
147  *============================================================================
148  */
149 
150 static inline int dpt_dma64(adpt_hba *pHba)
151 {
152 	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153 }
154 
155 static inline u32 dma_high(dma_addr_t addr)
156 {
157 	return upper_32_bits(addr);
158 }
159 
160 static inline u32 dma_low(dma_addr_t addr)
161 {
162 	return (u32)addr;
163 }
164 
165 static u8 adpt_read_blink_led(adpt_hba* host)
166 {
167 	if (host->FwDebugBLEDflag_P) {
168 		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169 			return readb(host->FwDebugBLEDvalue_P);
170 		}
171 	}
172 	return 0;
173 }
174 
175 /*============================================================================
176  * Scsi host template interface functions
177  *============================================================================
178  */
179 
180 #ifdef MODULE
181 static struct pci_device_id dptids[] = {
182 	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
183 	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
184 	{ 0, }
185 };
186 #endif
187 
188 MODULE_DEVICE_TABLE(pci,dptids);
189 
190 static int adpt_detect(struct scsi_host_template* sht)
191 {
192 	struct pci_dev *pDev = NULL;
193 	adpt_hba *pHba;
194 	adpt_hba *next;
195 
196 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
197 
198         /* search for all Adaptec I2O RAID cards */
199 	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
200 		if(pDev->device == PCI_DPT_DEVICE_ID ||
201 		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
202 			if(adpt_install_hba(sht, pDev) ){
203 				PERROR("Could not Init an I2O RAID device\n");
204 				PERROR("Will not try to detect others.\n");
205 				return hba_count-1;
206 			}
207 			pci_dev_get(pDev);
208 		}
209 	}
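	/*
	 * Bring-up, in outline: each IOP found above starts in INIT
	 * state, moves to HOLD once activated (get status, init the
	 * outbound queue, get the HRT), and reaches OPERATIONAL after
	 * the system table is built and the IOP is onlined.  Only then
	 * can its LCT be fetched and parsed into SCSI devices.
	 */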
210 
211 	/* In INIT state, Activate IOPs */
212 	for (pHba = hba_chain; pHba; pHba = next) {
213 		next = pHba->next;
214 		// Activate does get status, init outbound, and get hrt
215 		if (adpt_i2o_activate_hba(pHba) < 0) {
216 			adpt_i2o_delete_hba(pHba);
217 		}
218 	}
219 
220 
221 	/* Active IOPs in HOLD state */
222 
223 rebuild_sys_tab:
224 	if (hba_chain == NULL)
225 		return 0;
226 
227 	/*
228 	 * If build_sys_table fails, we kill everything and bail
229 	 * as we can't init the IOPs w/o a system table
230 	 */
231 	if (adpt_i2o_build_sys_table() < 0) {
232 		adpt_i2o_sys_shutdown();
233 		return 0;
234 	}
235 
236 	PDEBUG("HBA's in HOLD state\n");
237 
238 	/* If an IOP doesn't come online, we need to rebuild the system table */
239 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 		if (adpt_i2o_online_hba(pHba) < 0) {
241 			adpt_i2o_delete_hba(pHba);
242 			goto rebuild_sys_tab;
243 		}
244 	}
245 
246 	/* Active IOPs now in OPERATIONAL state */
247 	PDEBUG("HBA's in OPERATIONAL state\n");
248 
249 	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
250 	for (pHba = hba_chain; pHba; pHba = next) {
251 		next = pHba->next;
252 		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 		if (adpt_i2o_lct_get(pHba) < 0){
254 			adpt_i2o_delete_hba(pHba);
255 			continue;
256 		}
257 
258 		if (adpt_i2o_parse_lct(pHba) < 0){
259 			adpt_i2o_delete_hba(pHba);
260 			continue;
261 		}
262 		adpt_inquiry(pHba);
263 	}
264 
265 	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 	if (IS_ERR(adpt_sysfs_class)) {
267 		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 		adpt_sysfs_class = NULL;
269 	}
270 
271 	for (pHba = hba_chain; pHba; pHba = next) {
272 		next = pHba->next;
273 		if (adpt_scsi_host_alloc(pHba, sht) < 0){
274 			adpt_i2o_delete_hba(pHba);
275 			continue;
276 		}
277 		pHba->initialized = TRUE;
278 		pHba->state &= ~DPTI_STATE_RESET;
279 		if (adpt_sysfs_class) {
280 			struct device *dev = device_create(adpt_sysfs_class,
281 				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
282 				"dpti%d", pHba->unit);
283 			if (IS_ERR(dev)) {
284 				printk(KERN_WARNING"dpti%d: unable to "
285 					"create device in dpt_i2o class\n",
286 					pHba->unit);
287 			}
288 		}
289 	}
290 
291 	// Register our control device node
292 	// nodes will need to be created in /dev to access this
293 	// the nodes cannot be created from within the driver
294 	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
295 		adpt_i2o_sys_shutdown();
296 		return 0;
297 	}
298 	return hba_count;
299 }
300 
301 
302 static void adpt_release(adpt_hba *pHba)
303 {
304 	struct Scsi_Host *shost = pHba->host;
305 
306 	scsi_remove_host(shost);
307 //	adpt_i2o_quiesce_hba(pHba);
308 	adpt_i2o_delete_hba(pHba);
309 	scsi_host_put(shost);
310 }
311 
312 
313 static void adpt_inquiry(adpt_hba* pHba)
314 {
315 	u32 msg[17];
316 	u32 *mptr;
317 	u32 *lenptr;
318 	int direction;
319 	int scsidir;
320 	u32 len;
321 	u32 reqlen;
322 	u8* buf;
323 	dma_addr_t addr;
324 	u8  scb[16];
325 	s32 rcode;
326 
327 	memset(msg, 0, sizeof(msg));
328 	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
329 	if(!buf){
330 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
331 		return;
332 	}
333 	memset((void*)buf, 0, 36);
334 
335 	len = 36;
336 	direction = 0x00000000;
337 	scsidir = 0x40000000;	// DATA IN  (iop<--dev)
338 
339 	if (dpt_dma64(pHba))
340 		reqlen = 17;		// SINGLE SGE, 64 bit
341 	else
342 		reqlen = 14;		// SINGLE SGE, 32 bit
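	/*
	 * Frame layout for this DPT private SCSI_EXEC request: msg[0]
	 * carries the word count and SGL offset, msg[1] the function and
	 * initiator/target TIDs, msg[4..5] the private command plus the
	 * "interpret" flag, and msg[6] the SCSI flags and CDB length.
	 * A 16-byte CDB, the byte count, and one SG element follow -
	 * three words longer in the 64-bit SG form, hence 17 vs 14.
	 */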
343 	/* Stick the headers on */
344 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
345 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
346 	msg[2] = 0;
347 	msg[3]  = 0;
348 	// Adaptec/DPT Private stuff
349 	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
350 	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
351 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
352 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
353 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
354 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
355 	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
356 
357 	mptr=msg+7;
358 
359 	memset(scb, 0, sizeof(scb));
360 	// Write SCSI command into the message - always 16 byte block
361 	scb[0] = INQUIRY;
362 	scb[1] = 0;
363 	scb[2] = 0;
364 	scb[3] = 0;
365 	scb[4] = 36;
366 	scb[5] = 0;
367 	// Don't care about the rest of scb
368 
369 	memcpy(mptr, scb, sizeof(scb));
370 	mptr+=4;
371 	lenptr=mptr++;		/* Remember me - fill in when we know */
372 
373 	/* Now fill in the SGList and command */
374 	*lenptr = len;
375 	if (dpt_dma64(pHba)) {
376 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
377 		*mptr++ = 1 << PAGE_SHIFT;
378 		*mptr++ = 0xD0000000|direction|len;
379 		*mptr++ = dma_low(addr);
380 		*mptr++ = dma_high(addr);
381 	} else {
382 		*mptr++ = 0xD0000000|direction|len;
383 		*mptr++ = addr;
384 	}
385 
386 	// Send it on its way
387 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
388 	if (rcode != 0) {
389 		sprintf(pHba->detail, "Adaptec I2O RAID");
390 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
391 		if (rcode != -ETIME && rcode != -EINTR)
392 			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
393 	} else {
394 		memset(pHba->detail, 0, sizeof(pHba->detail));
395 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
396 		memcpy(&(pHba->detail[16]), " Model: ", 8);
397 		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
398 		memcpy(&(pHba->detail[40]), " FW: ", 4);
399 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
400 		pHba->detail[48] = '\0';	/* precautionary */
401 		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
402 	}
403 	adpt_i2o_status_get(pHba);
404 	return ;
405 }
406 
407 
408 static int adpt_slave_configure(struct scsi_device * device)
409 {
410 	struct Scsi_Host *host = device->host;
411 	adpt_hba* pHba;
412 
413 	pHba = (adpt_hba *) host->hostdata[0];
414 
415 	if (host->can_queue && device->tagged_supported) {
416 		scsi_change_queue_depth(device,
417 				host->can_queue - 1);
418 	}
419 	return 0;
420 }
421 
422 static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
423 {
424 	adpt_hba* pHba = NULL;
425 	struct adpt_device* pDev = NULL;	/* dpt per device information */
426 
427 	cmd->scsi_done = done;
428 	/*
429 	 * SCSI REQUEST_SENSE commands will be executed automatically by the
430 	 * Host Adapter for any errors, so they should not be executed
431 	 * explicitly unless the Sense Data is zero indicating that no error
432 	 * occurred.
433 	 */
434 
435 	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
436 		cmd->result = (DID_OK << 16);
437 		cmd->scsi_done(cmd);
438 		return 0;
439 	}
440 
441 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
442 	if (!pHba) {
443 		return FAILED;
444 	}
445 
446 	rmb();
447 	if ((pHba->state) & DPTI_STATE_RESET)
448 		return SCSI_MLQUEUE_HOST_BUSY;
449 
450 	// TODO if the cmd->device is offline then I may need to issue a bus rescan
451 	// followed by a get_lct to see if the device is there anymore
452 	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
453 		/*
454 		 * First command request for this device.  Set up a pointer
455 		 * to the device structure.  This should be a TEST_UNIT_READY
456 		 * command from scan_scsis_single.
457 		 */
458 		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
459 			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
460 			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
461 			cmd->result = (DID_NO_CONNECT << 16);
462 			cmd->scsi_done(cmd);
463 			return 0;
464 		}
465 		cmd->device->hostdata = pDev;
466 	}
467 	pDev->pScsi_dev = cmd->device;
468 
469 	/*
470 	 * If we are being called from when the device is being reset,
471 	 * delay processing of the command until later.
472 	 */
473 	if (pDev->state & DPTI_DEV_RESET ) {
474 		return FAILED;
475 	}
476 	return adpt_scsi_to_i2o(pHba, cmd, pDev);
477 }
478 
479 static DEF_SCSI_QCMD(adpt_queue)
480 
481 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
482 		sector_t capacity, int geom[])
483 {
484 	int heads=-1;
485 	int sectors=-1;
486 	int cylinders=-1;
487 
488 	// *** First let's set the default geometry ****
489 
490 	// If the capacity is less than 0x2000
491 	if (capacity < 0x2000 ) {	// floppy
492 		heads = 18;
493 		sectors = 2;
494 	}
495 	// else if between 0x2000 and 0x20000
496 	else if (capacity < 0x20000) {
497 		heads = 64;
498 		sectors = 32;
499 	}
500 	// else if between 0x20000 and 0x40000
501 	else if (capacity < 0x40000) {
502 		heads = 65;
503 		sectors = 63;
504 	}
505 	// else if between 0x40000 and 0x80000
506 	else if (capacity < 0x80000) {
507 		heads = 128;
508 		sectors = 63;
509 	}
510 	// else if greater than 0x80000
511 	else {
512 		heads = 255;
513 		sectors = 63;
514 	}
515 	cylinders = sector_div(capacity, heads * sectors);
516 
517 	// Special case if CDROM
518 	if(sdev->type == 5) {  // CDROM
519 		heads = 252;
520 		sectors = 63;
521 		cylinders = 1111;
522 	}
523 
524 	geom[0] = heads;
525 	geom[1] = sectors;
526 	geom[2] = cylinders;
527 
528 	PDEBUG("adpt_bios_param: exit\n");
529 	return 0;
530 }
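/*
 * Worked example for the table above: a 1 GB disk (0x200000 sectors)
 * lands in the >= 0x80000 bucket, so it reports 255 heads and 63
 * sectors/track, with the cylinder figure meant to be
 * capacity / (255 * 63).  Note that sector_div() divides its first
 * argument in place and returns the remainder.
 */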
531 
532 
533 static const char *adpt_info(struct Scsi_Host *host)
534 {
535 	adpt_hba* pHba;
536 
537 	pHba = (adpt_hba *) host->hostdata[0];
538 	return (char *) (pHba->detail);
539 }
540 
541 static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
542 {
543 	struct adpt_device* d;
544 	int id;
545 	int chan;
546 	adpt_hba* pHba;
547 	int unit;
548 
549 	// Find HBA (host bus adapter) we are looking for
550 	mutex_lock(&adpt_configuration_lock);
551 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
552 		if (pHba->host == host) {
553 			break;	/* found adapter */
554 		}
555 	}
556 	mutex_unlock(&adpt_configuration_lock);
557 	if (pHba == NULL) {
558 		return 0;
559 	}
560 	host = pHba->host;
561 
562 	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
563 	seq_printf(m, "%s\n", pHba->detail);
564 	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
565 			pHba->host->host_no, pHba->name, host->irq);
566 	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
567 			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
568 
569 	seq_puts(m, "Devices:\n");
570 	for(chan = 0; chan < MAX_CHANNEL; chan++) {
571 		for(id = 0; id < MAX_ID; id++) {
572 			d = pHba->channel[chan].device[id];
573 			while(d) {
574 				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
575 				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
576 
577 				unit = d->pI2o_dev->lct_data.tid;
578 				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
579 					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
580 					       scsi_device_online(d->pScsi_dev)? "online":"offline");
581 				d = d->next_lun;
582 			}
583 		}
584 	}
585 	return 0;
586 }
587 
588 /*
589  *	Turn a pointer to ioctl reply data into a u32 'context'
590  */
591 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
592 {
593 #if BITS_PER_LONG == 32
594 	return (u32)(unsigned long)reply;
595 #else
596 	ulong flags = 0;
597 	u32 nr, i;
598 
599 	spin_lock_irqsave(pHba->host->host_lock, flags);
600 	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
601 	for (i = 0; i < nr; i++) {
602 		if (pHba->ioctl_reply_context[i] == NULL) {
603 			pHba->ioctl_reply_context[i] = reply;
604 			break;
605 		}
606 	}
607 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
608 	if (i >= nr) {
609 		printk(KERN_WARNING"%s: Too many outstanding "
610 				"ioctl commands\n", pHba->name);
611 		return (u32)-1;
612 	}
613 
614 	return i;
615 #endif
616 }
617 
618 /*
619  *	Go from a u32 'context' to a pointer to ioctl reply data.
620  */
621 static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
622 {
623 #if BITS_PER_LONG == 32
624 	return (void *)(unsigned long)context;
625 #else
626 	void *p = pHba->ioctl_reply_context[context];
627 	pHba->ioctl_reply_context[context] = NULL;
628 
629 	return p;
630 #endif
631 }
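/*
 * Why the two helpers above exist: the I2O context field is only 32
 * bits wide, so on 64-bit builds a reply pointer cannot travel through
 * it directly.  The pointer is instead parked in the per-HBA
 * ioctl_reply_context[] table and the table index is used as the
 * context; 32-bit builds simply cast the pointer itself.
 */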
632 
633 /*===========================================================================
634  * Error Handling routines
635  *===========================================================================
636  */
637 
638 static int adpt_abort(struct scsi_cmnd * cmd)
639 {
640 	adpt_hba* pHba = NULL;	/* host bus adapter structure */
641 	struct adpt_device* dptdevice;	/* dpt per device information */
642 	u32 msg[5];
643 	int rcode;
644 
645 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
646 	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
647 	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
648 		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
649 		return FAILED;
650 	}
651 
652 	memset(msg, 0, sizeof(msg));
653 	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
654 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
655 	msg[2] = 0;
656 	msg[3]= 0;
657 	/* Add 1 to avoid the firmware treating it as an invalid command */
658 	msg[4] = cmd->request->tag + 1;
659 	if (pHba->host)
660 		spin_lock_irq(pHba->host->host_lock);
661 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
662 	if (pHba->host)
663 		spin_unlock_irq(pHba->host->host_lock);
664 	if (rcode != 0) {
665 		if(rcode == -EOPNOTSUPP ){
666 			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
667 			return FAILED;
668 		}
669 		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
670 		return FAILED;
671 	}
672 	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
673 	return SUCCESS;
674 }
675 
676 
677 #define I2O_DEVICE_RESET 0x27
678 // This is the same for BLK and SCSI devices
679 // NOTE this is wrong in the i2o.h definitions
680 // This is not currently supported by our adapter but we issue it anyway
681 static int adpt_device_reset(struct scsi_cmnd* cmd)
682 {
683 	adpt_hba* pHba;
684 	u32 msg[4];
685 	u32 rcode;
686 	int old_state;
687 	struct adpt_device* d = cmd->device->hostdata;
688 
689 	pHba = (void*) cmd->device->host->hostdata[0];
690 	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
691 	if (!d) {
692 		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
693 		return FAILED;
694 	}
695 	memset(msg, 0, sizeof(msg));
696 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
697 	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
698 	msg[2] = 0;
699 	msg[3] = 0;
700 
701 	if (pHba->host)
702 		spin_lock_irq(pHba->host->host_lock);
703 	old_state = d->state;
704 	d->state |= DPTI_DEV_RESET;
705 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
706 	d->state = old_state;
707 	if (pHba->host)
708 		spin_unlock_irq(pHba->host->host_lock);
709 	if (rcode != 0) {
710 		if(rcode == -EOPNOTSUPP ){
711 			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
712 			return FAILED;
713 		}
714 		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
715 		return FAILED;
716 	} else {
717 		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
718 		return SUCCESS;
719 	}
720 }
721 
722 
723 #define I2O_HBA_BUS_RESET 0x87
724 // This version of bus reset is called by the eh_error handler
725 static int adpt_bus_reset(struct scsi_cmnd* cmd)
726 {
727 	adpt_hba* pHba;
728 	u32 msg[4];
729 	u32 rcode;
730 
731 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
732 	memset(msg, 0, sizeof(msg));
733 	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
734 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
735 	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
736 	msg[2] = 0;
737 	msg[3] = 0;
738 	if (pHba->host)
739 		spin_lock_irq(pHba->host->host_lock);
740 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
741 	if (pHba->host)
742 		spin_unlock_irq(pHba->host->host_lock);
743 	if (rcode != 0) {
744 		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
745 		return FAILED;
746 	} else {
747 		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
748 		return SUCCESS;
749 	}
750 }
751 
752 // This version of reset is called by the eh_error_handler
753 static int __adpt_reset(struct scsi_cmnd* cmd)
754 {
755 	adpt_hba* pHba;
756 	int rcode;
757 	char name[32];
758 
759 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
760 	strncpy(name, pHba->name, sizeof(name));
761 	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
762 	rcode =  adpt_hba_reset(pHba);
763 	if(rcode == 0){
764 		printk(KERN_WARNING"%s: HBA reset complete\n", name);
765 		return SUCCESS;
766 	} else {
767 		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
768 		return FAILED;
769 	}
770 }
771 
772 static int adpt_reset(struct scsi_cmnd* cmd)
773 {
774 	int rc;
775 
776 	spin_lock_irq(cmd->device->host->host_lock);
777 	rc = __adpt_reset(cmd);
778 	spin_unlock_irq(cmd->device->host->host_lock);
779 
780 	return rc;
781 }
782 
783 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
784 static int adpt_hba_reset(adpt_hba* pHba)
785 {
786 	int rcode;
787 
788 	pHba->state |= DPTI_STATE_RESET;
789 
790 	// Activate does get status, init outbound, and get hrt
791 	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
792 		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
793 		adpt_i2o_delete_hba(pHba);
794 		return rcode;
795 	}
796 
797 	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
798 		adpt_i2o_delete_hba(pHba);
799 		return rcode;
800 	}
801 	PDEBUG("%s: in HOLD state\n",pHba->name);
802 
803 	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
804 		adpt_i2o_delete_hba(pHba);
805 		return rcode;
806 	}
807 	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
808 
809 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
810 		adpt_i2o_delete_hba(pHba);
811 		return rcode;
812 	}
813 
814 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
815 		adpt_i2o_delete_hba(pHba);
816 		return rcode;
817 	}
818 	pHba->state &= ~DPTI_STATE_RESET;
819 
820 	adpt_fail_posted_scbs(pHba);
821 	return 0;	/* return success */
822 }
823 
824 /*===========================================================================
825  *
826  *===========================================================================
827  */
828 
829 
830 static void adpt_i2o_sys_shutdown(void)
831 {
832 	adpt_hba *pHba, *pNext;
833 	struct adpt_i2o_post_wait_data *p1, *old;
834 
835 	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
836 	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
837 	/* Delete all IOPs from the controller chain */
838 	/* They should have already been released by the
839 	 * scsi-core
840 	 */
841 	for (pHba = hba_chain; pHba; pHba = pNext) {
842 		pNext = pHba->next;
843 		adpt_i2o_delete_hba(pHba);
844 	}
845 
846 	/* Remove any timedout entries from the wait queue.  */
847 //	spin_lock_irqsave(&adpt_post_wait_lock, flags);
848 	/* Nothing should be outstanding at this point so just
849 	 * free them
850 	 */
851 	for(p1 = adpt_post_wait_queue; p1;) {
852 		old = p1;
853 		p1 = p1->next;
854 		kfree(old);
855 	}
856 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
857 	adpt_post_wait_queue = NULL;
858 
859 	printk(KERN_INFO "Adaptec I2O controllers down.\n");
860 }
861 
862 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
863 {
864 
865 	adpt_hba* pHba = NULL;
866 	adpt_hba* p = NULL;
867 	ulong base_addr0_phys = 0;
868 	ulong base_addr1_phys = 0;
869 	u32 hba_map0_area_size = 0;
870 	u32 hba_map1_area_size = 0;
871 	void __iomem *base_addr_virt = NULL;
872 	void __iomem *msg_addr_virt = NULL;
873 	int dma64 = 0;
874 
875 	int raptorFlag = FALSE;
876 
877 	if(pci_enable_device(pDev)) {
878 		return -EINVAL;
879 	}
880 
881 	if (pci_request_regions(pDev, "dpt_i2o")) {
882 		PERROR("dpti: adpt_config_hba: pci request region failed\n");
883 		return -EINVAL;
884 	}
885 
886 	pci_set_master(pDev);
887 
888 	/*
889 	 *	See if we should enable dma64 mode.
890 	 */
891 	if (sizeof(dma_addr_t) > 4 &&
892 	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
893 	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
894 		dma64 = 1;
895 
896 	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
897 		return -EINVAL;
898 
899 	/* adapter only supports message blocks below 4GB */
900 	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
901 
902 	base_addr0_phys = pci_resource_start(pDev,0);
903 	hba_map0_area_size = pci_resource_len(pDev,0);
904 
905 	// Check if standard PCI card or single BAR Raptor
906 	if(pDev->device == PCI_DPT_DEVICE_ID){
907 		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
908 			// Raptor card with this device id needs 4M
909 			hba_map0_area_size = 0x400000;
910 		} else { // Not Raptor - it is a PCI card
911 			if(hba_map0_area_size > 0x100000 ){
912 				hba_map0_area_size = 0x100000;
913 			}
914 		}
915 	} else {// Raptor split BAR config
916 		// Use BAR1 in this configuration
917 		base_addr1_phys = pci_resource_start(pDev,1);
918 		hba_map1_area_size = pci_resource_len(pDev,1);
919 		raptorFlag = TRUE;
920 	}
921 
922 #if BITS_PER_LONG == 64
923 	/*
924 	 *	The original Adaptec 64 bit driver has this comment here:
925 	 *	"x86_64 machines need more optimal mappings"
926 	 *
927 	 *	I assume some HBAs report ridiculously large mappings
928 	 *	and we need to limit them on platforms with IOMMUs.
929 	 */
930 	if (raptorFlag == TRUE) {
931 		if (hba_map0_area_size > 128)
932 			hba_map0_area_size = 128;
933 		if (hba_map1_area_size > 524288)
934 			hba_map1_area_size = 524288;
935 	} else {
936 		if (hba_map0_area_size > 524288)
937 			hba_map0_area_size = 524288;
938 	}
939 #endif
940 
941 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
942 	if (!base_addr_virt) {
943 		pci_release_regions(pDev);
944 		PERROR("dpti: adpt_config_hba: io remap failed\n");
945 		return -EINVAL;
946 	}
947 
948         if(raptorFlag == TRUE) {
949 		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
950 		if (!msg_addr_virt) {
951 			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
952 			iounmap(base_addr_virt);
953 			pci_release_regions(pDev);
954 			return -EINVAL;
955 		}
956 	} else {
957 		msg_addr_virt = base_addr_virt;
958 	}
959 
960 	// Allocate and zero the data structure
961 	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
962 	if (!pHba) {
963 		if (msg_addr_virt != base_addr_virt)
964 			iounmap(msg_addr_virt);
965 		iounmap(base_addr_virt);
966 		pci_release_regions(pDev);
967 		return -ENOMEM;
968 	}
969 
970 	mutex_lock(&adpt_configuration_lock);
971 
972 	if(hba_chain != NULL){
973 		for(p = hba_chain; p->next; p = p->next);
974 		p->next = pHba;
975 	} else {
976 		hba_chain = pHba;
977 	}
978 	pHba->next = NULL;
979 	pHba->unit = hba_count;
980 	sprintf(pHba->name, "dpti%d", hba_count);
981 	hba_count++;
982 
983 	mutex_unlock(&adpt_configuration_lock);
984 
985 	pHba->pDev = pDev;
986 	pHba->base_addr_phys = base_addr0_phys;
987 
988 	// Set up the Virtual Base Address of the I2O Device
989 	pHba->base_addr_virt = base_addr_virt;
990 	pHba->msg_addr_virt = msg_addr_virt;
991 	pHba->irq_mask = base_addr_virt+0x30;
992 	pHba->post_port = base_addr_virt+0x40;
993 	pHba->reply_port = base_addr_virt+0x44;
994 
995 	pHba->hrt = NULL;
996 	pHba->lct = NULL;
997 	pHba->lct_size = 0;
998 	pHba->status_block = NULL;
999 	pHba->post_count = 0;
1000 	pHba->state = DPTI_STATE_RESET;
1001 	pHba->pDev = pDev;
1002 	pHba->devices = NULL;
1003 	pHba->dma64 = dma64;
1004 
1005 	// Initializing the spinlocks
1006 	spin_lock_init(&pHba->state_lock);
1007 	spin_lock_init(&adpt_post_wait_lock);
1008 
1009 	if(raptorFlag == 0){
1010 		printk(KERN_INFO "Adaptec I2O RAID controller"
1011 				 " %d at %p size=%x irq=%d%s\n",
1012 			hba_count-1, base_addr_virt,
1013 			hba_map0_area_size, pDev->irq,
1014 			dma64 ? " (64-bit DMA)" : "");
1015 	} else {
1016 		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1017 			hba_count-1, pDev->irq,
1018 			dma64 ? " (64-bit DMA)" : "");
1019 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1020 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1021 	}
1022 
1023 	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1024 		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1025 		adpt_i2o_delete_hba(pHba);
1026 		return -EINVAL;
1027 	}
1028 
1029 	return 0;
1030 }
1031 
1032 
1033 static void adpt_i2o_delete_hba(adpt_hba* pHba)
1034 {
1035 	adpt_hba* p1;
1036 	adpt_hba* p2;
1037 	struct i2o_device* d;
1038 	struct i2o_device* next;
1039 	int i;
1040 	int j;
1041 	struct adpt_device* pDev;
1042 	struct adpt_device* pNext;
1043 
1044 
1045 	mutex_lock(&adpt_configuration_lock);
1046 	if(pHba->host){
1047 		free_irq(pHba->host->irq, pHba);
1048 	}
1049 	p2 = NULL;
1050 	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1051 		if(p1 == pHba) {
1052 			if(p2) {
1053 				p2->next = p1->next;
1054 			} else {
1055 				hba_chain = p1->next;
1056 			}
1057 			break;
1058 		}
1059 	}
1060 
1061 	hba_count--;
1062 	mutex_unlock(&adpt_configuration_lock);
1063 
1064 	iounmap(pHba->base_addr_virt);
1065 	pci_release_regions(pHba->pDev);
1066 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1067 		iounmap(pHba->msg_addr_virt);
1068 	}
1069 	if(pHba->FwDebugBuffer_P)
1070 	   	iounmap(pHba->FwDebugBuffer_P);
1071 	if(pHba->hrt) {
1072 		dma_free_coherent(&pHba->pDev->dev,
1073 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1074 			pHba->hrt, pHba->hrt_pa);
1075 	}
1076 	if(pHba->lct) {
1077 		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1078 			pHba->lct, pHba->lct_pa);
1079 	}
1080 	if(pHba->status_block) {
1081 		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1082 			pHba->status_block, pHba->status_block_pa);
1083 	}
1084 	if(pHba->reply_pool) {
1085 		dma_free_coherent(&pHba->pDev->dev,
1086 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1087 			pHba->reply_pool, pHba->reply_pool_pa);
1088 	}
1089 
1090 	for(d = pHba->devices; d ; d = next){
1091 		next = d->next;
1092 		kfree(d);
1093 	}
1094 	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1095 		for(j = 0; j < MAX_ID; j++){
1096 			if(pHba->channel[i].device[j] != NULL){
1097 				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1098 					pNext = pDev->next_lun;
1099 					kfree(pDev);
1100 				}
1101 			}
1102 		}
1103 	}
1104 	pci_dev_put(pHba->pDev);
1105 	if (adpt_sysfs_class)
1106 		device_destroy(adpt_sysfs_class,
1107 				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1108 	kfree(pHba);
1109 
1110 	if(hba_count <= 0){
1111 		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1112 		if (adpt_sysfs_class) {
1113 			class_destroy(adpt_sysfs_class);
1114 			adpt_sysfs_class = NULL;
1115 		}
1116 	}
1117 }
1118 
1119 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1120 {
1121 	struct adpt_device* d;
1122 
1123 	if(chan < 0 || chan >= MAX_CHANNEL)
1124 		return NULL;
1125 
1126 	d = pHba->channel[chan].device[id];
1127 	if(!d || d->tid == 0) {
1128 		return NULL;
1129 	}
1130 
1131 	/* If it is the only lun at that address then this should match*/
1132 	if(d->scsi_lun == lun){
1133 		return d;
1134 	}
1135 
1136 	/* else we need to look through all the luns */
1137 	for(d=d->next_lun ; d ; d = d->next_lun){
1138 		if(d->scsi_lun == lun){
1139 			return d;
1140 		}
1141 	}
1142 	return NULL;
1143 }
1144 
1145 
1146 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1147 {
1148 	// I used my own version of the WAIT_QUEUE_HEAD
1149 	// to handle some version differences
1150 	// When embedded in the kernel this could go back to the vanilla one
1151 	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1152 	int status = 0;
1153 	ulong flags = 0;
1154 	struct adpt_i2o_post_wait_data *p1, *p2;
1155 	struct adpt_i2o_post_wait_data *wait_data =
1156 		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1157 	DECLARE_WAITQUEUE(wait, current);
1158 
1159 	if (!wait_data)
1160 		return -ENOMEM;
1161 
1162 	/*
1163 	 * The spin locking is needed to keep anyone from playing
1164 	 * with the queue pointers and id while we do the same
1165 	 */
1166 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1167        // TODO we need a MORE unique way of getting ids
1168        // to support async LCT get
1169 	wait_data->next = adpt_post_wait_queue;
1170 	adpt_post_wait_queue = wait_data;
1171 	adpt_post_wait_id++;
1172 	adpt_post_wait_id &= 0x7fff;
1173 	wait_data->id =  adpt_post_wait_id;
1174 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1175 
1176 	wait_data->wq = &adpt_wq_i2o_post;
1177 	wait_data->status = -ETIMEDOUT;
1178 
1179 	add_wait_queue(&adpt_wq_i2o_post, &wait);
1180 
1181 	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1182 	timeout *= HZ;
1183 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1184 		set_current_state(TASK_INTERRUPTIBLE);
1185 		if(pHba->host)
1186 			spin_unlock_irq(pHba->host->host_lock);
1187 		if (!timeout)
1188 			schedule();
1189 		else{
1190 			timeout = schedule_timeout(timeout);
1191 			if (timeout == 0) {
1192 				// I/O issued, but cannot get result in
1193 				// specified time. Freeing resources is
1194 				// dangerous.
1195 				status = -ETIME;
1196 			}
1197 		}
1198 		if(pHba->host)
1199 			spin_lock_irq(pHba->host->host_lock);
1200 	}
1201 	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1202 
1203 	if(status == -ETIMEDOUT){
1204 		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1205 		// We will have to free the wait_data memory during shutdown
1206 		return status;
1207 	}
1208 
1209 	/* Remove the entry from the queue.  */
1210 	p2 = NULL;
1211 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1212 	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1213 		if(p1 == wait_data) {
1214 			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1215 				status = -EOPNOTSUPP;
1216 			}
1217 			if(p2) {
1218 				p2->next = p1->next;
1219 			} else {
1220 				adpt_post_wait_queue = p1->next;
1221 			}
1222 			break;
1223 		}
1224 	}
1225 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1226 
1227 	kfree(wait_data);
1228 
1229 	return status;
1230 }
1231 
1232 
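/*
 * Posting a frame is a two-step doorbell handshake: reading the post
 * port yields either the offset of a free message frame or
 * EMPTY_QUEUE; after the request is copied in with memcpy_toio(),
 * writing the same offset back to the post port hands the frame to
 * the IOP.
 */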
1233 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1234 {
1235 
1236 	u32 m = EMPTY_QUEUE;
1237 	u32 __iomem *msg;
1238 	ulong timeout = jiffies + 30*HZ;
1239 	do {
1240 		rmb();
1241 		m = readl(pHba->post_port);
1242 		if (m != EMPTY_QUEUE) {
1243 			break;
1244 		}
1245 		if(time_after(jiffies,timeout)){
1246 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1247 			return -ETIMEDOUT;
1248 		}
1249 		schedule_timeout_uninterruptible(1);
1250 	} while(m == EMPTY_QUEUE);
1251 
1252 	msg = pHba->msg_addr_virt + m;
1253 	memcpy_toio(msg, data, len);
1254 	wmb();
1255 
1256 	//post message
1257 	writel(m, pHba->post_port);
1258 	wmb();
1259 
1260 	return 0;
1261 }
1262 
1263 
1264 static void adpt_i2o_post_wait_complete(u32 context, int status)
1265 {
1266 	struct adpt_i2o_post_wait_data *p1 = NULL;
1267 	/*
1268 	 * We need to search through the adpt_post_wait
1269 	 * queue to see if the given message is still
1270 	 * outstanding.  If not, it means that the IOP
1271 	 * took longer to respond to the message than we
1272 	 * had allowed and timer has already expired.
1273 	 * Not much we can do about that except log
1274 	 * it for debug purposes, increase timeout, and recompile
1275 	 *
1276 	 * Lock needed to keep anyone from moving queue pointers
1277 	 * around while we're looking through them.
1278 	 */
1279 
1280 	context &= 0x7fff;
1281 
1282 	spin_lock(&adpt_post_wait_lock);
1283 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1284 		if(p1->id == context) {
1285 			p1->status = status;
1286 			spin_unlock(&adpt_post_wait_lock);
1287 			wake_up_interruptible(p1->wq);
1288 			return;
1289 		}
1290 	}
1291 	spin_unlock(&adpt_post_wait_lock);
1292         // If this happens we lose commands that probably really completed
1293 	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1294 	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1295 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1296 		printk(KERN_DEBUG"           %d\n",p1->id);
1297 	}
1298 	return;
1299 }
1300 
1301 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1302 {
1303 	u32 msg[8];
1304 	u8* status;
1305 	dma_addr_t addr;
1306 	u32 m = EMPTY_QUEUE ;
1307 	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1308 
1309 	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1310 		timeout = jiffies + (25*HZ);
1311 	} else {
1312 		adpt_i2o_quiesce_hba(pHba);
1313 	}
1314 
1315 	do {
1316 		rmb();
1317 		m = readl(pHba->post_port);
1318 		if (m != EMPTY_QUEUE) {
1319 			break;
1320 		}
1321 		if(time_after(jiffies,timeout)){
1322 			printk(KERN_WARNING"Timeout waiting for message!\n");
1323 			return -ETIMEDOUT;
1324 		}
1325 		schedule_timeout_uninterruptible(1);
1326 	} while (m == EMPTY_QUEUE);
1327 
1328 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1329 	if(status == NULL) {
1330 		adpt_send_nop(pHba, m);
1331 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1332 		return -ENOMEM;
1333 	}
1334 	memset(status,0,4);
1335 
1336 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1337 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1338 	msg[2]=0;
1339 	msg[3]=0;
1340 	msg[4]=0;
1341 	msg[5]=0;
1342 	msg[6]=dma_low(addr);
1343 	msg[7]=dma_high(addr);
1344 
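	/*
	 * The adapter acknowledges the reset by DMA-writing a one-byte
	 * status to the address passed in msg[6..7]: 0x01 means the
	 * reset is still in progress (so wait for a message frame to
	 * reappear on the post port), 0x02 means it was rejected.
	 */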
1345 	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1346 	wmb();
1347 	writel(m, pHba->post_port);
1348 	wmb();
1349 
1350 	while(*status == 0){
1351 		if(time_after(jiffies,timeout)){
1352 			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1353 			/* We lose 4 bytes of "status" here, but we cannot
1354 			   free these because controller may awake and corrupt
1355 			   those bytes at any time */
1356 			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1357 			return -ETIMEDOUT;
1358 		}
1359 		rmb();
1360 		schedule_timeout_uninterruptible(1);
1361 	}
1362 
1363 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1364 		PDEBUG("%s: Reset in progress...\n", pHba->name);
1365 		// Here we wait for a message frame to become available,
1366 		// indicating that the reset has finished
1367 		do {
1368 			rmb();
1369 			m = readl(pHba->post_port);
1370 			if (m != EMPTY_QUEUE) {
1371 				break;
1372 			}
1373 			if(time_after(jiffies,timeout)){
1374 				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1375 				/* We lose 4 bytes of "status" here, but we
1376 				   cannot free these because controller may
1377 				   awake and corrupt those bytes at any time */
1378 				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1379 				return -ETIMEDOUT;
1380 			}
1381 			schedule_timeout_uninterruptible(1);
1382 		} while (m == EMPTY_QUEUE);
1383 		// Flush the offset
1384 		adpt_send_nop(pHba, m);
1385 	}
1386 	adpt_i2o_status_get(pHba);
1387 	if(*status == 0x02 ||
1388 			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1389 		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1390 				pHba->name);
1391 	} else {
1392 		PDEBUG("%s: Reset completed.\n", pHba->name);
1393 	}
1394 
1395 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1396 #ifdef UARTDELAY
1397 	// This delay is to allow someone attached to the card through the debug UART to
1398 	// set up the dump levels that they want before the rest of the initialization sequence
1399 	adpt_delay(20000);
1400 #endif
1401 	return 0;
1402 }
1403 
1404 
1405 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1406 {
1407 	int i;
1408 	int max;
1409 	int tid;
1410 	struct i2o_device *d;
1411 	i2o_lct *lct = pHba->lct;
1412 	u8 bus_no = 0;
1413 	s16 scsi_id;
1414 	u64 scsi_lun;
1415 	u32 buf[10]; // larger than 7, or 8 ...
1416 	struct adpt_device* pDev;
1417 
1418 	if (lct == NULL) {
1419 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1420 		return -1;
1421 	}
1422 
1423 	max = lct->table_size;
1424 	max -= 3;
1425 	max /= 9;
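	/*
	 * table_size is in 32-bit words: a 3-word header followed by
	 * 9-word entries, so the arithmetic above yields the entry
	 * count (e.g. a 48-word table holds 5 entries).
	 */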
1426 
1427 	for(i=0;i<max;i++) {
1428 		if( lct->lct_entry[i].user_tid != 0xfff){
1429 			/*
1430 			 * If we have hidden devices, we need to inform the upper layers about
1431 			 * the possible maximum id reference to handle device access when
1432 			 * an array is disassembled. This code has no other purpose but to
1433 			 * allow us future access to devices that are currently hidden
1434 			 * behind arrays, hotspares or have not been configured (JBOD mode).
1435 			 */
1436 			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1437 			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1438 			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1439 			    	continue;
1440 			}
1441 			tid = lct->lct_entry[i].tid;
1442 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1443 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1444 				continue;
1445 			}
1446 			bus_no = buf[0]>>16;
1447 			scsi_id = buf[1];
1448 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1449 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1450 				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
1451 				continue;
1452 			}
1453 			if (scsi_id >= MAX_ID){
1454 				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
1455 				continue;
1456 			}
1457 			if(bus_no > pHba->top_scsi_channel){
1458 				pHba->top_scsi_channel = bus_no;
1459 			}
1460 			if(scsi_id > pHba->top_scsi_id){
1461 				pHba->top_scsi_id = scsi_id;
1462 			}
1463 			if(scsi_lun > pHba->top_scsi_lun){
1464 				pHba->top_scsi_lun = scsi_lun;
1465 			}
1466 			continue;
1467 		}
1468 		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1469 		if(d==NULL)
1470 		{
1471 			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1472 			return -ENOMEM;
1473 		}
1474 
1475 		d->controller = pHba;
1476 		d->next = NULL;
1477 
1478 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1479 
1480 		d->flags = 0;
1481 		tid = d->lct_data.tid;
1482 		adpt_i2o_report_hba_unit(pHba, d);
1483 		adpt_i2o_install_device(pHba, d);
1484 	}
1485 	bus_no = 0;
1486 	for(d = pHba->devices; d ; d = d->next) {
1487 		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1488 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1489 			tid = d->lct_data.tid;
1490 			// TODO get the bus_no from hrt-but for now they are in order
1491 			//bus_no =
1492 			if(bus_no > pHba->top_scsi_channel){
1493 				pHba->top_scsi_channel = bus_no;
1494 			}
1495 			pHba->channel[bus_no].type = d->lct_data.class_id;
1496 			pHba->channel[bus_no].tid = tid;
1497 			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1498 			{
1499 				pHba->channel[bus_no].scsi_id = buf[1];
1500 				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1501 			}
1502 			// TODO remove - this is just until we get from hrt
1503 			bus_no++;
1504 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1505 				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1506 				break;
1507 			}
1508 		}
1509 	}
1510 
1511 	// Setup adpt_device table
1512 	for(d = pHba->devices; d ; d = d->next) {
1513 		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1514 		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1515 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1516 
1517 			tid = d->lct_data.tid;
1518 			scsi_id = -1;
1519 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1520 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1521 				bus_no = buf[0]>>16;
1522 				scsi_id = buf[1];
1523 				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1524 				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1525 					continue;
1526 				}
1527 				if (scsi_id >= MAX_ID) {
1528 					continue;
1529 				}
1530 				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1531 					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1532 					if(pDev == NULL) {
1533 						return -ENOMEM;
1534 					}
1535 					pHba->channel[bus_no].device[scsi_id] = pDev;
1536 				} else {
1537 					for( pDev = pHba->channel[bus_no].device[scsi_id];
1538 							pDev->next_lun; pDev = pDev->next_lun){
1539 					}
1540 					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1541 					if(pDev->next_lun == NULL) {
1542 						return -ENOMEM;
1543 					}
1544 					pDev = pDev->next_lun;
1545 				}
1546 				pDev->tid = tid;
1547 				pDev->scsi_channel = bus_no;
1548 				pDev->scsi_id = scsi_id;
1549 				pDev->scsi_lun = scsi_lun;
1550 				pDev->pI2o_dev = d;
1551 				d->owner = pDev;
1552 				pDev->type = (buf[0])&0xff;
1553 				pDev->flags = (buf[0]>>8)&0xff;
1554 				if(scsi_id > pHba->top_scsi_id){
1555 					pHba->top_scsi_id = scsi_id;
1556 				}
1557 				if(scsi_lun > pHba->top_scsi_lun){
1558 					pHba->top_scsi_lun = scsi_lun;
1559 				}
1560 			}
1561 			if(scsi_id == -1){
1562 				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1563 						d->lct_data.identity_tag);
1564 			}
1565 		}
1566 	}
1567 	return 0;
1568 }
1569 
1570 
1571 /*
1572  *	Each I2O controller has a chain of devices on it - these match
1573  *	the useful parts of the LCT of the board.
1574  */
1575 
1576 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1577 {
1578 	mutex_lock(&adpt_configuration_lock);
1579 	d->controller=pHba;
1580 	d->owner=NULL;
1581 	d->next=pHba->devices;
1582 	d->prev=NULL;
1583 	if (pHba->devices != NULL){
1584 		pHba->devices->prev=d;
1585 	}
1586 	pHba->devices=d;
1587 	*d->dev_name = 0;
1588 
1589 	mutex_unlock(&adpt_configuration_lock);
1590 	return 0;
1591 }
1592 
1593 static int adpt_open(struct inode *inode, struct file *file)
1594 {
1595 	int minor;
1596 	adpt_hba* pHba;
1597 
1598 	mutex_lock(&adpt_mutex);
1599 	//TODO check for root access
1600 	//
1601 	minor = iminor(inode);
1602 	if (minor >= hba_count) {
1603 		mutex_unlock(&adpt_mutex);
1604 		return -ENXIO;
1605 	}
1606 	mutex_lock(&adpt_configuration_lock);
1607 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1608 		if (pHba->unit == minor) {
1609 			break;	/* found adapter */
1610 		}
1611 	}
1612 	if (pHba == NULL) {
1613 		mutex_unlock(&adpt_configuration_lock);
1614 		mutex_unlock(&adpt_mutex);
1615 		return -ENXIO;
1616 	}
1617 
1618 //	if(pHba->in_use){
1619 	//	mutex_unlock(&adpt_configuration_lock);
1620 //		return -EBUSY;
1621 //	}
1622 
1623 	pHba->in_use = 1;
1624 	mutex_unlock(&adpt_configuration_lock);
1625 	mutex_unlock(&adpt_mutex);
1626 
1627 	return 0;
1628 }
1629 
1630 static int adpt_close(struct inode *inode, struct file *file)
1631 {
1632 	int minor;
1633 	adpt_hba* pHba;
1634 
1635 	minor = iminor(inode);
1636 	if (minor >= hba_count) {
1637 		return -ENXIO;
1638 	}
1639 	mutex_lock(&adpt_configuration_lock);
1640 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1641 		if (pHba->unit == minor) {
1642 			break;	/* found adapter */
1643 		}
1644 	}
1645 	mutex_unlock(&adpt_configuration_lock);
1646 	if (pHba == NULL) {
1647 		return -ENXIO;
1648 	}
1649 
1650 	pHba->in_use = 0;
1651 
1652 	return 0;
1653 }
1654 
1655 
1656 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1657 {
1658 	u32 msg[MAX_MESSAGE_SIZE];
1659 	u32* reply = NULL;
1660 	u32 size = 0;
1661 	u32 reply_size = 0;
1662 	u32 __user *user_msg = arg;
1663 	u32 __user * user_reply = NULL;
1664 	void **sg_list = NULL;
1665 	u32 sg_offset = 0;
1666 	u32 sg_count = 0;
1667 	int sg_index = 0;
1668 	u32 i = 0;
1669 	u32 rcode = 0;
1670 	void *p = NULL;
1671 	dma_addr_t addr;
1672 	ulong flags = 0;
1673 
1674 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1675 	// get user msg size in u32s
1676 	if(get_user(size, &user_msg[0])){
1677 		return -EFAULT;
1678 	}
1679 	size = size>>16;
1680 
1681 	user_reply = &user_msg[size];
1682 	if(size > MAX_MESSAGE_SIZE){
1683 		return -EFAULT;
1684 	}
1685 	size *= 4; // Convert to bytes
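	/*
	 * The caller hands us a raw I2O frame: the upper 16 bits of its
	 * first word give the frame length in 32-bit words (converted
	 * to bytes above), and bits 7:4 give the SGL offset pulled out
	 * into sg_offset below.
	 */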
1686 
1687 	/* Copy in the user's I2O command */
1688 	if(copy_from_user(msg, user_msg, size)) {
1689 		return -EFAULT;
1690 	}
1691 	get_user(reply_size, &user_reply[0]);
1692 	reply_size = reply_size>>16;
1693 	if(reply_size > REPLY_FRAME_SIZE){
1694 		reply_size = REPLY_FRAME_SIZE;
1695 	}
1696 	reply_size *= 4;
1697 	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1698 	if(reply == NULL) {
1699 		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1700 		return -ENOMEM;
1701 	}
1702 	sg_offset = (msg[0]>>4)&0xf;
1703 	msg[2] = 0x40000000; // IOCTL context
1704 	msg[3] = adpt_ioctl_to_context(pHba, reply);
1705 	if (msg[3] == (u32)-1) {
1706 		rcode = -EBUSY;
1707 		goto free;
1708 	}
1709 
1710 	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
1711 	if (!sg_list) {
1712 		rcode = -ENOMEM;
1713 		goto free;
1714 	}
1715 	if(sg_offset) {
1716 		// TODO add 64 bit API
1717 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1718 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
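		/*
		 * Each sg_simple_element is two words: flag_count packs
		 * the SG flags into its upper bits (0x10000000 = simple
		 * address element, 0x04000000 = data direction) with the
		 * byte count in the low 24 bits; addr_bus is a 32-bit
		 * bus address.
		 */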
1719 		if (sg_count > pHba->sg_tablesize){
1720 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1721 			rcode = -EINVAL;
1722 			goto free;
1723 		}
1724 
1725 		for(i = 0; i < sg_count; i++) {
1726 			int sg_size;
1727 
1728 			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1729 				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1730 				rcode = -EINVAL;
1731 				goto cleanup;
1732 			}
1733 			sg_size = sg[i].flag_count & 0xffffff;
1734 			/* Allocate memory for the transfer */
1735 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1736 			if(!p) {
1737 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1738 						pHba->name,sg_size,i,sg_count);
1739 				rcode = -ENOMEM;
1740 				goto cleanup;
1741 			}
1742 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1743 			/* Copy in the user's SG buffer if necessary */
1744 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1745 				// sg_simple_element API is 32 bit
1746 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1747 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1748 					rcode = -EFAULT;
1749 					goto cleanup;
1750 				}
1751 			}
1752 			/* sg_simple_element API is 32 bit, but addr < 4GB */
1753 			sg[i].addr_bus = addr;
1754 		}
1755 	}
1756 
1757 	do {
1758 		/*
1759 		 * Stop any new commands from entering the
1760 		 * controller while processing the ioctl
1761 		 */
1762 		if (pHba->host) {
1763 			scsi_block_requests(pHba->host);
1764 			spin_lock_irqsave(pHba->host->host_lock, flags);
1765 		}
1766 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1767 		if (rcode != 0)
1768 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1769 					rcode, reply);
1770 		if (pHba->host) {
1771 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1772 			scsi_unblock_requests(pHba->host);
1773 		}
1774 	} while (rcode == -ETIMEDOUT);
1775 
1776 	if(rcode){
1777 		goto cleanup;
1778 	}
1779 
1780 	if(sg_offset) {
1781 	/* Copy the scatter/gather buffers back to user space */
1782 		u32 j;
1783 		// TODO add 64 bit API
1784 		struct sg_simple_element* sg;
1785 		int sg_size;
1786 
1787 		// re-acquire the original message to handle the sg copy operation correctly
1788 		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1789 		// get user msg size in u32s
1790 		if(get_user(size, &user_msg[0])){
1791 			rcode = -EFAULT;
1792 			goto cleanup;
1793 		}
1794 		size = size>>16;
1795 		size *= 4;
1796 		if (size > MAX_MESSAGE_SIZE) {
1797 			rcode = -EINVAL;
1798 			goto cleanup;
1799 		}
1800 		/* Copy in the user's I2O command */
1801 		if (copy_from_user (msg, user_msg, size)) {
1802 			rcode = -EFAULT;
1803 			goto cleanup;
1804 		}
1805 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1806 
1807 		// TODO add 64 bit API
1808 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1809 		for (j = 0; j < sg_count; j++) {
1810 			/* Copy out the SG list to user's buffer if necessary */
1811 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1812 				sg_size = sg[j].flag_count & 0xffffff;
1813 				// sg_simple_element API is 32 bit
1814 				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1815 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1816 					rcode = -EFAULT;
1817 					goto cleanup;
1818 				}
1819 			}
1820 		}
1821 	}
1822 
1823 	/* Copy back the reply to user space */
1824 	if (reply_size) {
1825 		// we wrote our own values for context - now restore the user supplied ones
1826 		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1827 			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1828 			rcode = -EFAULT;
1829 		}
1830 		if(copy_to_user(user_reply, reply, reply_size)) {
1831 			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1832 			rcode = -EFAULT;
1833 		}
1834 	}
1835 
1836 
1837 cleanup:
1838 	if (rcode != -ETIME && rcode != -EINTR) {
1839 		struct sg_simple_element *sg =
1840 				(struct sg_simple_element*) (msg +sg_offset);
1841 		while(sg_index) {
1842 			if(sg_list[--sg_index]) {
1843 				dma_free_coherent(&pHba->pDev->dev,
1844 					sg[sg_index].flag_count & 0xffffff,
1845 					sg_list[sg_index],
1846 					sg[sg_index].addr_bus);
1847 			}
1848 		}
1849 	}
1850 
1851 free:
1852 	kfree(sg_list);
1853 	kfree(reply);
1854 	return rcode;
1855 }
1856 
1857 #if defined __ia64__
1858 static void adpt_ia64_info(sysInfo_S* si)
1859 {
1860 	// This is all the info we need for now
1861 	// We will add more info as our new
1862 	// management utility requires it
1863 	si->processorType = PROC_IA64;
1864 }
1865 #endif
1866 
1867 #if defined __sparc__
1868 static void adpt_sparc_info(sysInfo_S* si)
1869 {
1870 	// This is all the info we need for now
1871 	// We will add more info as our new
1872 	// management utility requires it
1873 	si->processorType = PROC_ULTRASPARC;
1874 }
1875 #endif
1876 #if defined __alpha__
1877 static void adpt_alpha_info(sysInfo_S* si)
1878 {
1879 	// This is all the info we need for now
1880 	// We will add more info as our new
1881 	// management utility requires it
1882 	si->processorType = PROC_ALPHA;
1883 }
1884 #endif
1885 
1886 #if defined __i386__
1887 
1888 #include <uapi/asm/vm86.h>
1889 
1890 static void adpt_i386_info(sysInfo_S* si)
1891 {
1892 	// This is all the info we need for now
1893 	// We will add more info as our new
1894 	// management utility requires it
1895 	switch (boot_cpu_data.x86) {
1896 	case CPU_386:
1897 		si->processorType = PROC_386;
1898 		break;
1899 	case CPU_486:
1900 		si->processorType = PROC_486;
1901 		break;
1902 	case CPU_586:
1903 		si->processorType = PROC_PENTIUM;
1904 		break;
1905 	default:  // Just in case
1906 		si->processorType = PROC_PENTIUM;
1907 		break;
1908 	}
1909 }
1910 #endif
1911 
1912 /*
1913  * This routine returns information about the system.  This does not affect
1914  * any logic, and if the info is wrong it doesn't matter.
1915  */
1916 
1917 /* Get all the info we cannot get from kernel services */
1918 static int adpt_system_info(void __user *buffer)
1919 {
1920 	sysInfo_S si;
1921 
1922 	memset(&si, 0, sizeof(si));
1923 
1924 	si.osType = OS_LINUX;
1925 	si.osMajorVersion = 0;
1926 	si.osMinorVersion = 0;
1927 	si.osRevision = 0;
1928 	si.busType = SI_PCI_BUS;
1929 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1930 
1931 #if defined __i386__
1932 	adpt_i386_info(&si);
1933 #elif defined (__ia64__)
1934 	adpt_ia64_info(&si);
1935 #elif defined(__sparc__)
1936 	adpt_sparc_info(&si);
1937 #elif defined (__alpha__)
1938 	adpt_alpha_info(&si);
1939 #else
1940 	si.processorType = 0xff ;
1941 #endif
1942 	if (copy_to_user(buffer, &si, sizeof(si))){
1943 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1944 		return -EFAULT;
1945 	}
1946 
1947 	return 0;
1948 }
1949 
1950 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1951 {
1952 	int minor;
1953 	int error = 0;
1954 	adpt_hba* pHba;
1955 	ulong flags = 0;
1956 	void __user *argp = (void __user *)arg;
1957 
1958 	minor = iminor(inode);
1959 	if (minor >= DPTI_MAX_HBA){
1960 		return -ENXIO;
1961 	}
1962 	mutex_lock(&adpt_configuration_lock);
1963 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1964 		if (pHba->unit == minor) {
1965 			break;	/* found adapter */
1966 		}
1967 	}
1968 	mutex_unlock(&adpt_configuration_lock);
1969 	if(pHba == NULL){
1970 		return -ENXIO;
1971 	}
1972 
1973 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
1974 		schedule_timeout_uninterruptible(2);
1975 
1976 	switch (cmd) {
1977 	// TODO: handle 3 cases
1978 	case DPT_SIGNATURE:
1979 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1980 			return -EFAULT;
1981 		}
1982 		break;
1983 	case I2OUSRCMD:
1984 		return adpt_i2o_passthru(pHba, argp);
1985 
1986 	case DPT_CTRLINFO:{
1987 		drvrHBAinfo_S HbaInfo;
1988 
1989 #define FLG_OSD_PCI_VALID 0x0001
1990 #define FLG_OSD_DMA	  0x0002
1991 #define FLG_OSD_I2O	  0x0004
1992 		memset(&HbaInfo, 0, sizeof(HbaInfo));
1993 		HbaInfo.drvrHBAnum = pHba->unit;
1994 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1995 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
1996 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1997 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1998 		HbaInfo.Interrupt = pHba->pDev->irq;
1999 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2000 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2001 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2002 			return -EFAULT;
2003 		}
2004 		break;
2005 		}
2006 	case DPT_SYSINFO:
2007 		return adpt_system_info(argp);
2008 	case DPT_BLINKLED:{
2009 		u32 value;
2010 		value = (u32)adpt_read_blink_led(pHba);
2011 		if (copy_to_user(argp, &value, sizeof(value))) {
2012 			return -EFAULT;
2013 		}
2014 		break;
2015 		}
2016 	case I2ORESETCMD: {
2017 		struct Scsi_Host *shost = pHba->host;
2018 
2019 		if (shost)
2020 			spin_lock_irqsave(shost->host_lock, flags);
2021 		adpt_hba_reset(pHba);
2022 		if (shost)
2023 			spin_unlock_irqrestore(shost->host_lock, flags);
2024 		break;
2025 	}
2026 	case I2ORESCANCMD:
2027 		adpt_rescan(pHba);
2028 		break;
2029 	default:
2030 		return -EINVAL;
2031 	}
2032 
2033 	return error;
2034 }
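/*
 * For illustration only: a minimal user-space sketch of driving this
 * ioctl interface.  The device node name is an assumption; the command
 * code matches the DPT_BLINKLED case above, which copies a 32-bit
 * blink-LED code back to the caller.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include "dpti_ioctl.h"			// DPT_BLINKLED et al.
 *
 *	int fd = open("/dev/dpti0", O_RDWR);	// hypothetical node name
 *	unsigned int blink;
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &blink) == 0)
 *		printf("blink LED code: 0x%x\n", blink);
 */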
2035 
2036 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2037 {
2038 	struct inode *inode;
2039 	long ret;
2040 
2041 	inode = file_inode(file);
2042 
2043 	mutex_lock(&adpt_mutex);
2044 	ret = adpt_ioctl(inode, file, cmd, arg);
2045 	mutex_unlock(&adpt_mutex);
2046 
2047 	return ret;
2048 }
2049 
2050 #ifdef CONFIG_COMPAT
2051 static long compat_adpt_ioctl(struct file *file,
2052 				unsigned int cmd, unsigned long arg)
2053 {
2054 	struct inode *inode;
2055 	long ret;
2056 
2057 	inode = file_inode(file);
2058 
2059 	mutex_lock(&adpt_mutex);
2060 
2061 	switch(cmd) {
2062 		case DPT_SIGNATURE:
2063 		case I2OUSRCMD:
2064 		case DPT_CTRLINFO:
2065 		case DPT_SYSINFO:
2066 		case DPT_BLINKLED:
2067 		case I2ORESETCMD:
2068 		case I2ORESCANCMD:
2069 		case (DPT_TARGET_BUSY & 0xFFFF):
2070 		case DPT_TARGET_BUSY:
2071 			ret = adpt_ioctl(inode, file, cmd, arg);
2072 			break;
2073 		default:
2074 			ret =  -ENOIOCTLCMD;
2075 	}
2076 
2077 	mutex_unlock(&adpt_mutex);
2078 
2079 	return ret;
2080 }
2081 #endif
2082 
2083 static irqreturn_t adpt_isr(int irq, void *dev_id)
2084 {
2085 	struct scsi_cmnd* cmd;
2086 	adpt_hba* pHba = dev_id;
2087 	u32 m;
2088 	void __iomem *reply;
2089 	u32 status=0;
2090 	u32 context;
2091 	ulong flags = 0;
2092 	int handled = 0;
2093 
2094 	if (pHba == NULL){
2095 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2096 		return IRQ_NONE;
2097 	}
2098 	if(pHba->host)
2099 		spin_lock_irqsave(pHba->host->host_lock, flags);
2100 
2101 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2102 		m = readl(pHba->reply_port);
2103 		if(m == EMPTY_QUEUE){
2104 			// Try twice then give up
2105 			rmb();
2106 			m = readl(pHba->reply_port);
2107 			if(m == EMPTY_QUEUE){
2108 				// This really should not happen
2109 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2110 				goto out;
2111 			}
2112 		}
2113 		if (pHba->reply_pool_pa <= m &&
2114 		    m < pHba->reply_pool_pa +
2115 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2116 			reply = (u8 *)pHba->reply_pool +
2117 						(m - pHba->reply_pool_pa);
2118 		} else {
2119 			/* Ick, we should *never* be here */
2120 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2121 			reply = (u8 *)bus_to_virt(m);
2122 		}
2123 
2124 		if (readl(reply) & MSG_FAIL) {
2125 			u32 old_m = readl(reply+28);
2126 			void __iomem *msg;
2127 			u32 old_context;
2128 			PDEBUG("%s: Failed message\n",pHba->name);
2129 			if(old_m >= 0x100000){
2130 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2131 				writel(m,pHba->reply_port);
2132 				continue;
2133 			}
2134 			// Transaction context is 0 in failed reply frame
2135 			msg = pHba->msg_addr_virt + old_m;
2136 			old_context = readl(msg+12);
2137 			writel(old_context, reply+12);
2138 			adpt_send_nop(pHba, old_m);
2139 		}
2140 		context = readl(reply+8);
2141 		if(context & 0x40000000){ // IOCTL
2142 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2143 			if( p != NULL) {
2144 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2145 			}
2146 			// All IOCTLs will also be post wait
2147 		}
2148 		if(context & 0x80000000){ // Post wait message
2149 			status = readl(reply+16);
2150 			if(status  >> 24){
2151 				status &=  0xffff; /* Get detail status */
2152 			} else {
2153 				status = I2O_POST_WAIT_OK;
2154 			}
2155 			if(!(context & 0x40000000)) {
2156 				/*
2157 				 * The request tag is one less than the command tag
2158 				 * as the firmware might treat a 0 tag as invalid
2159 				 */
2160 				cmd = scsi_host_find_tag(pHba->host,
2161 							 readl(reply + 12) - 1);
2162 				if(cmd != NULL) {
2163 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2164 				}
2165 			}
2166 			adpt_i2o_post_wait_complete(context, status);
2167 		} else { // SCSI message
2168 			/*
2169 			 * The request tag is one less than the command tag
2170 			 * as the firmware might treat a 0 tag as invalid
2171 			 */
2172 			cmd = scsi_host_find_tag(pHba->host,
2173 						 readl(reply + 12) - 1);
2174 			if(cmd != NULL){
2175 				scsi_dma_unmap(cmd);
2176 				adpt_i2o_to_scsi(reply, cmd);
2177 			}
2178 		}
2179 		writel(m, pHba->reply_port);
2180 		wmb();
2181 		rmb();
2182 	}
2183 	handled = 1;
2184 out:	if(pHba->host)
2185 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2186 	return IRQ_RETVAL(handled);
2187 }
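/*
 * Summary of the reply transaction-context encoding decoded above (a
 * reading aid inferred from the checks in adpt_isr):
 *
 *	context & 0x40000000	ioctl passthru; the reply frame is copied
 *				to the buffer registered at post time
 *	context & 0x80000000	post-wait message; the sleeping poster is
 *				woken via adpt_i2o_post_wait_complete()
 *	neither bit set		normal SCSI command; readl(reply+12) - 1
 *				recovers the block-layer tag
 */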
2188 
2189 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2190 {
2191 	int i;
2192 	u32 msg[MAX_MESSAGE_SIZE];
2193 	u32* mptr;
2194 	u32* lptr;
2195 	u32 *lenptr;
2196 	int direction;
2197 	int scsidir;
2198 	int nseg;
2199 	u32 len;
2200 	u32 reqlen;
2201 	s32 rcode;
2202 	dma_addr_t addr;
2203 
2204 	memset(msg, 0 , sizeof(msg));
2205 	len = scsi_bufflen(cmd);
2206 	direction = 0x00000000;
2207 
2208 	scsidir = 0x00000000;			// DATA NO XFER
2209 	if(len) {
2210 		/*
2211 		 * Set SCBFlags to indicate if data is being transferred
2212 		 * in or out, or no data transfer
2213 		 * Note:  no need to check for a negative index since
2214 		 * cmd->cmnd[0] is an unsigned char
2215 		 */
2216 		switch(cmd->sc_data_direction){
2217 		case DMA_FROM_DEVICE:
2218 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2219 			break;
2220 		case DMA_TO_DEVICE:
2221 			direction=0x04000000;	// SGL OUT
2222 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2223 			break;
2224 		case DMA_NONE:
2225 			break;
2226 		case DMA_BIDIRECTIONAL:
2227 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2228 			// Assume In - and continue;
2229 			break;
2230 		default:
2231 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2232 			     pHba->name, cmd->cmnd[0]);
2233 			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2234 			cmd->scsi_done(cmd);
2235 			return 	0;
2236 		}
2237 	}
2238 	// msg[0] is set later
2239 	// I2O_CMD_SCSI_EXEC
2240 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2241 	msg[2] = 0;
2242 	/* Add 1 to avoid firmware treating it as invalid command */
2243 	msg[3] = cmd->request->tag + 1;
2244 	// Our cards use the transaction context as the tag for queueing
2245 	// Adaptec/DPT Private stuff
2246 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2247 	msg[5] = d->tid;
2248 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2249 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2250 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2251 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2252 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2253 
2254 	mptr=msg+7;
2255 
2256 	// Write SCSI command into the message - always 16 byte block
2257 	memset(mptr, 0,  16);
2258 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2259 	mptr+=4;
2260 	lenptr=mptr++;		/* Remember me - fill in when we know */
2261 	if (dpt_dma64(pHba)) {
2262 		reqlen = 16;		// SINGLE SGE
2263 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2264 		*mptr++ = 1 << PAGE_SHIFT;
2265 	} else {
2266 		reqlen = 14;		// SINGLE SGE
2267 	}
2268 	/* Now fill in the SGList and command */
2269 
2270 	nseg = scsi_dma_map(cmd);
2271 	BUG_ON(nseg < 0);
2272 	if (nseg) {
2273 		struct scatterlist *sg;
2274 
2275 		len = 0;
2276 		scsi_for_each_sg(cmd, sg, nseg, i) {
2277 			lptr = mptr;
2278 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2279 			len+=sg_dma_len(sg);
2280 			addr = sg_dma_address(sg);
2281 			*mptr++ = dma_low(addr);
2282 			if (dpt_dma64(pHba))
2283 				*mptr++ = dma_high(addr);
2284 			/* Make this an end of list */
2285 			if (i == nseg - 1)
2286 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2287 		}
2288 		reqlen = mptr - msg;
2289 		*lenptr = len;
2290 
2291 		if(cmd->underflow && len != cmd->underflow){
2292 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2293 				len, cmd->underflow);
2294 		}
2295 	} else {
2296 		*lenptr = len = 0;
2297 		reqlen = 12;
2298 	}
2299 
2300 	/* Stick the headers on */
2301 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2302 
2303 	// Send it on its way
2304 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2305 	if (rcode == 0) {
2306 		return 0;
2307 	}
2308 	return rcode;
2309 }
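/*
 * SGL flag bits used when building the message above (names follow the
 * I2O spec; listed as a reading aid, not new definitions):
 *
 *	0x10000000	simple address element
 *	0x40000000	end of buffer
 *	0x80000000	last element
 *	0x04000000	direction bit (host-to-device transfer)
 *
 * Hence the 0xD0000000 written into the final SGE marks it as a simple
 * element that ends both the buffer and the list.
 */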
2310 
2311 
2312 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2313 {
2314 	struct Scsi_Host *host;
2315 
2316 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2317 	if (host == NULL) {
2318 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2319 		return -1;
2320 	}
2321 	host->hostdata[0] = (unsigned long)pHba;
2322 	pHba->host = host;
2323 
2324 	host->irq = pHba->pDev->irq;
2325 	/* no IO ports, so don't have to set host->io_port and
2326 	 * host->n_io_port
2327 	 */
2328 	host->io_port = 0;
2329 	host->n_io_port = 0;
2330 				/* see comments in scsi_host.h */
2331 	host->max_id = 16;
2332 	host->max_lun = 256;
2333 	host->max_channel = pHba->top_scsi_channel + 1;
2334 	host->cmd_per_lun = 1;
2335 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2336 	host->sg_tablesize = pHba->sg_tablesize;
2337 	host->can_queue = pHba->post_fifo_size;
2338 	host->use_cmd_list = 1;
2339 
2340 	return 0;
2341 }
2342 
2343 
2344 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2345 {
2346 	adpt_hba* pHba;
2347 	u32 hba_status;
2348 	u32 dev_status;
2349 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2350 	// Reading bytes would look cleaner, but the rest of the I/O in
2351 	// this driver is done in 4-byte words, so keep that model here
2352 	// as well
2353 	u16 detailed_status = readl(reply+16) &0xffff;
2354 	dev_status = (detailed_status & 0xff);
2355 	hba_status = detailed_status >> 8;
2356 
2357 	// calculate resid for sg
2358 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2359 
2360 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2361 
2362 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2363 
2364 	if(!(reply_flags & MSG_FAIL)) {
2365 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2366 		case I2O_SCSI_DSC_SUCCESS:
2367 			cmd->result = (DID_OK << 16);
2368 			// handle underflow
2369 			if (readl(reply+20) < cmd->underflow) {
2370 				cmd->result = (DID_ERROR <<16);
2371 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2372 			}
2373 			break;
2374 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2375 			cmd->result = (DID_ABORT << 16);
2376 			break;
2377 		case I2O_SCSI_DSC_PATH_INVALID:
2378 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2379 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2380 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2381 		case I2O_SCSI_DSC_NO_ADAPTER:
2382 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2383 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2384 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2385 			cmd->result = (DID_TIME_OUT << 16);
2386 			break;
2387 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2388 		case I2O_SCSI_DSC_BUS_BUSY:
2389 			cmd->result = (DID_BUS_BUSY << 16);
2390 			break;
2391 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2392 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2393 			cmd->result = (DID_RESET << 16);
2394 			break;
2395 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2396 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2397 			cmd->result = (DID_PARITY << 16);
2398 			break;
2399 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2400 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2401 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2402 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2403 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2404 		case I2O_SCSI_DSC_DATA_OVERRUN:
2405 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2406 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2407 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2408 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2409 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2410 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2411 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2412 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2413 		case I2O_SCSI_DSC_INVALID_CDB:
2414 		case I2O_SCSI_DSC_LUN_INVALID:
2415 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2416 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2417 		case I2O_SCSI_DSC_NO_NEXUS:
2418 		case I2O_SCSI_DSC_CDB_RECEIVED:
2419 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2420 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2421 		case I2O_SCSI_DSC_REQUEST_INVALID:
2422 		default:
2423 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2424 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2425 			       hba_status, dev_status, cmd->cmnd[0]);
2426 			cmd->result = (DID_ERROR << 16);
2427 			break;
2428 		}
2429 
2430 		// copy over the request sense data if it was a check
2431 		// condition status
2432 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2433 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2434 			// Copy over the sense data
2435 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2436 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2437 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2438 				/* This is to handle an array failed */
2439 				cmd->result = (DID_TIME_OUT << 16);
2440 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2441 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2442 					hba_status, dev_status, cmd->cmnd[0]);
2443 
2444 			}
2445 		}
2446 	} else {
2447 		/* In this condition we could not talk to the tid;
2448 		 * the card rejected it.  We should signal a retry
2449 		 * for a limited number of retries.
2450 		 */
2451 		cmd->result = (DID_TIME_OUT << 16);
2452 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2453 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2454 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2455 	}
2456 
2457 	cmd->result |= (dev_status);
2458 
2459 	if(cmd->scsi_done != NULL){
2460 		cmd->scsi_done(cmd);
2461 	}
2462 	return cmd->result;
2463 }
2464 
2465 
2466 static s32 adpt_rescan(adpt_hba* pHba)
2467 {
2468 	s32 rcode;
2469 	ulong flags = 0;
2470 
2471 	if(pHba->host)
2472 		spin_lock_irqsave(pHba->host->host_lock, flags);
2473 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2474 		goto out;
2475 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2476 		goto out;
2477 	rcode = 0;
2478 out:	if(pHba->host)
2479 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2480 	return rcode;
2481 }
2482 
2483 
2484 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2485 {
2486 	int i;
2487 	int max;
2488 	int tid;
2489 	struct i2o_device *d;
2490 	i2o_lct *lct = pHba->lct;
2491 	u8 bus_no = 0;
2492 	s16 scsi_id;
2493 	u64 scsi_lun;
2494 	u32 buf[10]; // at least 8 u32's
2495 	struct adpt_device* pDev = NULL;
2496 	struct i2o_device* pI2o_dev = NULL;
2497 
2498 	if (lct == NULL) {
2499 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2500 		return -1;
2501 	}
2502 
2503 	max = lct->table_size;
2504 	max -= 3;
2505 	max /= 9;
2506 
2507 	// Mark each drive as unscanned
2508 	for (d = pHba->devices; d; d = d->next) {
2509 		pDev =(struct adpt_device*) d->owner;
2510 		if(!pDev){
2511 			continue;
2512 		}
2513 		pDev->state |= DPTI_DEV_UNSCANNED;
2514 	}
2515 
2516 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2517 
2518 	for(i=0;i<max;i++) {
2519 		if( lct->lct_entry[i].user_tid != 0xfff){
2520 			continue;
2521 		}
2522 
2523 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2524 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2525 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2526 			tid = lct->lct_entry[i].tid;
2527 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2528 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2529 				continue;
2530 			}
2531 			bus_no = buf[0]>>16;
2532 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2533 				printk(KERN_WARNING
2534 					"%s: Channel number %d out of range\n",
2535 					pHba->name, bus_no);
2536 				continue;
2537 			}
2538 
2539 			scsi_id = buf[1];
2540 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2541 			pDev = pHba->channel[bus_no].device[scsi_id];
2542 			/* walk the LUN chain for a match */
2543 			while(pDev) {
2544 				if(pDev->scsi_lun == scsi_lun) {
2545 					break;
2546 				}
2547 				pDev = pDev->next_lun;
2548 			}
2549 			if(!pDev ) { // Something new add it
2550 				d = kmalloc(sizeof(struct i2o_device),
2551 					    GFP_ATOMIC);
2552 				if(d==NULL)
2553 				{
2554 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2555 					return -ENOMEM;
2556 				}
2557 
2558 				d->controller = pHba;
2559 				d->next = NULL;
2560 
2561 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2562 
2563 				d->flags = 0;
2564 				adpt_i2o_report_hba_unit(pHba, d);
2565 				adpt_i2o_install_device(pHba, d);
2566 
2567 				pDev = pHba->channel[bus_no].device[scsi_id];
2568 				if( pDev == NULL){
2569 					pDev =
2570 					  kzalloc(sizeof(struct adpt_device),
2571 						  GFP_ATOMIC);
2572 					if(pDev == NULL) {
2573 						return -ENOMEM;
2574 					}
2575 					pHba->channel[bus_no].device[scsi_id] = pDev;
2576 				} else {
2577 					while (pDev->next_lun) {
2578 						pDev = pDev->next_lun;
2579 					}
2580 					pDev = pDev->next_lun =
2581 					  kzalloc(sizeof(struct adpt_device),
2582 						  GFP_ATOMIC);
2583 					if(pDev == NULL) {
2584 						return -ENOMEM;
2585 					}
2586 				}
2587 				pDev->tid = d->lct_data.tid;
2588 				pDev->scsi_channel = bus_no;
2589 				pDev->scsi_id = scsi_id;
2590 				pDev->scsi_lun = scsi_lun;
2591 				pDev->pI2o_dev = d;
2592 				d->owner = pDev;
2593 				pDev->type = (buf[0])&0xff;
2594 				pDev->flags = (buf[0]>>8)&0xff;
2595 				// Too late, the SCSI system has made up its mind, but what the hey ...
2596 				if(scsi_id > pHba->top_scsi_id){
2597 					pHba->top_scsi_id = scsi_id;
2598 				}
2599 				if(scsi_lun > pHba->top_scsi_lun){
2600 					pHba->top_scsi_lun = scsi_lun;
2601 				}
2602 				continue;
2603 			} // end of new i2o device
2604 
2605 			// We found an old device - check it
2606 			while(pDev) {
2607 				if(pDev->scsi_lun == scsi_lun) {
2608 					if(!scsi_device_online(pDev->pScsi_dev)) {
2609 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2610 								pHba->name,bus_no,scsi_id,scsi_lun);
2611 						if (pDev->pScsi_dev) {
2612 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2613 						}
2614 					}
2615 					d = pDev->pI2o_dev;
2616 					if(d->lct_data.tid != tid) { // something changed
2617 						pDev->tid = tid;
2618 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2619 						if (pDev->pScsi_dev) {
2620 							pDev->pScsi_dev->changed = TRUE;
2621 							pDev->pScsi_dev->removable = TRUE;
2622 						}
2623 					}
2624 					// Found it - mark it scanned
2625 					pDev->state = DPTI_DEV_ONLINE;
2626 					break;
2627 				}
2628 				pDev = pDev->next_lun;
2629 			}
2630 		}
2631 	}
2632 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2633 		pDev =(struct adpt_device*) pI2o_dev->owner;
2634 		if(!pDev){
2635 			continue;
2636 		}
2637 		// Take offline any drives that previously existed but could not
2638 		// be found in the LCT table
2639 		if (pDev->state & DPTI_DEV_UNSCANNED){
2640 			pDev->state = DPTI_DEV_OFFLINE;
2641 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2642 			if (pDev->pScsi_dev) {
2643 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2644 			}
2645 		}
2646 	}
2647 	return 0;
2648 }
2649 
2650 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2651 {
2652 	struct scsi_cmnd* 	cmd = NULL;
2653 	struct scsi_device* 	d = NULL;
2654 
2655 	shost_for_each_device(d, pHba->host) {
2656 		unsigned long flags;
2657 		spin_lock_irqsave(&d->list_lock, flags);
2658 		list_for_each_entry(cmd, &d->cmd_list, list) {
2659 			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2660 			cmd->scsi_done(cmd);
2661 		}
2662 		spin_unlock_irqrestore(&d->list_lock, flags);
2663 	}
2664 }
2665 
2666 
2667 /*============================================================================
2668  *  Routines from i2o subsystem
2669  *============================================================================
2670  */
2671 
2672 
2673 
2674 /*
2675  *	Bring an I2O controller into HOLD state. See the spec.
2676  */
2677 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2678 {
2679 	int rcode;
2680 
2681 	if(pHba->initialized ) {
2682 		if (adpt_i2o_status_get(pHba) < 0) {
2683 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2684 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2685 				return rcode;
2686 			}
2687 			if (adpt_i2o_status_get(pHba) < 0) {
2688 				printk(KERN_INFO "HBA not responding.\n");
2689 				return -1;
2690 			}
2691 		}
2692 
2693 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2694 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2695 			return -1;
2696 		}
2697 
2698 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2699 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2700 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2701 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2702 			adpt_i2o_reset_hba(pHba);
2703 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2704 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2705 				return -1;
2706 			}
2707 		}
2708 	} else {
2709 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2710 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2711 			return rcode;
2712 		}
2713 
2714 	}
2715 
2716 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2717 		return -1;
2718 	}
2719 
2720 	/* In HOLD state */
2721 
2722 	if (adpt_i2o_hrt_get(pHba) < 0) {
2723 		return -1;
2724 	}
2725 
2726 	return 0;
2727 }
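/*
 * For reference, the overall IOP bring-up order (a sketch assembled
 * from the routines in this file; adpt_detect() is assumed to be the
 * caller that strings them together):
 *
 *	adpt_i2o_activate_hba(pHba);	// reset if needed -> HOLD state
 *	adpt_i2o_lct_get(pHba);		// read the logical config table
 *	adpt_i2o_build_sys_table();	// describe every IOP to the others
 *	adpt_i2o_online_hba(pHba);	// systab set + enable -> OPERATIONAL
 */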
2728 
2729 /*
2730  *	Bring a controller online into OPERATIONAL state.
2731  */
2732 
2733 static int adpt_i2o_online_hba(adpt_hba* pHba)
2734 {
2735 	if (adpt_i2o_systab_send(pHba) < 0)
2736 		return -1;
2737 	/* In READY state */
2738 
2739 	if (adpt_i2o_enable_hba(pHba) < 0)
2740 		return -1;
2741 
2742 	/* In OPERATIONAL state  */
2743 	return 0;
2744 }
2745 
2746 static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
2747 {
2748 	u32 __iomem *msg;
2749 	ulong timeout = jiffies + 5*HZ;
2750 
2751 	while(m == EMPTY_QUEUE){
2752 		rmb();
2753 		m = readl(pHba->post_port);
2754 		if(m != EMPTY_QUEUE){
2755 			break;
2756 		}
2757 		if(time_after(jiffies,timeout)){
2758 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2759 			return 2;
2760 		}
2761 		schedule_timeout_uninterruptible(1);
2762 	}
2763 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2764 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2765 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2766 	writel( 0,&msg[2]);
2767 	wmb();
2768 
2769 	writel(m, pHba->post_port);
2770 	wmb();
2771 	return 0;
2772 }
2773 
2774 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2775 {
2776 	u8 *status;
2777 	dma_addr_t addr;
2778 	u32 __iomem *msg = NULL;
2779 	int i;
2780 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2781 	u32 m;
2782 
2783 	do {
2784 		rmb();
2785 		m = readl(pHba->post_port);
2786 		if (m != EMPTY_QUEUE) {
2787 			break;
2788 		}
2789 
2790 		if(time_after(jiffies,timeout)){
2791 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2792 			return -ETIMEDOUT;
2793 		}
2794 		schedule_timeout_uninterruptible(1);
2795 	} while(m == EMPTY_QUEUE);
2796 
2797 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2798 
2799 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2800 	if (!status) {
2801 		adpt_send_nop(pHba, m);
2802 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2803 			pHba->name);
2804 		return -ENOMEM;
2805 	}
2806 	memset(status, 0, 4);
2807 
2808 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2809 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2810 	writel(0, &msg[2]);
2811 	writel(0x0106, &msg[3]);	/* Transaction context */
2812 	writel(4096, &msg[4]);		/* Host page frame size */
2813 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2814 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2815 	writel((u32)addr, &msg[7]);
2816 
2817 	writel(m, pHba->post_port);
2818 	wmb();
2819 
2820 	// Wait for the reply status to come back
2821 	do {
2822 		if (*status) {
2823 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2824 				break;
2825 			}
2826 		}
2827 		rmb();
2828 		if(time_after(jiffies,timeout)){
2829 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2830 			/* We lose 4 bytes of "status" here, but we
2831 			   cannot free them because the controller may
2832 			   wake up and corrupt those bytes at any time */
2833 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2834 			return -ETIMEDOUT;
2835 		}
2836 		schedule_timeout_uninterruptible(1);
2837 	} while (1);
2838 
2839 	// If the command was successful, fill the fifo with our reply
2840 	// message packets
2841 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2842 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2843 		return -2;
2844 	}
2845 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2846 
2847 	if(pHba->reply_pool != NULL) {
2848 		dma_free_coherent(&pHba->pDev->dev,
2849 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2850 			pHba->reply_pool, pHba->reply_pool_pa);
2851 	}
2852 
2853 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2854 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2855 				&pHba->reply_pool_pa, GFP_KERNEL);
2856 	if (!pHba->reply_pool) {
2857 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2858 		return -ENOMEM;
2859 	}
2860 	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2861 
2862 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2863 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2864 			pHba->reply_port);
2865 		wmb();
2866 	}
2867 	adpt_i2o_status_get(pHba);
2868 	return 0;
2869 }
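/*
 * Outbound-queue initialization protocol, as implemented above (a
 * summary for readers following the I2O spec): post
 * I2O_CMD_OUTBOUND_INIT with a 4 KB host page frame and the reply
 * frame size, poll the 4-byte status cookie until it leaves the
 * IN_PROGRESS (0x01) state, and on COMPLETE (0x04) prime the reply
 * FIFO by writing the bus address of every reply frame to the reply
 * port.
 */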
2870 
2871 
2872 /*
2873  * I2O System Table.  Contains information about
2874  * all the IOPs in the system.  Used to inform IOPs
2875  * about each other's existence.
2876  *
2877  * sys_tbl_ver is the CurrentChangeIndicator that is
2878  * used by IOPs to track changes.
2879  */
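/*
 * Fields filled in per IOP by adpt_i2o_build_sys_table() below (a
 * summary of the assignments in that routine; see the driver headers
 * for the authoritative struct i2o_sys_tbl_entry layout):
 *
 *	org_id			from the IOP status block
 *	iop_id			pHba->unit + 2
 *	seg_num			always 0 here
 *	i2o_version		from the status block
 *	iop_state		from the status block
 *	msg_type		from the status block
 *	frame_size		inbound frame size, in 32-bit words
 *	iop_capabilities	from the status block
 *	inbound_low/high	the inbound FIFO at base_addr_phys + 0x40
 */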
2880 
2881 
2882 
2883 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2884 {
2885 	ulong timeout;
2886 	u32 m;
2887 	u32 __iomem *msg;
2888 	u8 *status_block=NULL;
2889 
2890 	if(pHba->status_block == NULL) {
2891 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2892 					sizeof(i2o_status_block),
2893 					&pHba->status_block_pa, GFP_KERNEL);
2894 		if(pHba->status_block == NULL) {
2895 			printk(KERN_ERR
2896 			"dpti%d: Get Status Block failed; Out of memory. \n",
2897 			pHba->unit);
2898 			return -ENOMEM;
2899 		}
2900 	}
2901 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2902 	status_block = (u8*)(pHba->status_block);
2903 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2904 	do {
2905 		rmb();
2906 		m = readl(pHba->post_port);
2907 		if (m != EMPTY_QUEUE) {
2908 			break;
2909 		}
2910 		if(time_after(jiffies,timeout)){
2911 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2912 					pHba->name);
2913 			return -ETIMEDOUT;
2914 		}
2915 		schedule_timeout_uninterruptible(1);
2916 	} while(m==EMPTY_QUEUE);
2917 
2918 
2919 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2920 
2921 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2922 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2923 	writel(1, &msg[2]);
2924 	writel(0, &msg[3]);
2925 	writel(0, &msg[4]);
2926 	writel(0, &msg[5]);
2927 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2928 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2929 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2930 
2931 	//post message
2932 	writel(m, pHba->post_port);
2933 	wmb();
2934 
2935 	while(status_block[87]!=0xff){
2936 		if(time_after(jiffies,timeout)){
2937 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2938 				pHba->unit);
2939 			return -ETIMEDOUT;
2940 		}
2941 		rmb();
2942 		schedule_timeout_uninterruptible(1);
2943 	}
2944 
2945 	// Set up our number of outbound and inbound messages
2946 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2947 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2948 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2949 	}
2950 
2951 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2952 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2953 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2954 	}
2955 
2956 	// Calculate the Scatter Gather list size
2957 	if (dpt_dma64(pHba)) {
2958 		pHba->sg_tablesize
2959 		  = ((pHba->status_block->inbound_frame_size * 4
2960 		  - 14 * sizeof(u32))
2961 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2962 	} else {
2963 		pHba->sg_tablesize
2964 		  = ((pHba->status_block->inbound_frame_size * 4
2965 		  - 12 * sizeof(u32))
2966 		  / sizeof(struct sg_simple_element));
2967 	}
2968 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2969 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
2970 	}
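	/*
	 * Worked example with illustrative numbers: a 32-word inbound
	 * frame without 64-bit SG support leaves 32*4 - 12*4 = 80 bytes
	 * for the SGL; assuming the two-word (8-byte) sg_simple_element,
	 * that yields 80 / 8 = 10 SG entries per command.
	 */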
2971 
2972 
2973 #ifdef DEBUG
2974 	printk("dpti%d: State = ",pHba->unit);
2975 	switch(pHba->status_block->iop_state) {
2976 		case 0x01:
2977 			printk("INIT\n");
2978 			break;
2979 		case 0x02:
2980 			printk("RESET\n");
2981 			break;
2982 		case 0x04:
2983 			printk("HOLD\n");
2984 			break;
2985 		case 0x05:
2986 			printk("READY\n");
2987 			break;
2988 		case 0x08:
2989 			printk("OPERATIONAL\n");
2990 			break;
2991 		case 0x10:
2992 			printk("FAILED\n");
2993 			break;
2994 		case 0x11:
2995 			printk("FAULTED\n");
2996 			break;
2997 		default:
2998 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2999 	}
3000 #endif
3001 	return 0;
3002 }
3003 
3004 /*
3005  * Get the IOP's Logical Configuration Table
3006  */
3007 static int adpt_i2o_lct_get(adpt_hba* pHba)
3008 {
3009 	u32 msg[8];
3010 	int ret;
3011 	u32 buf[16];
3012 
3013 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3014 		pHba->lct_size = pHba->status_block->expected_lct_size;
3015 	}
3016 	do {
3017 		if (pHba->lct == NULL) {
3018 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3019 					pHba->lct_size, &pHba->lct_pa,
3020 					GFP_ATOMIC);
3021 			if(pHba->lct == NULL) {
3022 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3023 					pHba->name);
3024 				return -ENOMEM;
3025 			}
3026 		}
3027 		memset(pHba->lct, 0, pHba->lct_size);
3028 
3029 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3030 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3031 		msg[2] = 0;
3032 		msg[3] = 0;
3033 		msg[4] = 0xFFFFFFFF;	/* All devices */
3034 		msg[5] = 0x00000000;	/* Report now */
3035 		msg[6] = 0xD0000000|pHba->lct_size;
3036 		msg[7] = (u32)pHba->lct_pa;
3037 
3038 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3039 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3040 				pHba->name, ret);
3041 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3042 			return ret;
3043 		}
3044 
3045 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3046 			pHba->lct_size = pHba->lct->table_size << 2;
3047 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3048 					pHba->lct, pHba->lct_pa);
3049 			pHba->lct = NULL;
3050 		}
3051 	} while (pHba->lct == NULL);
3052 
3053 	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3054 
3055 
3056 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3057 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3058 		pHba->FwDebugBufferSize = buf[1];
3059 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3060 						pHba->FwDebugBufferSize);
3061 		if (pHba->FwDebugBuffer_P) {
3062 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3063 							FW_DEBUG_FLAGS_OFFSET;
3064 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3065 							FW_DEBUG_BLED_OFFSET;
3066 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3067 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3068 						FW_DEBUG_STR_LENGTH_OFFSET;
3069 			pHba->FwDebugBuffer_P += buf[2];
3070 			pHba->FwDebugFlags = 0;
3071 		}
3072 	}
3073 
3074 	return 0;
3075 }
3076 
3077 static int adpt_i2o_build_sys_table(void)
3078 {
3079 	adpt_hba* pHba = hba_chain;
3080 	int count = 0;
3081 
3082 	if (sys_tbl)
3083 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3084 					sys_tbl, sys_tbl_pa);
3085 
3086 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3087 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3088 
3089 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3090 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3091 	if (!sys_tbl) {
3092 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3093 		return -ENOMEM;
3094 	}
3095 	memset(sys_tbl, 0, sys_tbl_len);
3096 
3097 	sys_tbl->num_entries = hba_count;
3098 	sys_tbl->version = I2OVERSION;
3099 	sys_tbl->change_ind = sys_tbl_ind++;
3100 
3101 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3102 		u64 addr;
3103 		// Get updated Status Block so we have the latest information
3104 		if (adpt_i2o_status_get(pHba)) {
3105 			sys_tbl->num_entries--;
3106 			continue; // try next one
3107 		}
3108 
3109 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3110 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3111 		sys_tbl->iops[count].seg_num = 0;
3112 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3113 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3114 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3115 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3116 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3117 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3118 		addr = pHba->base_addr_phys + 0x40;
3119 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3120 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3121 
3122 		count++;
3123 	}
3124 
3125 #ifdef DEBUG
3126 {
3127 	u32 *table = (u32*)sys_tbl;
3128 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3129 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3130 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3131 			count, table[count]);
3132 	}
3133 }
3134 #endif
3135 
3136 	return 0;
3137 }
3138 
3139 
3140 /*
3141  *	 Dump the information block associated with a given unit (TID)
3142  */
3143 
3144 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3145 {
3146 	char buf[64];
3147 	int unit = d->lct_data.tid;
3148 
3149 	printk(KERN_INFO "TID %3.3d ", unit);
3150 
3151 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3152 	{
3153 		buf[16]=0;
3154 		printk(" Vendor: %-12.12s", buf);
3155 	}
3156 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3157 	{
3158 		buf[16]=0;
3159 		printk(" Device: %-12.12s", buf);
3160 	}
3161 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3162 	{
3163 		buf[8]=0;
3164 		printk(" Rev: %-12.12s\n", buf);
3165 	}
3166 #ifdef DEBUG
3167 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3168 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3169 	 printk(KERN_INFO "\tFlags: ");
3170 
3171 	 if(d->lct_data.device_flags&(1<<0))
3172 		  printk("C");	     // ConfigDialog requested
3173 	 if(d->lct_data.device_flags&(1<<1))
3174 		  printk("U");	     // Multi-user capable
3175 	 if(!(d->lct_data.device_flags&(1<<4)))
3176 		  printk("P");	     // Peer service enabled!
3177 	 if(!(d->lct_data.device_flags&(1<<5)))
3178 		  printk("M");	     // Mgmt service enabled!
3179 	 printk("\n");
3180 #endif
3181 }
3182 
3183 #ifdef DEBUG
3184 /*
3185  *	Do i2o class name lookup
3186  */
3187 static const char *adpt_i2o_get_class_name(int class)
3188 {
3189 	int idx = 16;
3190 	static char *i2o_class_name[] = {
3191 		"Executive",
3192 		"Device Driver Module",
3193 		"Block Device",
3194 		"Tape Device",
3195 		"LAN Interface",
3196 		"WAN Interface",
3197 		"Fibre Channel Port",
3198 		"Fibre Channel Device",
3199 		"SCSI Device",
3200 		"ATE Port",
3201 		"ATE Device",
3202 		"Floppy Controller",
3203 		"Floppy Device",
3204 		"Secondary Bus Port",
3205 		"Peer Transport Agent",
3206 		"Peer Transport",
3207 		"Unknown"
3208 	};
3209 
3210 	switch(class&0xFFF) {
3211 	case I2O_CLASS_EXECUTIVE:
3212 		idx = 0; break;
3213 	case I2O_CLASS_DDM:
3214 		idx = 1; break;
3215 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3216 		idx = 2; break;
3217 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3218 		idx = 3; break;
3219 	case I2O_CLASS_LAN:
3220 		idx = 4; break;
3221 	case I2O_CLASS_WAN:
3222 		idx = 5; break;
3223 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3224 		idx = 6; break;
3225 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3226 		idx = 7; break;
3227 	case I2O_CLASS_SCSI_PERIPHERAL:
3228 		idx = 8; break;
3229 	case I2O_CLASS_ATE_PORT:
3230 		idx = 9; break;
3231 	case I2O_CLASS_ATE_PERIPHERAL:
3232 		idx = 10; break;
3233 	case I2O_CLASS_FLOPPY_CONTROLLER:
3234 		idx = 11; break;
3235 	case I2O_CLASS_FLOPPY_DEVICE:
3236 		idx = 12; break;
3237 	case I2O_CLASS_BUS_ADAPTER_PORT:
3238 		idx = 13; break;
3239 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3240 		idx = 14; break;
3241 	case I2O_CLASS_PEER_TRANSPORT:
3242 		idx = 15; break;
3243 	}
3244 	return i2o_class_name[idx];
3245 }
3246 #endif
3247 
3248 
3249 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3250 {
3251 	u32 msg[6];
3252 	int ret, size = sizeof(i2o_hrt);
3253 
3254 	do {
3255 		if (pHba->hrt == NULL) {
3256 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3257 					size, &pHba->hrt_pa, GFP_KERNEL);
3258 			if (pHba->hrt == NULL) {
3259 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3260 				return -ENOMEM;
3261 			}
3262 		}
3263 
3264 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3265 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3266 		msg[2]= 0;
3267 		msg[3]= 0;
3268 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3269 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3270 
3271 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3272 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3273 			return ret;
3274 		}
3275 
3276 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3277 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3278 			dma_free_coherent(&pHba->pDev->dev, size,
3279 				pHba->hrt, pHba->hrt_pa);
3280 			size = newsize;
3281 			pHba->hrt = NULL;
3282 		}
3283 	} while(pHba->hrt == NULL);
3284 	return 0;
3285 }
3286 
3287 /*
3288  *	 Query one scalar group value or a whole scalar group.
3289  */
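/*
 * The operation block built below follows the UtilParamsGet layout
 * (field meanings given as a reading aid, inferred from this code):
 *
 *	opblk[0] = 1			operation count
 *	opblk[1] = 0			reserved
 *	opblk[2] = I2O_PARAMS_FIELD_GET	operation code
 *	opblk[3] = group		parameter group number
 *	opblk[4] = 1			field count; -1 fetches the group
 *	opblk[5] = field		field index within the group
 */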
3290 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3291 			int group, int field, void *buf, int buflen)
3292 {
3293 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3294 	u8 *opblk_va;
3295 	dma_addr_t opblk_pa;
3296 	u8 *resblk_va;
3297 	dma_addr_t resblk_pa;
3298 
3299 	int size;
3300 
3301 	/* 8 bytes for header */
3302 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3303 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3304 	if (resblk_va == NULL) {
3305 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3306 		return -ENOMEM;
3307 	}
3308 
3309 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3310 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3311 	if (opblk_va == NULL) {
3312 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3313 			resblk_va, resblk_pa);
3314 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3315 			pHba->name);
3316 		return -ENOMEM;
3317 	}
3318 	if (field == -1)  		/* whole group */
3319 		opblk[4] = -1;
3320 
3321 	memcpy(opblk_va, opblk, sizeof(opblk));
3322 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3323 		opblk_va, opblk_pa, sizeof(opblk),
3324 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3325 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3326 	if (size == -ETIME) {
3327 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3328 							resblk_va, resblk_pa);
3329 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3330 		return -ETIME;
3331 	} else if (size == -EINTR) {
3332 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3333 							resblk_va, resblk_pa);
3334 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3335 		return -EINTR;
3336 	}
3337 
3338 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3339 
3340 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3341 						resblk_va, resblk_pa);
3342 	if (size < 0)
3343 		return size;
3344 
3345 	return buflen;
3346 }
3347 
3348 
3349 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3350  *
3351  *	This function can be used for all UtilParamsGet/Set operations.
3352  *	The OperationBlock is given in opblk-buffer,
3353  *	and results are returned in resblk-buffer.
3354  *	Note that the minimum sized resblk is 8 bytes and contains
3355  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3356  */
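/*
 * Result-block header decoding, as performed at the end of this
 * function (a sketch matching the bit operations below):
 *
 *	res[1] >> 24		ErrorInfoSize
 *	(res[1] >> 16) & 0xFF	BlockStatus; non-zero means failure
 *	res[1] & 0xFFFF		BlockSize in 32-bit words, so the byte
 *				count returned is 4 + (BlockSize << 2)
 */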
3357 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3358 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3359 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3360 {
3361 	u32 msg[9];
3362 	u32 *res = (u32 *)resblk_va;
3363 	int wait_status;
3364 
3365 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3366 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3367 	msg[2] = 0;
3368 	msg[3] = 0;
3369 	msg[4] = 0;
3370 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3371 	msg[6] = (u32)opblk_pa;
3372 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3373 	msg[8] = (u32)resblk_pa;
3374 
3375 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3376 		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3377    		return wait_status; 	/* -DetailedStatus */
3378 	}
3379 
3380 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3381 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3382 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3383 			pHba->name,
3384 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3385 							 : "PARAMS_GET",
3386 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3387 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3388 	}
3389 
3390 	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3391 }
3392 
3393 
3394 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3395 {
3396 	u32 msg[4];
3397 	int ret;
3398 
3399 	adpt_i2o_status_get(pHba);
3400 
3401 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3402 
3403 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3404    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3405 		return 0;
3406 	}
3407 
3408 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3409 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3410 	msg[2] = 0;
3411 	msg[3] = 0;
3412 
3413 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3414 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3415 				pHba->unit, -ret);
3416 	} else {
3417 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3418 	}
3419 
3420 	adpt_i2o_status_get(pHba);
3421 	return ret;
3422 }
3423 
3424 
3425 /*
3426  * Enable IOP. Allows the IOP to resume external operations.
3427  */
3428 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3429 {
3430 	u32 msg[4];
3431 	int ret;
3432 
3433 	adpt_i2o_status_get(pHba);
3434 	if(!pHba->status_block){
3435 		return -ENOMEM;
3436 	}
3437 	/* Enable only allowed on READY state */
3438 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3439 		return 0;
3440 
3441 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3442 		return -EINVAL;
3443 
3444 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3445 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3446 	msg[2]= 0;
3447 	msg[3]= 0;
3448 
3449 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3450 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3451 			pHba->name, ret);
3452 	} else {
3453 		PDEBUG("%s: Enabled.\n", pHba->name);
3454 	}
3455 
3456 	adpt_i2o_status_get(pHba);
3457 	return ret;
3458 }
3459 
3460 
3461 static int adpt_i2o_systab_send(adpt_hba* pHba)
3462 {
3463 	u32 msg[12];
3464 	int ret;
3465 
3466 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3467 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3468 	msg[2] = 0;
3469 	msg[3] = 0;
3470 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3471 	msg[5] = 0;				   /* Segment 0 */
3472 
3473 	/*
3474 	 * Provide three SGL-elements:
3475 	 * System table (SysTab), Private memory space declaration and
3476 	 * Private i/o space declaration
3477 	 */
3478 	msg[6] = 0x54000000 | sys_tbl_len;
3479 	msg[7] = (u32)sys_tbl_pa;
3480 	msg[8] = 0x54000000 | 0;
3481 	msg[9] = 0;
3482 	msg[10] = 0xD4000000 | 0;
3483 	msg[11] = 0;
3484 
3485 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3486 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3487 			pHba->name, ret);
3488 	}
3489 #ifdef DEBUG
3490 	else {
3491 		PINFO("%s: SysTab set.\n", pHba->name);
3492 	}
3493 #endif
3494 
3495 	return ret;
3496 }
3497 
3498 
3499 /*============================================================================
3500  *
3501  *============================================================================
3502  */
3503 
3504 
3505 #ifdef UARTDELAY
3506 
3507 static void adpt_delay(int millisec)
3508 {
3509 	int i;
3510 	for (i = 0; i < millisec; i++) {
3511 		udelay(1000);	/* delay for one millisecond */
3512 	}
3513 }
3514 
3515 #endif
3516 
3517 static struct scsi_host_template driver_template = {
3518 	.module			= THIS_MODULE,
3519 	.name			= "dpt_i2o",
3520 	.proc_name		= "dpt_i2o",
3521 	.show_info		= adpt_show_info,
3522 	.info			= adpt_info,
3523 	.queuecommand		= adpt_queue,
3524 	.eh_abort_handler	= adpt_abort,
3525 	.eh_device_reset_handler = adpt_device_reset,
3526 	.eh_bus_reset_handler	= adpt_bus_reset,
3527 	.eh_host_reset_handler	= adpt_reset,
3528 	.bios_param		= adpt_bios_param,
3529 	.slave_configure	= adpt_slave_configure,
3530 	.can_queue		= MAX_TO_IOP_MESSAGES,
3531 	.this_id		= 7,
3532 };
3533 
3534 static int __init adpt_init(void)
3535 {
3536 	int		error;
3537 	adpt_hba	*pHba, *next;
3538 
3539 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3540 
3541 	error = adpt_detect(&driver_template);
3542 	if (error < 0)
3543 		return error;
3544 	if (hba_chain == NULL)
3545 		return -ENODEV;
3546 
3547 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3548 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3549 		if (error)
3550 			goto fail;
3551 		scsi_scan_host(pHba->host);
3552 	}
3553 	return 0;
3554 fail:
3555 	for (pHba = hba_chain; pHba; pHba = next) {
3556 		next = pHba->next;
3557 		scsi_remove_host(pHba->host);
3558 	}
3559 	return error;
3560 }
3561 
3562 static void __exit adpt_exit(void)
3563 {
3564 	adpt_hba	*pHba, *next;
3565 
3566 	for (pHba = hba_chain; pHba; pHba = next) {
3567 		next = pHba->next;
3568 		adpt_release(pHba);
3569 	}
3570 }
3571 
3572 module_init(adpt_init);
3573 module_exit(adpt_exit);
3574 
3575 MODULE_LICENSE("GPL");
3576