/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};
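
/* Illustrative sketch (not called anywhere): the synchronous post/wait
 * pattern used throughout this driver.  A caller builds an I2O message,
 * posts it with adpt_i2o_post_wait(), and sleeps until the reply ISR
 * calls adpt_i2o_post_wait_complete() with the matching 15-bit id that
 * post_wait stashed in msg[2].  See adpt_abort() below for a real caller.
 *
 *	u32 msg[5];
 *	memset(msg, 0, sizeof(msg));
 *	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
 *	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
 *	// msg[2] is overwritten by adpt_i2o_post_wait() with its wait id
 *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 *	// 0 on success; -ETIMEDOUT, -ETIME, -EOPNOTSUPP, -ENOMEM on failure
 */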

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
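
/* Example of how these helpers are used when building a message: a 64-bit
 * dma_addr_t is split across two consecutive message words, low half
 * first (see adpt_i2o_reset_hba() below):
 *
 *	msg[6] = dma_low(addr);		// bits 31..0
 *	msg[7] = dma_high(addr);	// bits 63..32 (0 on 32-bit builds)
 */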

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8  scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
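
/* For reference (an interpretation of the code above, not a quote from
 * the I2O spec): the single-SGE layouts adpt_inquiry() builds are
 *
 *   32-bit:  [ 0xD0000000|dir|len ][ addr ]
 *   64-bit:  [ 0x7C<<24|2<<16|0x02 ][ 1<<PAGE_SHIFT ]   <- enable 64-bit SGEs
 *            [ 0xD0000000|dir|len ][ dma_low(addr) ][ dma_high(addr) ]
 *
 * where 0xD0000000 marks the last (and only) simple element and len is
 * the byte count of the data buffer.
 */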


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
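
/* Worked example (illustrative): a 0x30000-sector disk falls in the
 * 0x20000..0x40000 band above, so it is reported with heads=65 and
 * sectors=63; the cylinder count is then derived from the capacity and
 * 65*63 = 4095 via sector_div(), nominally 0x30000 / 4095 = 48 for
 * this size.
 */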


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
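
/* Note: the context is simply the low 32 bits of the command's serial
 * number; adpt_cmd_from_context() below performs the reverse lookup by
 * scanning every device's command list for a matching serial number,
 * so the pair only round-trips while the command is still queued.
 */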

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
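
/* On 32-bit builds the reply pointer itself fits in the 32-bit message
 * context, so the cast round-trips directly.  On 64-bit builds it cannot,
 * so the context becomes an index into pHba->ioctl_reply_context[]:
 *
 *	ctx = adpt_ioctl_to_context(pHba, reply);   // claim a free slot
 *	...					    // hardware echoes ctx
 *	reply = adpt_ioctl_from_context(pHba, ctx); // look up, free slot
 */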

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode =  adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
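
/* The inbound FIFO handshake used above, in brief: reading post_port
 * pops a message-frame offset (MFA) from the adapter, or EMPTY_QUEUE
 * if none is free; the frame lives at msg_addr_virt + MFA, is filled
 * with memcpy_toio(), and writing the same MFA back to post_port hands
 * the frame to the IOP.  (A summary of the code above, not of the spec.)
 */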


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
1700 
1701 
adpt_i2o_passthru(adpt_hba * pHba,u32 __user * arg)1702 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1703 {
1704 	u32 msg[MAX_MESSAGE_SIZE];
1705 	u32* reply = NULL;
1706 	u32 size = 0;
1707 	u32 reply_size = 0;
1708 	u32 __user *user_msg = arg;
1709 	u32 __user * user_reply = NULL;
1710 	void *sg_list[pHba->sg_tablesize];
1711 	u32 sg_offset = 0;
1712 	u32 sg_count = 0;
1713 	int sg_index = 0;
1714 	u32 i = 0;
1715 	u32 rcode = 0;
1716 	void *p = NULL;
1717 	dma_addr_t addr;
1718 	ulong flags = 0;
1719 
1720 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1721 	// get user msg size in u32s
1722 	if(get_user(size, &user_msg[0])){
1723 		return -EFAULT;
1724 	}
1725 	size = size>>16;
1726 
1727 	user_reply = &user_msg[size];
1728 	if(size > MAX_MESSAGE_SIZE){
1729 		return -EFAULT;
1730 	}
1731 	size *= 4; // Convert to bytes
1732 
1733 	/* Copy in the user's I2O command */
1734 	if(copy_from_user(msg, user_msg, size)) {
1735 		return -EFAULT;
1736 	}
1737 	get_user(reply_size, &user_reply[0]);
1738 	reply_size = reply_size>>16;
1739 	if(reply_size > REPLY_FRAME_SIZE){
1740 		reply_size = REPLY_FRAME_SIZE;
1741 	}
1742 	reply_size *= 4;
1743 	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1744 	if(reply == NULL) {
1745 		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1746 		return -ENOMEM;
1747 	}
1748 	sg_offset = (msg[0]>>4)&0xf;
1749 	msg[2] = 0x40000000; // IOCTL context
1750 	msg[3] = adpt_ioctl_to_context(pHba, reply);
1751 	if (msg[3] == (u32)-1) {
1752 		kfree(reply);
1753 		return -EBUSY;
1754 	}
1755 
1756 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1757 	if(sg_offset) {
1758 		// TODO add 64 bit API
1759 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1760 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1761 		if (sg_count > pHba->sg_tablesize){
1762 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1763 			kfree (reply);
1764 			return -EINVAL;
1765 		}
1766 
1767 		for(i = 0; i < sg_count; i++) {
1768 			int sg_size;
1769 
1770 			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1771 				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1772 				rcode = -EINVAL;
1773 				goto cleanup;
1774 			}
1775 			sg_size = sg[i].flag_count & 0xffffff;
1776 			/* Allocate memory for the transfer */
1777 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1778 			if(!p) {
1779 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1780 						pHba->name,sg_size,i,sg_count);
1781 				rcode = -ENOMEM;
1782 				goto cleanup;
1783 			}
1784 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1785 			/* Copy in the user's SG buffer if necessary */
1786 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1787 				// sg_simple_element API is 32 bit
1788 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1789 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1790 					rcode = -EFAULT;
1791 					goto cleanup;
1792 				}
1793 			}
1794 			/* sg_simple_element API is 32 bit, but addr < 4GB */
1795 			sg[i].addr_bus = addr;
1796 		}
1797 	}
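	/*
	 * At this point each user SG element has been swapped for a
	 * DMA-coherent bounce buffer: outbound data was copied in above
	 * and addr_bus now points at kernel memory.  The user's original
	 * addresses are re-read from user_msg after completion for the
	 * copy-out pass below.
	 */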
1798 
1799 	do {
1800 		/*
1801 		 * Stop any new commands from entering the
1802 		 * controller while processing the ioctl
1803 		 */
1804 		if (pHba->host) {
1805 			scsi_block_requests(pHba->host);
1806 			spin_lock_irqsave(pHba->host->host_lock, flags);
1807 		}
1808 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1809 		if (rcode != 0)
1810 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1811 					rcode, reply);
1812 		if (pHba->host) {
1813 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1814 			scsi_unblock_requests(pHba->host);
1815 		}
1816 	} while (rcode == -ETIMEDOUT);
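	/*
	 * -ETIMEDOUT from adpt_i2o_post_wait() means the controller never
	 * returned the frame, so the post is simply retried; the host
	 * queue is unblocked between attempts so normal I/O can proceed.
	 */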
1817 
1818 	if(rcode){
1819 		goto cleanup;
1820 	}
1821 
1822 	if(sg_offset) {
1823 	/* Copy the scatter/gather buffers back to user space */
1824 		u32 j;
1825 		// TODO add 64 bit API
1826 		struct sg_simple_element* sg;
1827 		int sg_size;
1828 
1829 		// re-read the original message so the SG copy-out is handled correctly
1830 		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1831 		// get user msg size in u32s
1832 		if(get_user(size, &user_msg[0])){
1833 			rcode = -EFAULT;
1834 			goto cleanup;
1835 		}
1836 		size = size>>16;
1837 		size *= 4;
1838 		if (size > MAX_MESSAGE_SIZE) {
1839 			rcode = -EINVAL;
1840 			goto cleanup;
1841 		}
1842 		/* Copy in the user's I2O command */
1843 		if (copy_from_user (msg, user_msg, size)) {
1844 			rcode = -EFAULT;
1845 			goto cleanup;
1846 		}
1847 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1848 
1849 		// TODO add 64 bit API
1850 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1851 		for (j = 0; j < sg_count; j++) {
1852 			/* Copy out the SG list to user's buffer if necessary */
1853 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1854 				sg_size = sg[j].flag_count & 0xffffff;
1855 				// sg_simple_element API is 32 bit
1856 				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1857 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1858 					rcode = -EFAULT;
1859 					goto cleanup;
1860 				}
1861 			}
1862 		}
1863 	}
1864 
1865 	/* Copy back the reply to user space */
1866 	if (reply_size) {
1867 		// we wrote our own values for context - now restore the user supplied ones
1868 		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1869 			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1870 			rcode = -EFAULT;
1871 		}
1872 		if(copy_to_user(user_reply, reply, reply_size)) {
1873 			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1874 			rcode = -EFAULT;
1875 		}
1876 	}
1877 
1878 
1879 cleanup:
1880 	if (rcode != -ETIME && rcode != -EINTR) {
1881 		struct sg_simple_element *sg =
1882 				(struct sg_simple_element*) (msg +sg_offset);
1883 		kfree (reply);
1884 		while(sg_index) {
1885 			if(sg_list[--sg_index]) {
1886 				dma_free_coherent(&pHba->pDev->dev,
1887 					sg[sg_index].flag_count & 0xffffff,
1888 					sg_list[sg_index],
1889 					sg[sg_index].addr_bus);
1890 			}
1891 		}
1892 	}
1893 	return rcode;
1894 }
1895 
1896 #if defined __ia64__
1897 static void adpt_ia64_info(sysInfo_S* si)
1898 {
1899 	// This is all the info we need for now
1900 	// We will add more info as our new
1901 	// management utility requires it
1902 	si->processorType = PROC_IA64;
1903 }
1904 #endif
1905 
1906 #if defined __sparc__
1907 static void adpt_sparc_info(sysInfo_S* si)
1908 {
1909 	// This is all the info we need for now
1910 	// We will add more info as our new
1911 	// management utility requires it
1912 	si->processorType = PROC_ULTRASPARC;
1913 }
1914 #endif
1915 #if defined __alpha__
1916 static void adpt_alpha_info(sysInfo_S* si)
1917 {
1918 	// This is all the info we need for now
1919 	// We will add more info as our new
1920 	// management utility requires it
1921 	si->processorType = PROC_ALPHA;
1922 }
1923 #endif
1924 
1925 #if defined __i386__
1926 
1927 #include <uapi/asm/vm86.h>
1928 
1929 static void adpt_i386_info(sysInfo_S* si)
1930 {
1931 	// This is all the info we need for now
1932 	// We will add more info as our new
1933 	// management utility requires it
1934 	switch (boot_cpu_data.x86) {
1935 	case CPU_386:
1936 		si->processorType = PROC_386;
1937 		break;
1938 	case CPU_486:
1939 		si->processorType = PROC_486;
1940 		break;
1941 	case CPU_586:
1942 		si->processorType = PROC_PENTIUM;
1943 		break;
1944 	default:  // Just in case
1945 		si->processorType = PROC_PENTIUM;
1946 		break;
1947 	}
1948 }
1949 #endif
1950 
1951 /*
1952  * This routine returns information about the system.  This does not affect
1953  * any logic and if the info is wrong - it doesn't matter.
1954  */
1955 
1956 /* Get all the info we can not get from kernel services */
1957 static int adpt_system_info(void __user *buffer)
1958 {
1959 	sysInfo_S si;
1960 
1961 	memset(&si, 0, sizeof(si));
1962 
1963 	si.osType = OS_LINUX;
1964 	si.osMajorVersion = 0;
1965 	si.osMinorVersion = 0;
1966 	si.osRevision = 0;
1967 	si.busType = SI_PCI_BUS;
1968 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1969 
1970 #if defined __i386__
1971 	adpt_i386_info(&si);
1972 #elif defined (__ia64__)
1973 	adpt_ia64_info(&si);
1974 #elif defined(__sparc__)
1975 	adpt_sparc_info(&si);
1976 #elif defined (__alpha__)
1977 	adpt_alpha_info(&si);
1978 #else
1979 	si.processorType = 0xff ;
1980 #endif
1981 	if (copy_to_user(buffer, &si, sizeof(si))){
1982 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1983 		return -EFAULT;
1984 	}
1985 
1986 	return 0;
1987 }
1988 
1989 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1990 {
1991 	int minor;
1992 	int error = 0;
1993 	adpt_hba* pHba;
1994 	ulong flags = 0;
1995 	void __user *argp = (void __user *)arg;
1996 
1997 	minor = iminor(inode);
1998 	if (minor >= DPTI_MAX_HBA){
1999 		return -ENXIO;
2000 	}
2001 	mutex_lock(&adpt_configuration_lock);
2002 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2003 		if (pHba->unit == minor) {
2004 			break;	/* found adapter */
2005 		}
2006 	}
2007 	mutex_unlock(&adpt_configuration_lock);
2008 	if(pHba == NULL){
2009 		return -ENXIO;
2010 	}
2011 
2012 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2013 		schedule_timeout_uninterruptible(2);
2014 
2015 	switch (cmd) {
2016 	// TODO: handle 3 cases
2017 	case DPT_SIGNATURE:
2018 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2019 			return -EFAULT;
2020 		}
2021 		break;
2022 	case I2OUSRCMD:
2023 		return adpt_i2o_passthru(pHba, argp);
2024 
2025 	case DPT_CTRLINFO:{
2026 		drvrHBAinfo_S HbaInfo;
2027 
2028 #define FLG_OSD_PCI_VALID 0x0001
2029 #define FLG_OSD_DMA	  0x0002
2030 #define FLG_OSD_I2O	  0x0004
2031 		memset(&HbaInfo, 0, sizeof(HbaInfo));
2032 		HbaInfo.drvrHBAnum = pHba->unit;
2033 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2034 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2035 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2036 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2037 		HbaInfo.Interrupt = pHba->pDev->irq;
2038 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2039 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2040 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2041 			return -EFAULT;
2042 		}
2043 		break;
2044 		}
2045 	case DPT_SYSINFO:
2046 		return adpt_system_info(argp);
2047 	case DPT_BLINKLED:{
2048 		u32 value;
2049 		value = (u32)adpt_read_blink_led(pHba);
2050 		if (copy_to_user(argp, &value, sizeof(value))) {
2051 			return -EFAULT;
2052 		}
2053 		break;
2054 		}
2055 	case I2ORESETCMD:
2056 		if(pHba->host)
2057 			spin_lock_irqsave(pHba->host->host_lock, flags);
2058 		adpt_hba_reset(pHba);
2059 		if(pHba->host)
2060 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
2061 		break;
2062 	case I2ORESCANCMD:
2063 		adpt_rescan(pHba);
2064 		break;
2065 	default:
2066 		return -EINVAL;
2067 	}
2068 
2069 	return error;
2070 }
2071 
2072 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2073 {
2074 	struct inode *inode;
2075 	long ret;
2076 
2077 	inode = file_inode(file);
2078 
2079 	mutex_lock(&adpt_mutex);
2080 	ret = adpt_ioctl(inode, file, cmd, arg);
2081 	mutex_unlock(&adpt_mutex);
2082 
2083 	return ret;
2084 }
2085 
2086 #ifdef CONFIG_COMPAT
2087 static long compat_adpt_ioctl(struct file *file,
2088 				unsigned int cmd, unsigned long arg)
2089 {
2090 	struct inode *inode;
2091 	long ret;
2092 
2093 	inode = file_inode(file);
2094 
2095 	mutex_lock(&adpt_mutex);
2096 
2097 	switch(cmd) {
2098 		case DPT_SIGNATURE:
2099 		case I2OUSRCMD:
2100 		case DPT_CTRLINFO:
2101 		case DPT_SYSINFO:
2102 		case DPT_BLINKLED:
2103 		case I2ORESETCMD:
2104 		case I2ORESCANCMD:
2105 		case (DPT_TARGET_BUSY & 0xFFFF):
2106 		case DPT_TARGET_BUSY:
2107 			ret = adpt_ioctl(inode, file, cmd, arg);
2108 			break;
2109 		default:
2110 			ret =  -ENOIOCTLCMD;
2111 	}
2112 
2113 	mutex_unlock(&adpt_mutex);
2114 
2115 	return ret;
2116 }
2117 #endif
2118 
2119 static irqreturn_t adpt_isr(int irq, void *dev_id)
2120 {
2121 	struct scsi_cmnd* cmd;
2122 	adpt_hba* pHba = dev_id;
2123 	u32 m;
2124 	void __iomem *reply;
2125 	u32 status=0;
2126 	u32 context;
2127 	ulong flags = 0;
2128 	int handled = 0;
2129 
2130 	if (pHba == NULL){
2131 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2132 		return IRQ_NONE;
2133 	}
2134 	if(pHba->host)
2135 		spin_lock_irqsave(pHba->host->host_lock, flags);
2136 
2137 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2138 		m = readl(pHba->reply_port);
2139 		if(m == EMPTY_QUEUE){
2140 			// Try twice then give up
2141 			rmb();
2142 			m = readl(pHba->reply_port);
2143 			if(m == EMPTY_QUEUE){
2144 				// This really should not happen
2145 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2146 				goto out;
2147 			}
2148 		}
2149 		if (pHba->reply_pool_pa <= m &&
2150 		    m < pHba->reply_pool_pa +
2151 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2152 			reply = (u8 *)pHba->reply_pool +
2153 						(m - pHba->reply_pool_pa);
2154 		} else {
2155 			/* Ick, we should *never* be here */
2156 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2157 			reply = (u8 *)bus_to_virt(m);
2158 		}
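		/*
		 * m is the bus address of the reply frame the IOP pushed
		 * onto the outbound FIFO.  The frames were handed over from
		 * reply_pool in adpt_i2o_init_outbound_q(), so the offset
		 * arithmetic above recovers the frame's virtual address.
		 */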
2159 
2160 		if (readl(reply) & MSG_FAIL) {
2161 			u32 old_m = readl(reply+28);
2162 			void __iomem *msg;
2163 			u32 old_context;
2164 			PDEBUG("%s: Failed message\n",pHba->name);
2165 			if(old_m >= 0x100000){
2166 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2167 				writel(m,pHba->reply_port);
2168 				continue;
2169 			}
2170 			// Transaction context is 0 in failed reply frame
2171 			msg = pHba->msg_addr_virt + old_m;
2172 			old_context = readl(msg+12);
2173 			writel(old_context, reply+12);
2174 			adpt_send_nop(pHba, old_m);
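			/*
			 * A failed reply preserves the original request's
			 * MFA at offset 28; copying the old transaction
			 * context into the reply keeps the completion
			 * routing below working, and the NOP hands the
			 * preserved frame back to the IOP's free list.
			 */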
2175 		}
2176 		context = readl(reply+8);
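		/*
		 * The initiator context encodes the message's origin:
		 * bit 30 (0x40000000) marks ioctl pass-through frames and
		 * bit 31 (0x80000000) marks post-wait frames; plain SCSI
		 * commands carry neither and take the else branch below.
		 */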
2177 		if(context & 0x40000000){ // IOCTL
2178 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2179 			if( p != NULL) {
2180 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2181 			}
2182 			// All IOCTLs will also be post wait
2183 		}
2184 		if(context & 0x80000000){ // Post wait message
2185 			status = readl(reply+16);
2186 			if(status  >> 24){
2187 				status &=  0xffff; /* Get detail status */
2188 			} else {
2189 				status = I2O_POST_WAIT_OK;
2190 			}
2191 			if(!(context & 0x40000000)) {
2192 				cmd = adpt_cmd_from_context(pHba,
2193 							readl(reply+12));
2194 				if(cmd != NULL) {
2195 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2196 				}
2197 			}
2198 			adpt_i2o_post_wait_complete(context, status);
2199 		} else { // SCSI message
2200 			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2201 			if(cmd != NULL){
2202 				scsi_dma_unmap(cmd);
2203 				if(cmd->serial_number != 0) { // If not timed out
2204 					adpt_i2o_to_scsi(reply, cmd);
2205 				}
2206 			}
2207 		}
2208 		writel(m, pHba->reply_port);
2209 		wmb();
2210 		rmb();
2211 	}
2212 	handled = 1;
2213 out:	if(pHba->host)
2214 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2215 	return IRQ_RETVAL(handled);
2216 }
2217 
2218 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2219 {
2220 	int i;
2221 	u32 msg[MAX_MESSAGE_SIZE];
2222 	u32* mptr;
2223 	u32* lptr;
2224 	u32 *lenptr;
2225 	int direction;
2226 	int scsidir;
2227 	int nseg;
2228 	u32 len;
2229 	u32 reqlen;
2230 	s32 rcode;
2231 	dma_addr_t addr;
2232 
2233 	memset(msg, 0 , sizeof(msg));
2234 	len = scsi_bufflen(cmd);
2235 	direction = 0x00000000;
2236 
2237 	scsidir = 0x00000000;			// DATA NO XFER
2238 	if(len) {
2239 		/*
2240 		 * Set SCBFlags to indicate if data is being transferred
2241 		 * in or out, or no data transfer
2242 		 * Note:  Do not have to verify index is less than 0 since
2243 		 * cmd->cmnd[0] is an unsigned char
2244 		 */
2245 		switch(cmd->sc_data_direction){
2246 		case DMA_FROM_DEVICE:
2247 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2248 			break;
2249 		case DMA_TO_DEVICE:
2250 			direction=0x04000000;	// SGL OUT
2251 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2252 			break;
2253 		case DMA_NONE:
2254 			break;
2255 		case DMA_BIDIRECTIONAL:
2256 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2257 			// Assume In - and continue;
2258 			break;
2259 		default:
2260 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2261 			     pHba->name, cmd->cmnd[0]);
2262 			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2263 			cmd->scsi_done(cmd);
2264 			return 	0;
2265 		}
2266 	}
2267 	// msg[0] is set later
2268 	// I2O_CMD_SCSI_EXEC
2269 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2270 	msg[2] = 0;
2271 	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2272 	// Our cards use the transaction context as the tag for queueing
2273 	// Adaptec/DPT Private stuff
2274 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2275 	msg[5] = d->tid;
2276 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2277 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2278 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2279 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2280 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2281 
2282 	mptr=msg+7;
2283 
2284 	// Write SCSI command into the message - always 16 byte block
2285 	memset(mptr, 0,  16);
2286 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2287 	mptr+=4;
2288 	lenptr=mptr++;		/* Remember me - fill in when we know */
2289 	if (dpt_dma64(pHba)) {
2290 		reqlen = 16;		// SINGLE SGE
2291 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2292 		*mptr++ = 1 << PAGE_SHIFT;
2293 	} else {
2294 		reqlen = 14;		// SINGLE SGE
2295 	}
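	/*
	 * SGE layout depends on the addressing mode: 64-bit capable HBAs
	 * get the two-word enable-64-bit marker element written above and
	 * each SGE then carries low and high address dwords, while 32-bit
	 * HBAs use plain single-address simple elements.
	 */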
2296 	/* Now fill in the SGList and command */
2297 
2298 	nseg = scsi_dma_map(cmd);
2299 	BUG_ON(nseg < 0);
2300 	if (nseg) {
2301 		struct scatterlist *sg;
2302 
2303 		len = 0;
2304 		scsi_for_each_sg(cmd, sg, nseg, i) {
2305 			lptr = mptr;
2306 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2307 			len+=sg_dma_len(sg);
2308 			addr = sg_dma_address(sg);
2309 			*mptr++ = dma_low(addr);
2310 			if (dpt_dma64(pHba))
2311 				*mptr++ = dma_high(addr);
2312 			/* Make this an end of list */
2313 			if (i == nseg - 1)
2314 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2315 		}
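		/*
		 * The final element was rewritten with 0xD0000000, which
		 * per the I2O SGL flag values is simple-address (0x10000000)
		 * | end-of-buffer (0x40000000) | last-element (0x80000000),
		 * terminating the list for the IOP.
		 */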
2316 		reqlen = mptr - msg;
2317 		*lenptr = len;
2318 
2319 		if(cmd->underflow && len != cmd->underflow){
2320 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2321 				len, cmd->underflow);
2322 		}
2323 	} else {
2324 		*lenptr = len = 0;
2325 		reqlen = 12;
2326 	}
2327 
2328 	/* Stick the headers on */
2329 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2330 
2331 	// Send it on its way
2332 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2333 	if (rcode == 0) {
2334 		return 0;
2335 	}
2336 	return rcode;
2337 }
2338 
2339 
2340 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2341 {
2342 	struct Scsi_Host *host;
2343 
2344 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2345 	if (host == NULL) {
2346 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2347 		return -1;
2348 	}
2349 	host->hostdata[0] = (unsigned long)pHba;
2350 	pHba->host = host;
2351 
2352 	host->irq = pHba->pDev->irq;
2353 	/* no IO ports, so don't have to set host->io_port and
2354 	 * host->n_io_port
2355 	 */
2356 	host->io_port = 0;
2357 	host->n_io_port = 0;
2358 				/* see comments in scsi_host.h */
2359 	host->max_id = 16;
2360 	host->max_lun = 256;
2361 	host->max_channel = pHba->top_scsi_channel + 1;
2362 	host->cmd_per_lun = 1;
2363 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2364 	host->sg_tablesize = pHba->sg_tablesize;
2365 	host->can_queue = pHba->post_fifo_size;
2366 	host->use_cmd_list = 1;
2367 
2368 	return 0;
2369 }
2370 
2371 
2372 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2373 {
2374 	adpt_hba* pHba;
2375 	u32 hba_status;
2376 	u32 dev_status;
2377 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2378 	// I know this would look cleaner if I just read bytes
2379 	// but the model I have been using for all the rest of the
2380 	// io is in 4 byte words - so I keep that model
2381 	u16 detailed_status = readl(reply+16) &0xffff;
2382 	dev_status = (detailed_status & 0xff);
2383 	hba_status = detailed_status >> 8;
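	/*
	 * detailed_status packs the adapter (HBA) status in its high byte
	 * and the SCSI device status (SAM status code) in its low byte.
	 */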
2384 
2385 	// calculate resid for sg
2386 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2387 
2388 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2389 
2390 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2391 
2392 	if(!(reply_flags & MSG_FAIL)) {
2393 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2394 		case I2O_SCSI_DSC_SUCCESS:
2395 			cmd->result = (DID_OK << 16);
2396 			// handle underflow
2397 			if (readl(reply+20) < cmd->underflow) {
2398 				cmd->result = (DID_ERROR <<16);
2399 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2400 			}
2401 			break;
2402 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2403 			cmd->result = (DID_ABORT << 16);
2404 			break;
2405 		case I2O_SCSI_DSC_PATH_INVALID:
2406 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2407 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2408 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2409 		case I2O_SCSI_DSC_NO_ADAPTER:
2410 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2411 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2412 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2413 			cmd->result = (DID_TIME_OUT << 16);
2414 			break;
2415 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2416 		case I2O_SCSI_DSC_BUS_BUSY:
2417 			cmd->result = (DID_BUS_BUSY << 16);
2418 			break;
2419 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2420 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2421 			cmd->result = (DID_RESET << 16);
2422 			break;
2423 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2424 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2425 			cmd->result = (DID_PARITY << 16);
2426 			break;
2427 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2428 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2429 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2430 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2431 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2432 		case I2O_SCSI_DSC_DATA_OVERRUN:
2433 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2434 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2435 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2436 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2437 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2438 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2439 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2440 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2441 		case I2O_SCSI_DSC_INVALID_CDB:
2442 		case I2O_SCSI_DSC_LUN_INVALID:
2443 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2444 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2445 		case I2O_SCSI_DSC_NO_NEXUS:
2446 		case I2O_SCSI_DSC_CDB_RECEIVED:
2447 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2448 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2449 		case I2O_SCSI_DSC_REQUEST_INVALID:
2450 		default:
2451 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2452 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2453 			       hba_status, dev_status, cmd->cmnd[0]);
2454 			cmd->result = (DID_ERROR << 16);
2455 			break;
2456 		}
2457 
2458 		// copy over the request sense data if it was a check
2459 		// condition status
2460 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2461 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2462 			// Copy over the sense data
2463 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2464 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2465 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2466 				/* This is to handle an array failed */
2467 				cmd->result = (DID_TIME_OUT << 16);
2468 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2469 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2470 					hba_status, dev_status, cmd->cmnd[0]);
2471 
2472 			}
2473 		}
2474 	} else {
2475 		/* In this condition we could not talk to the tid;
2476 		 * the card rejected it.  We should signal a retry
2477 		 * for a limited number of retries.
2478 		 */
2479 		cmd->result = (DID_TIME_OUT << 16);
2480 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2481 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2482 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2483 	}
2484 
2485 	cmd->result |= (dev_status);
2486 
2487 	if(cmd->scsi_done != NULL){
2488 		cmd->scsi_done(cmd);
2489 	}
2490 	return cmd->result;
2491 }
2492 
2493 
2494 static s32 adpt_rescan(adpt_hba* pHba)
2495 {
2496 	s32 rcode;
2497 	ulong flags = 0;
2498 
2499 	if(pHba->host)
2500 		spin_lock_irqsave(pHba->host->host_lock, flags);
2501 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2502 		goto out;
2503 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2504 		goto out;
2505 	rcode = 0;
2506 out:	if(pHba->host)
2507 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2508 	return rcode;
2509 }
2510 
2511 
2512 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2513 {
2514 	int i;
2515 	int max;
2516 	int tid;
2517 	struct i2o_device *d;
2518 	i2o_lct *lct = pHba->lct;
2519 	u8 bus_no = 0;
2520 	s16 scsi_id;
2521 	u64 scsi_lun;
2522 	u32 buf[10]; // at least 8 u32's
2523 	struct adpt_device* pDev = NULL;
2524 	struct i2o_device* pI2o_dev = NULL;
2525 
2526 	if (lct == NULL) {
2527 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2528 		return -1;
2529 	}
2530 
2531 	max = lct->table_size;
2532 	max -= 3;
2533 	max /= 9;
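	/*
	 * table_size is in 32-bit words: a 3-word header followed by
	 * 9-word LCT entries, so this arithmetic yields the entry count.
	 */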
2534 
2535 	// Mark each drive as unscanned
2536 	for (d = pHba->devices; d; d = d->next) {
2537 		pDev =(struct adpt_device*) d->owner;
2538 		if(!pDev){
2539 			continue;
2540 		}
2541 		pDev->state |= DPTI_DEV_UNSCANNED;
2542 	}
2543 
2544 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2545 
2546 	for(i=0;i<max;i++) {
2547 		if( lct->lct_entry[i].user_tid != 0xfff){
2548 			continue;
2549 		}
2550 
2551 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2552 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2553 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2554 			tid = lct->lct_entry[i].tid;
2555 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2556 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2557 				continue;
2558 			}
2559 			bus_no = buf[0]>>16;
2560 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2561 				printk(KERN_WARNING
2562 					"%s: Channel number %d out of range\n",
2563 					pHba->name, bus_no);
2564 				continue;
2565 			}
2566 
2567 			scsi_id = buf[1];
2568 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2569 			pDev = pHba->channel[bus_no].device[scsi_id];
2570 			/* walk the LUN chain for this target */
2571 			while(pDev) {
2572 				if(pDev->scsi_lun == scsi_lun) {
2573 					break;
2574 				}
2575 				pDev = pDev->next_lun;
2576 			}
2577 			if(!pDev ) { // Something new add it
2578 				d = kmalloc(sizeof(struct i2o_device),
2579 					    GFP_ATOMIC);
2580 				if(d==NULL)
2581 				{
2582 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2583 					return -ENOMEM;
2584 				}
2585 
2586 				d->controller = pHba;
2587 				d->next = NULL;
2588 
2589 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2590 
2591 				d->flags = 0;
2592 				adpt_i2o_report_hba_unit(pHba, d);
2593 				adpt_i2o_install_device(pHba, d);
2594 
2595 				pDev = pHba->channel[bus_no].device[scsi_id];
2596 				if( pDev == NULL){
2597 					pDev =
2598 					  kzalloc(sizeof(struct adpt_device),
2599 						  GFP_ATOMIC);
2600 					if(pDev == NULL) {
2601 						return -ENOMEM;
2602 					}
2603 					pHba->channel[bus_no].device[scsi_id] = pDev;
2604 				} else {
2605 					while (pDev->next_lun) {
2606 						pDev = pDev->next_lun;
2607 					}
2608 					pDev = pDev->next_lun =
2609 					  kzalloc(sizeof(struct adpt_device),
2610 						  GFP_ATOMIC);
2611 					if(pDev == NULL) {
2612 						return -ENOMEM;
2613 					}
2614 				}
2615 				pDev->tid = d->lct_data.tid;
2616 				pDev->scsi_channel = bus_no;
2617 				pDev->scsi_id = scsi_id;
2618 				pDev->scsi_lun = scsi_lun;
2619 				pDev->pI2o_dev = d;
2620 				d->owner = pDev;
2621 				pDev->type = (buf[0])&0xff;
2622 				pDev->flags = (buf[0]>>8)&0xff;
2623 				// Too late, SCSI system has made up its mind, but what the hey ...
2624 				if(scsi_id > pHba->top_scsi_id){
2625 					pHba->top_scsi_id = scsi_id;
2626 				}
2627 				if(scsi_lun > pHba->top_scsi_lun){
2628 					pHba->top_scsi_lun = scsi_lun;
2629 				}
2630 				continue;
2631 			} // end of new i2o device
2632 
2633 			// We found an old device - check it
2634 			while(pDev) {
2635 				if(pDev->scsi_lun == scsi_lun) {
2636 					if(!scsi_device_online(pDev->pScsi_dev)) {
2637 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2638 								pHba->name,bus_no,scsi_id,scsi_lun);
2639 						if (pDev->pScsi_dev) {
2640 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2641 						}
2642 					}
2643 					d = pDev->pI2o_dev;
2644 					if(d->lct_data.tid != tid) { // something changed
2645 						pDev->tid = tid;
2646 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2647 						if (pDev->pScsi_dev) {
2648 							pDev->pScsi_dev->changed = TRUE;
2649 							pDev->pScsi_dev->removable = TRUE;
2650 						}
2651 					}
2652 					// Found it - mark it scanned
2653 					pDev->state = DPTI_DEV_ONLINE;
2654 					break;
2655 				}
2656 				pDev = pDev->next_lun;
2657 			}
2658 		}
2659 	}
2660 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2661 		pDev =(struct adpt_device*) pI2o_dev->owner;
2662 		if(!pDev){
2663 			continue;
2664 		}
2665 		// Drive offline drives that previously existed but could not be found
2666 		// in the LCT table
2667 		if (pDev->state & DPTI_DEV_UNSCANNED){
2668 			pDev->state = DPTI_DEV_OFFLINE;
2669 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2670 			if (pDev->pScsi_dev) {
2671 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2672 			}
2673 		}
2674 	}
2675 	return 0;
2676 }
2677 
2678 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2679 {
2680 	struct scsi_cmnd* 	cmd = NULL;
2681 	struct scsi_device* 	d = NULL;
2682 
2683 	shost_for_each_device(d, pHba->host) {
2684 		unsigned long flags;
2685 		spin_lock_irqsave(&d->list_lock, flags);
2686 		list_for_each_entry(cmd, &d->cmd_list, list) {
2687 			if(cmd->serial_number == 0){
2688 				continue;
2689 			}
2690 			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2691 			cmd->scsi_done(cmd);
2692 		}
2693 		spin_unlock_irqrestore(&d->list_lock, flags);
2694 	}
2695 }
2696 
2697 
2698 /*============================================================================
2699  *  Routines from i2o subsystem
2700  *============================================================================
2701  */
2702 
2703 
2704 
2705 /*
2706  *	Bring an I2O controller into HOLD state. See the spec.
2707  */
2708 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2709 {
2710 	int rcode;
2711 
2712 	if(pHba->initialized ) {
2713 		if (adpt_i2o_status_get(pHba) < 0) {
2714 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2715 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2716 				return rcode;
2717 			}
2718 			if (adpt_i2o_status_get(pHba) < 0) {
2719 				printk(KERN_INFO "HBA not responding.\n");
2720 				return -1;
2721 			}
2722 		}
2723 
2724 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2725 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2726 			return -1;
2727 		}
2728 
2729 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2730 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2731 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2732 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2733 			adpt_i2o_reset_hba(pHba);
2734 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2735 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2736 				return -1;
2737 			}
2738 		}
2739 	} else {
2740 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2741 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2742 			return rcode;
2743 		}
2744 
2745 	}
2746 
2747 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2748 		return -1;
2749 	}
2750 
2751 	/* In HOLD state */
2752 
2753 	if (adpt_i2o_hrt_get(pHba) < 0) {
2754 		return -1;
2755 	}
2756 
2757 	return 0;
2758 }
2759 
2760 /*
2761  *	Bring a controller online into OPERATIONAL state.
2762  */
2763 
2764 static int adpt_i2o_online_hba(adpt_hba* pHba)
2765 {
2766 	if (adpt_i2o_systab_send(pHba) < 0)
2767 		return -1;
2768 	/* In READY state */
2769 
2770 	if (adpt_i2o_enable_hba(pHba) < 0)
2771 		return -1;
2772 
2773 	/* In OPERATIONAL state  */
2774 	return 0;
2775 }
2776 
2777 static s32 adpt_send_nop(adpt_hba *pHba, u32 m)
2778 {
2779 	u32 __iomem *msg;
2780 	ulong timeout = jiffies + 5*HZ;
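	/*
	 * A UTIL_NOP is posted into the frame at MFA m (or the next free
	 * frame when called with EMPTY_QUEUE); the IOP completes it
	 * immediately, which returns the frame to its free list.  Callers
	 * use this to give back frames they pulled but cannot use.
	 */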
2781 
2782 	while(m == EMPTY_QUEUE){
2783 		rmb();
2784 		m = readl(pHba->post_port);
2785 		if(m != EMPTY_QUEUE){
2786 			break;
2787 		}
2788 		if(time_after(jiffies,timeout)){
2789 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2790 			return 2;
2791 		}
2792 		schedule_timeout_uninterruptible(1);
2793 	}
2794 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2795 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2796 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2797 	writel( 0,&msg[2]);
2798 	wmb();
2799 
2800 	writel(m, pHba->post_port);
2801 	wmb();
2802 	return 0;
2803 }
2804 
2805 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2806 {
2807 	u8 *status;
2808 	dma_addr_t addr;
2809 	u32 __iomem *msg = NULL;
2810 	int i;
2811 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2812 	u32 m;
2813 
2814 	do {
2815 		rmb();
2816 		m = readl(pHba->post_port);
2817 		if (m != EMPTY_QUEUE) {
2818 			break;
2819 		}
2820 
2821 		if(time_after(jiffies,timeout)){
2822 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2823 			return -ETIMEDOUT;
2824 		}
2825 		schedule_timeout_uninterruptible(1);
2826 	} while(m == EMPTY_QUEUE);
2827 
2828 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2829 
2830 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2831 	if (!status) {
2832 		adpt_send_nop(pHba, m);
2833 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2834 			pHba->name);
2835 		return -ENOMEM;
2836 	}
2837 	memset(status, 0, 4);
2838 
2839 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2840 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2841 	writel(0, &msg[2]);
2842 	writel(0x0106, &msg[3]);	/* Transaction context */
2843 	writel(4096, &msg[4]);		/* Host page frame size */
2844 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2845 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2846 	writel((u32)addr, &msg[7]);
2847 
2848 	writel(m, pHba->post_port);
2849 	wmb();
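	/*
	 * The IOP acknowledges OUTBOUND_INIT by DMA-writing a one-byte
	 * state into the 4-byte status buffer posted above: 0x01 while
	 * initialization is still in progress, 0x04 once the outbound
	 * queue is ready.  Poll for a terminal state or a timeout.
	 */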
2850 
2851 	// Wait for the reply status to come back
2852 	do {
2853 		if (*status) {
2854 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2855 				break;
2856 			}
2857 		}
2858 		rmb();
2859 		if(time_after(jiffies,timeout)){
2860 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2861 			/* We lose 4 bytes of "status" here, but we
2862 			   cannot free these because controller may
2863 			   awake and corrupt those bytes at any time */
2864 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2865 			return -ETIMEDOUT;
2866 		}
2867 		schedule_timeout_uninterruptible(1);
2868 	} while (1);
2869 
2870 	// If the command was successful, fill the fifo with our reply
2871 	// message packets
2872 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2873 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2874 		return -2;
2875 	}
2876 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2877 
2878 	if(pHba->reply_pool != NULL) {
2879 		dma_free_coherent(&pHba->pDev->dev,
2880 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2881 			pHba->reply_pool, pHba->reply_pool_pa);
2882 	}
2883 
2884 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2885 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2886 				&pHba->reply_pool_pa, GFP_KERNEL);
2887 	if (!pHba->reply_pool) {
2888 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2889 		return -ENOMEM;
2890 	}
2891 	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2892 
2893 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2894 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2895 			pHba->reply_port);
2896 		wmb();
2897 	}
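	/*
	 * Writing each frame's bus address to the reply port hands that
	 * frame to the IOP; it fills a frame with a reply and returns the
	 * address through the same port, where adpt_isr() picks it up.
	 */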
2898 	adpt_i2o_status_get(pHba);
2899 	return 0;
2900 }
2901 
2902 
2903 /*
2904  * I2O System Table.  Contains information about
2905  * all the IOPs in the system.  Used to inform IOPs
2906  * about each other's existence.
2907  *
2908  * sys_tbl_ver is the CurrentChangeIndicator that is
2909  * used by IOPs to track changes.
2910  */
2911 
2912 
2913 
2914 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2915 {
2916 	ulong timeout;
2917 	u32 m;
2918 	u32 __iomem *msg;
2919 	u8 *status_block=NULL;
2920 
2921 	if(pHba->status_block == NULL) {
2922 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2923 					sizeof(i2o_status_block),
2924 					&pHba->status_block_pa, GFP_KERNEL);
2925 		if(pHba->status_block == NULL) {
2926 			printk(KERN_ERR
2927 			"dpti%d: Get Status Block failed; Out of memory. \n",
2928 			pHba->unit);
2929 			return -ENOMEM;
2930 		}
2931 	}
2932 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2933 	status_block = (u8*)(pHba->status_block);
2934 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2935 	do {
2936 		rmb();
2937 		m = readl(pHba->post_port);
2938 		if (m != EMPTY_QUEUE) {
2939 			break;
2940 		}
2941 		if(time_after(jiffies,timeout)){
2942 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2943 					pHba->name);
2944 			return -ETIMEDOUT;
2945 		}
2946 		schedule_timeout_uninterruptible(1);
2947 	} while(m==EMPTY_QUEUE);
2948 
2949 
2950 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2951 
2952 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2953 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2954 	writel(1, &msg[2]);
2955 	writel(0, &msg[3]);
2956 	writel(0, &msg[4]);
2957 	writel(0, &msg[5]);
2958 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2959 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2960 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2961 
2962 	//post message
2963 	writel(m, pHba->post_port);
2964 	wmb();
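	/*
	 * Rather than waiting for a reply frame, the code polls the
	 * status block: the IOP DMA-writes all 88 bytes, and the last
	 * byte turning 0xff serves as the completion marker.
	 */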
2965 
2966 	while(status_block[87]!=0xff){
2967 		if(time_after(jiffies,timeout)){
2968 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2969 				pHba->unit);
2970 			return -ETIMEDOUT;
2971 		}
2972 		rmb();
2973 		schedule_timeout_uninterruptible(1);
2974 	}
2975 
2976 	// Set up our number of outbound and inbound messages
2977 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2978 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2979 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2980 	}
2981 
2982 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2983 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2984 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2985 	}
2986 
2987 	// Calculate the Scatter Gather list size
2988 	if (dpt_dma64(pHba)) {
2989 		pHba->sg_tablesize
2990 		  = ((pHba->status_block->inbound_frame_size * 4
2991 		  - 14 * sizeof(u32))
2992 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2993 	} else {
2994 		pHba->sg_tablesize
2995 		  = ((pHba->status_block->inbound_frame_size * 4
2996 		  - 12 * sizeof(u32))
2997 		  / sizeof(struct sg_simple_element));
2998 	}
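	/*
	 * sg_tablesize is (inbound frame bytes - fixed header dwords) /
	 * per-element cost: with 64-bit DMA the header is two dwords
	 * larger (the enable-64-bit marker element) and each SGE costs an
	 * extra u32 for the high address dword.
	 */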
2999 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3000 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
3001 	}
3002 
3003 
3004 #ifdef DEBUG
3005 	printk("dpti%d: State = ",pHba->unit);
3006 	switch(pHba->status_block->iop_state) {
3007 		case 0x01:
3008 			printk("INIT\n");
3009 			break;
3010 		case 0x02:
3011 			printk("RESET\n");
3012 			break;
3013 		case 0x04:
3014 			printk("HOLD\n");
3015 			break;
3016 		case 0x05:
3017 			printk("READY\n");
3018 			break;
3019 		case 0x08:
3020 			printk("OPERATIONAL\n");
3021 			break;
3022 		case 0x10:
3023 			printk("FAILED\n");
3024 			break;
3025 		case 0x11:
3026 			printk("FAULTED\n");
3027 			break;
3028 		default:
3029 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3030 	}
3031 #endif
3032 	return 0;
3033 }
3034 
3035 /*
3036  * Get the IOP's Logical Configuration Table
3037  */
3038 static int adpt_i2o_lct_get(adpt_hba* pHba)
3039 {
3040 	u32 msg[8];
3041 	int ret;
3042 	u32 buf[16];
3043 
3044 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3045 		pHba->lct_size = pHba->status_block->expected_lct_size;
3046 	}
3047 	do {
3048 		if (pHba->lct == NULL) {
3049 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3050 					pHba->lct_size, &pHba->lct_pa,
3051 					GFP_ATOMIC);
3052 			if(pHba->lct == NULL) {
3053 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3054 					pHba->name);
3055 				return -ENOMEM;
3056 			}
3057 		}
3058 		memset(pHba->lct, 0, pHba->lct_size);
3059 
3060 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3061 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3062 		msg[2] = 0;
3063 		msg[3] = 0;
3064 		msg[4] = 0xFFFFFFFF;	/* All devices */
3065 		msg[5] = 0x00000000;	/* Report now */
3066 		msg[6] = 0xD0000000|pHba->lct_size;
3067 		msg[7] = (u32)pHba->lct_pa;
3068 
3069 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3070 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3071 				pHba->name, ret);
3072 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3073 			return ret;
3074 		}
3075 
3076 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3077 			int newsize = pHba->lct->table_size << 2;
3078 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size, pHba->lct, pHba->lct_pa);
3079 			pHba->lct_size = newsize;
3080 			pHba->lct = NULL;
3081 		}
3082 	} while (pHba->lct == NULL);
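	/*
	 * The returned header reports the table's true size; when the
	 * buffer was too small it is freed (with the size it was actually
	 * allocated with) and the request repeated with the larger size
	 * until the whole LCT is captured.
	 */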
3083 
3084 	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3085 
3086 
3087 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3088 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3089 		pHba->FwDebugBufferSize = buf[1];
3090 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3091 						pHba->FwDebugBufferSize);
3092 		if (pHba->FwDebugBuffer_P) {
3093 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3094 							FW_DEBUG_FLAGS_OFFSET;
3095 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3096 							FW_DEBUG_BLED_OFFSET;
3097 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3098 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3099 						FW_DEBUG_STR_LENGTH_OFFSET;
3100 			pHba->FwDebugBuffer_P += buf[2];
3101 			pHba->FwDebugFlags = 0;
3102 		}
3103 	}
3104 
3105 	return 0;
3106 }
3107 
3108 static int adpt_i2o_build_sys_table(void)
3109 {
3110 	adpt_hba* pHba = hba_chain;
3111 	int count = 0;
3112 
3113 	if (sys_tbl)
3114 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3115 					sys_tbl, sys_tbl_pa);
3116 
3117 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3118 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3119 
3120 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3121 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3122 	if (!sys_tbl) {
3123 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3124 		return -ENOMEM;
3125 	}
3126 	memset(sys_tbl, 0, sys_tbl_len);
3127 
3128 	sys_tbl->num_entries = hba_count;
3129 	sys_tbl->version = I2OVERSION;
3130 	sys_tbl->change_ind = sys_tbl_ind++;
3131 
3132 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3133 		u64 addr;
3134 		// Get updated Status Block so we have the latest information
3135 		if (adpt_i2o_status_get(pHba)) {
3136 			sys_tbl->num_entries--;
3137 			continue; // try next one
3138 		}
3139 
3140 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3141 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3142 		sys_tbl->iops[count].seg_num = 0;
3143 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3144 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3145 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3146 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3147 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3148 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3149 		addr = pHba->base_addr_phys + 0x40;
3150 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3151 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3152 
3153 		count++;
3154 	}
3155 
3156 #ifdef DEBUG
3157 {
3158 	u32 *table = (u32*)sys_tbl;
3159 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3160 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3161 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3162 			count, table[count]);
3163 	}
3164 }
3165 #endif
3166 
3167 	return 0;
3168 }
3169 
3170 
3171 /*
3172  *	 Dump the information block associated with a given unit (TID)
3173  */
3174 
3175 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3176 {
3177 	char buf[64];
3178 	int unit = d->lct_data.tid;
3179 
3180 	printk(KERN_INFO "TID %3.3d ", unit);
3181 
3182 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3183 	{
3184 		buf[16]=0;
3185 		printk(" Vendor: %-12.12s", buf);
3186 	}
3187 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3188 	{
3189 		buf[16]=0;
3190 		printk(" Device: %-12.12s", buf);
3191 	}
3192 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3193 	{
3194 		buf[8]=0;
3195 		printk(" Rev: %-12.12s\n", buf);
3196 	}
3197 #ifdef DEBUG
3198 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3199 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3200 	 printk(KERN_INFO "\tFlags: ");
3201 
3202 	 if(d->lct_data.device_flags&(1<<0))
3203 		  printk("C");	     // ConfigDialog requested
3204 	 if(d->lct_data.device_flags&(1<<1))
3205 		  printk("U");	     // Multi-user capable
3206 	 if(!(d->lct_data.device_flags&(1<<4)))
3207 		  printk("P");	     // Peer service enabled!
3208 	 if(!(d->lct_data.device_flags&(1<<5)))
3209 		  printk("M");	     // Mgmt service enabled!
3210 	 printk("\n");
3211 #endif
3212 }
3213 
3214 #ifdef DEBUG
3215 /*
3216  *	Do i2o class name lookup
3217  */
3218 static const char *adpt_i2o_get_class_name(int class)
3219 {
3220 	int idx = 16;
3221 	static char *i2o_class_name[] = {
3222 		"Executive",
3223 		"Device Driver Module",
3224 		"Block Device",
3225 		"Tape Device",
3226 		"LAN Interface",
3227 		"WAN Interface",
3228 		"Fibre Channel Port",
3229 		"Fibre Channel Device",
3230 		"SCSI Device",
3231 		"ATE Port",
3232 		"ATE Device",
3233 		"Floppy Controller",
3234 		"Floppy Device",
3235 		"Secondary Bus Port",
3236 		"Peer Transport Agent",
3237 		"Peer Transport",
3238 		"Unknown"
3239 	};
3240 
3241 	switch(class&0xFFF) {
3242 	case I2O_CLASS_EXECUTIVE:
3243 		idx = 0; break;
3244 	case I2O_CLASS_DDM:
3245 		idx = 1; break;
3246 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3247 		idx = 2; break;
3248 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3249 		idx = 3; break;
3250 	case I2O_CLASS_LAN:
3251 		idx = 4; break;
3252 	case I2O_CLASS_WAN:
3253 		idx = 5; break;
3254 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3255 		idx = 6; break;
3256 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3257 		idx = 7; break;
3258 	case I2O_CLASS_SCSI_PERIPHERAL:
3259 		idx = 8; break;
3260 	case I2O_CLASS_ATE_PORT:
3261 		idx = 9; break;
3262 	case I2O_CLASS_ATE_PERIPHERAL:
3263 		idx = 10; break;
3264 	case I2O_CLASS_FLOPPY_CONTROLLER:
3265 		idx = 11; break;
3266 	case I2O_CLASS_FLOPPY_DEVICE:
3267 		idx = 12; break;
3268 	case I2O_CLASS_BUS_ADAPTER_PORT:
3269 		idx = 13; break;
3270 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3271 		idx = 14; break;
3272 	case I2O_CLASS_PEER_TRANSPORT:
3273 		idx = 15; break;
3274 	}
3275 	return i2o_class_name[idx];
3276 }
3277 #endif
3278 
3279 
3280 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3281 {
3282 	u32 msg[6];
3283 	int ret, size = sizeof(i2o_hrt);
3284 
3285 	do {
3286 		if (pHba->hrt == NULL) {
3287 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3288 					size, &pHba->hrt_pa, GFP_KERNEL);
3289 			if (pHba->hrt == NULL) {
3290 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3291 				return -ENOMEM;
3292 			}
3293 		}
3294 
3295 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3296 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3297 		msg[2]= 0;
3298 		msg[3]= 0;
3299 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3300 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3301 
3302 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3303 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3304 			return ret;
3305 		}
3306 
3307 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3308 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3309 			dma_free_coherent(&pHba->pDev->dev, size,
3310 				pHba->hrt, pHba->hrt_pa);
3311 			size = newsize;
3312 			pHba->hrt = NULL;
3313 		}
3314 	} while(pHba->hrt == NULL);
3315 	return 0;
3316 }
3317 
3318 /*
3319  *	 Query one scalar group value or a whole scalar group.
3320  */
3321 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3322 			int group, int field, void *buf, int buflen)
3323 {
3324 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
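	/* opblk is a UtilParamsGet operation block: one operation, a
	 * reserved word, FIELD_GET on the given group, then the field
	 * count and index (the count is patched to -1 below to fetch the
	 * whole group). */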
3325 	u8 *opblk_va;
3326 	dma_addr_t opblk_pa;
3327 	u8 *resblk_va;
3328 	dma_addr_t resblk_pa;
3329 
3330 	int size;
3331 
3332 	/* 8 bytes for header */
3333 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3334 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3335 	if (resblk_va == NULL) {
3336 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3337 		return -ENOMEM;
3338 	}
3339 
3340 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3341 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3342 	if (opblk_va == NULL) {
3343 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3344 			resblk_va, resblk_pa);
3345 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3346 			pHba->name);
3347 		return -ENOMEM;
3348 	}
3349 	if (field == -1)  		/* whole group */
3350 			opblk[4] = -1;
3351 
3352 	memcpy(opblk_va, opblk, sizeof(opblk));
3353 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3354 		opblk_va, opblk_pa, sizeof(opblk),
3355 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3356 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3357 	if (size == -ETIME) {
3358 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3359 							resblk_va, resblk_pa);
3360 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3361 		return -ETIME;
3362 	} else if (size == -EINTR) {
3363 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3364 							resblk_va, resblk_pa);
3365 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3366 		return -EINTR;
3367 	}
3368 
3369 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3370 
3371 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3372 						resblk_va, resblk_pa);
3373 	if (size < 0)
3374 		return size;
3375 
3376 	return buflen;
3377 }
3378 
3379 
3380 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3381  *
3382  *	This function can be used for all UtilParamsGet/Set operations.
3383  *	The OperationBlock is given in opblk-buffer,
3384  *	and results are returned in resblk-buffer.
3385  *	Note that the minimum sized resblk is 8 bytes and contains
3386  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3387  */
3388 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3389 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3390 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3391 {
3392 	u32 msg[9];
3393 	u32 *res = (u32 *)resblk_va;
3394 	int wait_status;
3395 
3396 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3397 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3398 	msg[2] = 0;
3399 	msg[3] = 0;
3400 	msg[4] = 0;
3401 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3402 	msg[6] = (u32)opblk_pa;
3403 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3404 	msg[8] = (u32)resblk_pa;
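	/*
	 * Two simple SG elements follow the header: 0x54000000 (simple |
	 * end-of-buffer | direction out, per the I2O SGL flag values)
	 * describes the operation block sent to the IOP, and 0xD0000000
	 * (simple | end-of-buffer | last-element) the result block it
	 * writes back.
	 */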
3405 
3406 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3407 		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3408    		return wait_status; 	/* -DetailedStatus */
3409 	}
3410 
3411 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3412 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3413 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3414 			pHba->name,
3415 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3416 							 : "PARAMS_GET",
3417 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3418 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3419 	}
3420 
3421 	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3422 }
3423 
3424 
3425 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3426 {
3427 	u32 msg[4];
3428 	int ret;
3429 
3430 	adpt_i2o_status_get(pHba);
3431 
3432 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3433 
3434 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3435    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3436 		return 0;
3437 	}
3438 
3439 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3440 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3441 	msg[2] = 0;
3442 	msg[3] = 0;
3443 
3444 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3445 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3446 				pHba->unit, -ret);
3447 	} else {
3448 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3449 	}
3450 
3451 	adpt_i2o_status_get(pHba);
3452 	return ret;
3453 }
3454 
3455 
3456 /*
3457  * Enable IOP. Allows the IOP to resume external operations.
3458  */
3459 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3460 {
3461 	u32 msg[4];
3462 	int ret;
3463 
3464 	adpt_i2o_status_get(pHba);
3465 	if(!pHba->status_block){
3466 		return -ENOMEM;
3467 	}
3468 	/* Enable only allowed on READY state */
3469 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3470 		return 0;
3471 
3472 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3473 		return -EINVAL;
3474 
3475 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3476 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3477 	msg[2]= 0;
3478 	msg[3]= 0;
3479 
3480 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3481 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3482 			pHba->name, ret);
3483 	} else {
3484 		PDEBUG("%s: Enabled.\n", pHba->name);
3485 	}
3486 
3487 	adpt_i2o_status_get(pHba);
3488 	return ret;
3489 }
3490 
3491 
3492 static int adpt_i2o_systab_send(adpt_hba* pHba)
3493 {
3494 	 u32 msg[12];
3495 	 int ret;
3496 
3497 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3498 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3499 	msg[2] = 0;
3500 	msg[3] = 0;
3501 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3502 	msg[5] = 0;				   /* Segment 0 */
3503 
3504 	/*
3505 	 * Provide three SGL-elements:
3506 	 * System table (SysTab), Private memory space declaration and
3507 	 * Private i/o space declaration
3508 	 */
3509 	msg[6] = 0x54000000 | sys_tbl_len;
3510 	msg[7] = (u32)sys_tbl_pa;
3511 	msg[8] = 0x54000000 | 0;
3512 	msg[9] = 0;
3513 	msg[10] = 0xD4000000 | 0;
3514 	msg[11] = 0;
3515 
3516 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3517 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3518 			pHba->name, ret);
3519 	}
3520 #ifdef DEBUG
3521 	else {
3522 		PINFO("%s: SysTab set.\n", pHba->name);
3523 	}
3524 #endif
3525 
3526 	return ret;
3527  }
3528 
3529 
3530 /*============================================================================
3531  *
3532  *============================================================================
3533  */
3534 
3535 
3536 #ifdef UARTDELAY
3537 
3538 static void adpt_delay(int millisec)
3539 {
3540 	int i;
3541 	for (i = 0; i < millisec; i++) {
3542 		udelay(1000);	/* delay for one millisecond */
3543 	}
3544 }
3545 
3546 #endif
3547 
3548 static struct scsi_host_template driver_template = {
3549 	.module			= THIS_MODULE,
3550 	.name			= "dpt_i2o",
3551 	.proc_name		= "dpt_i2o",
3552 	.show_info		= adpt_show_info,
3553 	.info			= adpt_info,
3554 	.queuecommand		= adpt_queue,
3555 	.eh_abort_handler	= adpt_abort,
3556 	.eh_device_reset_handler = adpt_device_reset,
3557 	.eh_bus_reset_handler	= adpt_bus_reset,
3558 	.eh_host_reset_handler	= adpt_reset,
3559 	.bios_param		= adpt_bios_param,
3560 	.slave_configure	= adpt_slave_configure,
3561 	.can_queue		= MAX_TO_IOP_MESSAGES,
3562 	.this_id		= 7,
3563 	.use_clustering		= ENABLE_CLUSTERING,
3564 };
3565 
3566 static int __init adpt_init(void)
3567 {
3568 	int		error;
3569 	adpt_hba	*pHba, *next;
3570 
3571 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3572 
3573 	error = adpt_detect(&driver_template);
3574 	if (error < 0)
3575 		return error;
3576 	if (hba_chain == NULL)
3577 		return -ENODEV;
3578 
3579 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3580 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3581 		if (error)
3582 			goto fail;
3583 		scsi_scan_host(pHba->host);
3584 	}
3585 	return 0;
3586 fail:
3587 	for (pHba = hba_chain; pHba; pHba = next) {
3588 		next = pHba->next;
3589 		scsi_remove_host(pHba->host);
3590 	}
3591 	return error;
3592 }
3593 
3594 static void __exit adpt_exit(void)
3595 {
3596 	adpt_hba	*pHba, *next;
3597 
3598 	for (pHba = hba_chain; pHba; pHba = pHba->next)
3599 		scsi_remove_host(pHba->host);
3600 	for (pHba = hba_chain; pHba; pHba = next) {
3601 		next = pHba->next;
3602 		adpt_release(pHba->host);
3603 	}
3604 }
3605 
3606 module_init(adpt_init);
3607 module_exit(adpt_exit);
3608 
3609 MODULE_LICENSE("GPL");
3610