// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};



/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

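/*
 * 64-bit DMA is only used when dma_addr_t is wider than 32 bits
 * (compile-time check) and the HBA was successfully given a 64-bit
 * DMA mask at probe time (the runtime pHba->dma64 flag set in
 * adpt_install_hba). dma_high()/dma_low() below split a dma_addr_t
 * into the two 32-bit halves that the message frames expect.
 */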
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

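/*
 * The firmware exposes a "blink LED" fault code through two debug
 * registers; the flag byte reading 0xbc apparently indicates that the
 * value byte holds a valid code. Returns 0 when no code is available.
 */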
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

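/*
 * One-shot detection of all supported controllers. This walks the I2O
 * bring-up state machine for every board found: scan the PCI bus and
 * install each HBA, activate the IOPs (INIT -> HOLD), build the system
 * table, bring the IOPs online (HOLD -> OPERATIONAL), then read and
 * parse each logical configuration table (LCT) before registering the
 * SCSI hosts and the /dev/dpti* control nodes.
 */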
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}

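/*
 * Issue a standard SCSI INQUIRY to the adapter itself through the
 * DPT-private I2O_CMD_SCSI_EXEC path (single SG element, 36-byte
 * buffer) and build the pHba->detail identification string from the
 * vendor, model and firmware-revision fields of the reply.
 */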
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device. Set up a pointer
		 * to the device structure. This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

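/*
 * BIOS geometry heuristic: pick a heads/sectors pair from the capacity
 * tier (18/2 for floppy-sized media up through 255/63 above 0x80000
 * sectors) and derive the cylinder count with sector_div(). CD-ROMs
 * are special-cased to the conventional 252/63/1111 geometry.
 */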
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

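/*
 * Per-controller PCI probe: enable the device, claim its regions,
 * choose a DMA mask (64-bit only when the platform actually requires
 * more than 32 bits), size and ioremap the BAR(s) - split-BAR Raptor
 * boards keep their message window in BAR1 - then allocate the
 * adpt_hba, link it into hba_chain and hook up the shared interrupt
 * handler.
 */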
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


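/*
 * Tear down everything adpt_install_hba() and the I2O init path set
 * up: unlink the HBA from hba_chain, release the IRQ, mappings and PCI
 * regions, free the coherent HRT/LCT/status/reply allocations, and
 * discard the per-channel device lists.
 */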
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


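/*
 * Synchronous message post. A wait-queue entry with a 15-bit id is
 * chained onto adpt_post_wait_queue and the id (with bit 31 set) is
 * written into the message's transaction context, so the interrupt
 * handler can route the reply to adpt_i2o_post_wait_complete() and
 * wake us with the detail status.
 */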
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


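/*
 * Low-level post: read the inbound post port to obtain a free message
 * frame offset (EMPTY_QUEUE means none is available yet), copy the
 * message into that frame with memcpy_toio(), then write the offset
 * back to the post port to hand the frame to the IOP.
 */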
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding. If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

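/*
 * Hard-reset the IOP with an EXEC ADAPTER_RESET message whose reply is
 * a single status byte in coherent memory: 0x01 means the reset is in
 * progress (wait for a message frame to reappear), 0x02 means the IOP
 * rejected the reset.
 */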
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


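/*
 * Walk the logical configuration table. Each entry is 9 words after a
 * 3-word header, which is where the "max = (table_size - 3) / 9"
 * computation below comes from. Entries with a user TID claimed
 * (user_tid != 0xfff, e.g. hidden behind arrays) are only mined for
 * the highest channel/id/lun values; unclaimed entries become
 * i2o_device/adpt_device records for the SCSI layer.
 */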
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system. This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

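/*
 * Interrupt handler. While the controller reports a pending interrupt,
 * pop reply-frame addresses from the outbound FIFO, translate each
 * physical address back into the coherent reply_pool, and dispatch:
 * context bit 31 set means a post-wait completion, otherwise it is a
 * SCSI command completion looked up by tag. Each frame is written back
 * to the reply port to return it to the free list.
 */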
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			continue;
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL) {
				printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_to_scsi(reply, cmd);
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}

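/*
 * Build and post the I2O_CMD_SCSI_EXEC message for a SCSI command:
 * header and DPT-private words, the block tag + 1 as transaction
 * context, a fixed 16-byte CDB block, then a scatter-gather list where
 * each element carries direction|0x10000000 and the final element is
 * rewritten with the 0xD0000000 end-of-list marker.
 */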
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
				pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[3] = cmd->request->tag + 1;
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;	// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;	// SINGLE SGE
	}
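	/*
	 * The two extra words written in the 64-bit branch above appear
	 * to form an SGL attributes element: 0x7C in the top byte marks
	 * the element type and the low bits request 64-bit SG addressing,
	 * with the page size in the following word.  This is an
	 * interpretation of the magic numbers; the original comment only
	 * notes that it enables 64-bit mode.
	 */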
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on its way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
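
/*
 * Resulting inbound frame, in 32-bit words, as built above:
 *   [0]  frame size / SGL offset    [1]  function | HOST_TID | device TID
 *   [2]  initiator context          [3]  transaction context (tag + 1)
 *   [4]  DPT private: I2O_CMD_SCSI_EXEC | organization id
 *   [5]  device TID                 [6]  SCB flags | CDB length
 *   [7..10]  16-byte CDB            [11] total byte count
 *   [12..]   SG attributes element (64-bit mode only) followed by the
 *            scatter/gather list, matching SGL_OFFSET_12 in msg[0]
 */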


static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
				/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;
	host->use_cmd_list = 1;

	return 0;
}


static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;
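
	/*
	 * Reply frame fields used here (byte offsets): 0 = message flags
	 * (including MSG_FAIL), 16 = detailed status (HBA status in the
	 * high byte, device status in the low byte), 20 = bytes actually
	 * transferred, 28 = start of the in-message sense data.
	 */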
2098
2099 // calculate resid for sg
2100 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2101
2102 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2103
2104 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2105
2106 if(!(reply_flags & MSG_FAIL)) {
2107 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2108 case I2O_SCSI_DSC_SUCCESS:
2109 cmd->result = (DID_OK << 16);
2110 // handle underflow
2111 if (readl(reply+20) < cmd->underflow) {
2112 cmd->result = (DID_ERROR <<16);
2113 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2114 }
2115 break;
2116 case I2O_SCSI_DSC_REQUEST_ABORTED:
2117 cmd->result = (DID_ABORT << 16);
2118 break;
2119 case I2O_SCSI_DSC_PATH_INVALID:
2120 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2121 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2122 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2123 case I2O_SCSI_DSC_NO_ADAPTER:
2124 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2125 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2126 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2127 cmd->result = (DID_TIME_OUT << 16);
2128 break;
2129 case I2O_SCSI_DSC_ADAPTER_BUSY:
2130 case I2O_SCSI_DSC_BUS_BUSY:
2131 cmd->result = (DID_BUS_BUSY << 16);
2132 break;
2133 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2134 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2135 cmd->result = (DID_RESET << 16);
2136 break;
2137 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2138 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2139 cmd->result = (DID_PARITY << 16);
2140 break;
2141 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2142 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2143 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2144 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2145 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2146 case I2O_SCSI_DSC_DATA_OVERRUN:
2147 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2148 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2149 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2150 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2151 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2152 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2153 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2154 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2155 case I2O_SCSI_DSC_INVALID_CDB:
2156 case I2O_SCSI_DSC_LUN_INVALID:
2157 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2158 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2159 case I2O_SCSI_DSC_NO_NEXUS:
2160 case I2O_SCSI_DSC_CDB_RECEIVED:
2161 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2162 case I2O_SCSI_DSC_QUEUE_FROZEN:
2163 case I2O_SCSI_DSC_REQUEST_INVALID:
2164 default:
2165 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2166 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2167 hba_status, dev_status, cmd->cmnd[0]);
2168 cmd->result = (DID_ERROR << 16);
2169 break;
2170 }
2171
2172 // copy over the request sense data if it was a check
2173 // condition status
2174 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2175 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2176 // Copy over the sense data
2177 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2178 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2179 cmd->sense_buffer[2] == DATA_PROTECT ){
2180 /* This is to handle an array failed */
2181 cmd->result = (DID_TIME_OUT << 16);
2182 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2183 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2184 hba_status, dev_status, cmd->cmnd[0]);
2185
2186 }
2187 }
2188 } else {
2189 /* In this condtion we could not talk to the tid
2190 * the card rejected it. We should signal a retry
2191 * for a limitted number of retries.
2192 */
2193 cmd->result = (DID_TIME_OUT << 16);
2194 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2195 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2196 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2197 }
2198
2199 cmd->result |= (dev_status);
2200
2201 if(cmd->scsi_done != NULL){
2202 cmd->scsi_done(cmd);
2203 }
2204 return cmd->result;
2205 }
2206
2207
adpt_rescan(adpt_hba * pHba)2208 static s32 adpt_rescan(adpt_hba* pHba)
2209 {
2210 s32 rcode;
2211 ulong flags = 0;
2212
2213 if(pHba->host)
2214 spin_lock_irqsave(pHba->host->host_lock, flags);
2215 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2216 goto out;
2217 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2218 goto out;
2219 rcode = 0;
2220 out: if(pHba->host)
2221 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2222 return rcode;
2223 }
2224
2225
adpt_i2o_reparse_lct(adpt_hba * pHba)2226 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2227 {
2228 int i;
2229 int max;
2230 int tid;
2231 struct i2o_device *d;
2232 i2o_lct *lct = pHba->lct;
2233 u8 bus_no = 0;
2234 s16 scsi_id;
2235 u64 scsi_lun;
2236 u32 buf[10]; // at least 8 u32's
2237 struct adpt_device* pDev = NULL;
2238 struct i2o_device* pI2o_dev = NULL;
2239
2240 if (lct == NULL) {
2241 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2242 return -1;
2243 }
2244
2245 max = lct->table_size;
2246 max -= 3;
2247 max /= 9;
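	/*
	 * table_size is in 32-bit words: a 3-word header followed by
	 * 9-word entries, hence (table_size - 3) / 9 entries.
	 */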

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* walk the LUN chain for this target */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}


/*============================================================================
 * Routines from i2o subsystem
 *============================================================================
 */



/*
 * Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 * Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0)
		return -1;
	/* In READY state */

	if (adpt_i2o_enable_hba(pHba) < 0)
		return -1;

	/* In OPERATIONAL state  */
	return 0;
}

static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}
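
/*
 * adpt_send_nop() hands a message frame back to the IOP when the
 * driver has claimed an inbound MFA it cannot use (see the allocation
 * failure path below) and, in the ISR, retires the preserved MFA of a
 * failed message, so frames are not leaked.
 */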

static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

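	/*
	 * Prime the outbound FIFO: writing the bus address of each empty
	 * reply frame to the reply port donates that frame to the IOP,
	 * which later returns it (via the same port) carrying a reply;
	 * the ISR then processes it and writes it back again.
	 */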
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}


/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */



static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory.\n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

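	/*
	 * The IOP DMAs the 88-byte status block into host memory.  The
	 * block was zeroed above, and its last byte reading 0xff is used
	 * as the completion indicator.
	 */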
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}
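
	/*
	 * Illustration (not a value from the status block): with a
	 * 32-word (128-byte) inbound frame in 32-bit mode, the 12-word
	 * message header leaves 128 - 48 = 80 bytes, and at 8 bytes per
	 * simple SG element that allows 10 elements.  In 64-bit mode the
	 * header costs 14 words and each element an extra u32 for the
	 * high address.
	 */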


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}

/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
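	/*
	 * The real table size is not known up front: request with the
	 * expected size, and if the IOP reports a larger table_size,
	 * grow the buffer and retry until the whole LCT fits.
	 */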
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 * Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	// ConfigDialog requested
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	// Multi-user capable
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	// Peer service enabled!
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 * Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
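	/*
	 * Operation block layout (u16 words), read as the usual
	 * UtilParamsGet convention: operation count, reserved, operation
	 * code, group number, field count, field index.  This naming of
	 * the words is an interpretation; the code itself only relies on
	 * opblk[4] being forced to -1 below to fetch the whole group.
	 */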
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
		resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}


/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
};

static int __init adpt_init(void)
{
	int		error;
	adpt_hba	*pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba	*pHba, *next;

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");