// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>
#include <linux/pgtable.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};



/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba *hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
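
/*
 * How the pieces above fit together (a summary of the existing behaviour,
 * not a change): adpt_i2o_post_wait() tags an outgoing message with a
 * 15-bit id and sets bit 31 of the transaction context to mark it as a
 * post-wait request, chains an adpt_i2o_post_wait_data entry onto
 * adpt_post_wait_queue, and sleeps.  When the reply arrives, the interrupt
 * handler sees the context bit and calls adpt_i2o_post_wait_complete(),
 * which looks the id up in the queue, records the status, and wakes the
 * sleeping caller.
 */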


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba *host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc) {
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}
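
/*
 * Note (an assumption from usage, not a documented constant): 0xbc in the
 * BLED flag register appears to be the firmware's marker that the blink-LED
 * fault code in FwDebugBLEDvalue_P is valid; anything else reads as
 * "no blink code" and we report 0.
 */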

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci, dptids);

static int adpt_detect(struct scsi_host_template *sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");
	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if (pDev->device == PCI_DPT_DEVICE_ID ||
		    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
			if (adpt_install_hba(sht, pDev)) {
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count - 1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO "%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING "dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING "dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba *pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8 *buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "%s: Could not allocate buffer\n", pHba->name);
		return;
	}
	memset((void *)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret */;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|6 /* cmd len */;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}
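	/*
	 * SG element encoding used above (per the I2O SGL format): the top
	 * byte of the first word carries the element flags/type, so
	 * 0xD0000000 marks a simple element that is both the last element
	 * and the end of the buffer; `direction` would OR in 0x04000000 for
	 * host-to-device transfers, and the low bits hold the byte count.
	 */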

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8 *)&buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8 *)&buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}


static int adpt_slave_configure(struct scsi_device *device)
{
	struct Scsi_Host *host = device->host;

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	adpt_hba *pHba = NULL;
	struct adpt_device *pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;
	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if ((pDev = (struct adpt_device *)(cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel,
					     (u32)cmd->device->id,
					     cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);
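	/*
	 * Note on the line above: sector_div() divides `capacity` in place
	 * and returns the remainder of the division, so `cylinders` here
	 * ends up holding that remainder while the quotient is left behind
	 * in `capacity`.
	 */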

	// Special case if CDROM
	if (sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba *pHba;

	pHba = (adpt_hba *)host->hostdata[0];
	return (char *)(pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device *d;
	int id;
	int chan;
	adpt_hba *pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
		   pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
		   host->can_queue, (int)pHba->reply_fifo_size, host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for (chan = 0; chan < MAX_CHANNEL; chan++) {
		for (id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while (d) {
				seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					   unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					   scsi_device_online(d->pScsi_dev) ? "online" : "offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba = NULL;		/* host bus adapter structure */
	struct adpt_device *dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to Abort\n", pHba->name);
	if ((dptdevice = (void *)(cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Abort cmd not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Abort failed.\n", pHba->name);
		return FAILED;
	}
	printk(KERN_INFO "%s: Abort complete.\n", pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device *d = cmd->device->hostdata;

	pHba = (void *)cmd->device->host->hostdata[0];
	printk(KERN_INFO "%s: Trying to reset device\n", pHba->name);
	if (!d) {
		printk(KERN_INFO "%s: Reset Device: Device Not found\n", pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO "%s: Device reset not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO "%s: Device reset failed\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO "%s: Device reset successful\n", pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING "%s: Bus reset: SCSI Bus %d: tid: %d\n",
	       pHba->name, cmd->device->channel,
	       pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING "%s: Bus reset failed.\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING "%s: Bus reset success.\n", pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd *cmd)
{
	adpt_hba *pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba *)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING "%s: Hba Reset: scsi id %d: tid: %d\n",
	       name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if (rcode == 0) {
		printk(KERN_WARNING "%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING "%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba *pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n", pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for (p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template *sht, struct pci_dev *pDev)
{

	adpt_hba *pHba = NULL;
	adpt_hba *p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if (pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev, 0);
	hba_map0_area_size = pci_resource_len(pDev, 0);

	// Check if standard PCI card or single BAR Raptor
	if (pDev->device == PCI_DPT_DEVICE_ID) {
		if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else {	// Not Raptor - it is a PCI card
			if (hba_map0_area_size > 0x100000) {
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev, 1);
		hba_map1_area_size = pci_resource_len(pDev, 1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 *   "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if (raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if (hba_chain != NULL) {
		for (p = hba_chain; p->next; p = p->next)
			;
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if (raptorFlag == 0) {
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO "Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO " BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
		printk(KERN_INFO " BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR "%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba *pHba)
{
	adpt_hba *p1;
	adpt_hba *p2;
	struct i2o_device *d;
	struct i2o_device *next;
	int i;
	int j;
	struct adpt_device *pDev;
	struct adpt_device *pNext;


	mutex_lock(&adpt_configuration_lock);
	if (pHba->host) {
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
		if (p1 == pHba) {
			if (p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if (pHba->msg_addr_virt != pHba->base_addr_virt) {
		iounmap(pHba->msg_addr_virt);
	}
	if (pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if (pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if (pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if (pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if (pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for (d = pHba->devices; d; d = next) {
		next = d->next;
		kfree(d);
	}
	for (i = 0; i < pHba->top_scsi_channel; i++) {
		for (j = 0; j < MAX_ID; j++) {
			if (pHba->channel[i].device[j] != NULL) {
				for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
			       MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if (hba_count <= 0) {
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device *adpt_find_device(adpt_hba *pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device *d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if (!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if (d->scsi_lun == lun) {
		return d;
	}

	/* else we need to look through all the luns */
	for (d = d->next_lun; d; d = d->next_lun) {
		if (d->scsi_lun == lun) {
			return d;
		}
	}
	return NULL;
}

static int adpt_i2o_post_wait(adpt_hba *pHba, u32 *msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but we cannot get the result
				// in the specified time. Freeing resources
				// is dangerous.
				status = -ETIME;
			}
		}
		if (pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if (status == -ETIMEDOUT) {
		printk(KERN_INFO "dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if (p1 == wait_data) {
			if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if (p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba *pHba, u32 *data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	// post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
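
/*
 * Message posting in brief: a read of the post port hands back the offset
 * (MFA) of a free inbound message frame, or EMPTY_QUEUE when the IOP has
 * none spare; the request is copied into that frame through the message
 * window and the same offset is written back to the post port to hand the
 * frame to the firmware.
 */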


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if (p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG "dpti: Could Not find task %d in wait queue\n", context);
	printk(KERN_DEBUG " Tasks in wait queue:\n");
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG " %d\n", p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba *pHba)
{
	u32 msg[8];
	u8 *status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if (pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR "IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while (*status == 0) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: IOP Reset Timeout\n", pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if (*status == 0x01 /* I2O_EXEC_IOP_RESET_IN_PROGRESS */) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "%s: Timeout waiting for IOP Reset.\n", pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if (*status == 0x02 ||
	    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING "%s: Reset reject, trying to clear\n",
		       pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba *pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];	// larger than 7, or 8 ...
	struct adpt_device *pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;
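	/*
	 * lct->table_size is counted in 32-bit words: three header words
	 * followed by nine words per entry, hence (size - 3) / 9 gives the
	 * number of LCT entries.
	 */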

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
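			/*
			 * Layout of the DPT device-info scalar (group 0x8000)
			 * as consumed here: buf[0] packs the device type in
			 * bits 0-7, flags in bits 8-15 and the bus number in
			 * bits 16-31; buf[1] holds the SCSI id and buf[2]
			 * onwards the LUN bytes.
			 */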
			bus_no = buf[0] >> 16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING "%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID) {
				printk(KERN_WARNING "%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			if (scsi_id > pHba->top_scsi_id) {
				pHba->top_scsi_id = scsi_id;
			}
			if (scsi_lun > pHba->top_scsi_lun) {
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if (d == NULL) {
			printk(KERN_CRIT "%s: Out of memory for I2O device data.\n", pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING "%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0] >> 16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if (bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if (pHba->channel[bus_no].device[scsi_id] == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for (pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun) {
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0]) & 0xff;
				pDev->flags = (buf[0] >> 8) & 0xff;
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if (scsi_id == -1) {
				printk(KERN_WARNING "Could not find SCSI ID for %s\n",
				       d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba *pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL) {
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if (pHba->in_use) {
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba *pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S *si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	// Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))) {
		printk(KERN_WARNING "dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba *pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	while ((volatile u32)pHba->state & DPTI_STATE_RESET)
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;

	case DPT_CTRLINFO: {
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong)pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if (copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))) {
			printk(KERN_WARNING "%s: Could not copy HbaInfo TO user\n", pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED: {
		u32 value;

		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd *cmd;
	adpt_hba *pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status = 0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL) {
		printk(KERN_WARNING "adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while (readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if (m == EMPTY_QUEUE) {
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if (m == EMPTY_QUEUE) {
				// This really should not happen
				printk(KERN_ERR "dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}
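		/*
		 * The reply port yields the bus address of the reply frame;
		 * a well-behaved frame falls inside our DMA-coherent reply
		 * pool and is translated by offset above, anything else is
		 * a firmware anomaly handled via bus_to_virt() as a last
		 * resort.
		 */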
1870
1871 if (readl(reply) & MSG_FAIL) {
1872 u32 old_m = readl(reply+28);
1873 void __iomem *msg;
1874 u32 old_context;
1875 PDEBUG("%s: Failed message\n",pHba->name);
1876 if(old_m >= 0x100000){
1877 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
1878 writel(m,pHba->reply_port);
1879 continue;
1880 }
1881 // Transaction context is 0 in failed reply frame
1882 msg = pHba->msg_addr_virt + old_m;
1883 old_context = readl(msg+12);
1884 writel(old_context, reply+12);
1885 adpt_send_nop(pHba, old_m);
1886 }
1887 context = readl(reply+8);
1888 if(context & 0x80000000){ // Post wait message
1889 status = readl(reply+16);
1890 if(status >> 24){
1891 status &= 0xffff; /* Get detail status */
1892 } else {
1893 status = I2O_POST_WAIT_OK;
1894 }
1895 /*
1896 * The request tag is one less than the command tag
1897 * as the firmware might treat a 0 tag as invalid
1898 */
1899 cmd = scsi_host_find_tag(pHba->host,
1900 readl(reply + 12) - 1);
1901 if(cmd != NULL) {
1902 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
1903 }
1904 adpt_i2o_post_wait_complete(context, status);
1905 } else { // SCSI message
1906 /*
1907 * The request tag is one less than the command tag
1908 * as the firmware might treat a 0 tag as invalid
1909 */
1910 cmd = scsi_host_find_tag(pHba->host,
1911 readl(reply + 12) - 1);
1912 if(cmd != NULL){
1913 scsi_dma_unmap(cmd);
1914 adpt_i2o_scsi_complete(reply, cmd);
1915 }
1916 }
1917 writel(m, pHba->reply_port);
1918 wmb();
1919 rmb();
1920 }
1921 handled = 1;
1922 out: if(pHba->host)
1923 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1924 return IRQ_RETVAL(handled);
1925 }
1926
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0, sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir = 0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction = 0x04000000;	// SGL OUT
			scsidir = 0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir = 0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			       pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[3] = cmd->request->tag + 1;
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr = msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;	// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;	// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len += sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on its way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}


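/*
 * Allocate the Scsi_Host for this controller and copy in the limits
 * (queue depth, SG table size, channel/ID/LUN maxima) learned from the
 * IOP status block.
 */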
static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
				/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;

	return 0;
}


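/*
 * Translate an I2O SCSI reply frame into a scsi_cmnd result. Offset 16
 * of the reply holds the detailed status (HBA status in the high byte,
 * device status in the low byte), offset 20 the transfer count used for
 * the residual, and offset 28 the start of any autosense data.
 */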
static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) & 0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR << 16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n", pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n", pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28), len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid.
		 * The card rejected it. We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
}


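/*
 * Rescan entry point: under the host lock, re-read the logical
 * configuration table from the IOP and reconcile it with the devices
 * we already know about.
 */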
static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode = adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}


static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev = (struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

	for(i = 0; i < max; i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				printk(KERN_ERR"%s: Could not query device\n", pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* walk the LUN chain for this target */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d == NULL) {
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name, bus_no, scsi_id, scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev = (struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n", pHba->name, pDev->scsi_channel, pDev->scsi_id, pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */



/*
 *	Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}
	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 * Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0)
		return -1;
	/* In READY state */

	if (adpt_i2o_enable_hba(pHba) < 0)
		return -1;

	/* In OPERATIONAL state */
	return 0;
}

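/*
 * Post a UTIL_NOP message in frame m to hand the frame back to the
 * IOP; if the caller passed EMPTY_QUEUE, first wait up to five seconds
 * for a free inbound frame.
 */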
static s32 adpt_send_nop(adpt_hba *pHba, u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n", pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
	writel(0, &msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

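/*
 * Initialize the IOP's outbound (reply) queue: post an
 * I2O_CMD_OUTBOUND_INIT message, poll the DMA'd status byte until the
 * IOP reports completion, then allocate the reply pool and prime the
 * reply FIFO with one MFA per reply frame.
 */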
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n", pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE) << 16 | 0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies, timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n", pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free it because the controller may
			   wake up and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}

	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}


/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */



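/*
 * Fetch the IOP status block (I2O_CMD_STATUS_GET) and derive the
 * inbound/outbound FIFO depths and the SG table size from it. The IOP
 * sets the last byte of the 88-byte block to 0xff when the transfer
 * is complete, so we poll that byte.
 */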
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block = NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory.\n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies + TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);


	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(dma_low(pHba->status_block_pa), &msg[6]);
	writel(dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	while(status_block[87] != 0xff){
		if(time_after(jiffies, timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ", pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n", pHba->status_block->iop_state);
	}
#endif
	return 0;
}

/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2756 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
2757 pHba->name, ret);
2758 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2759 return ret;
2760 }
2761
2762 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2763 pHba->lct_size = pHba->lct->table_size << 2;
2764 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
2765 pHba->lct, pHba->lct_pa);
2766 pHba->lct = NULL;
2767 }
2768 } while (pHba->lct == NULL);
2769
2770 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2771
2772
2773 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2774 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2775 pHba->FwDebugBufferSize = buf[1];
2776 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
2777 pHba->FwDebugBufferSize);
2778 if (pHba->FwDebugBuffer_P) {
2779 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
2780 FW_DEBUG_FLAGS_OFFSET;
2781 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
2782 FW_DEBUG_BLED_OFFSET;
2783 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
2784 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
2785 FW_DEBUG_STR_LENGTH_OFFSET;
2786 pHba->FwDebugBuffer_P += buf[2];
2787 pHba->FwDebugFlags = 0;
2788 }
2789 }
2790
2791 return 0;
2792 }
2793
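/*
 * Build the I2O system table from the current HBA chain, one
 * i2o_sys_tbl_entry per controller that still answers a status get.
 */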
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
	for(count = 0; count < (sys_tbl_len >> 2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 *	 Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0)
	{
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0)
	{
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0)
	{
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags & (1 << 0))
		printk("C");	// ConfigDialog requested
	if(d->lct_data.device_flags & (1 << 1))
		printk("U");	// Multi-user capable
	if(!(d->lct_data.device_flags & (1 << 4)))
		printk("P");	// Peer service enabled!
	if(!(d->lct_data.device_flags & (1 << 5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


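/*
 * Read the Hardware Resource Table, growing the buffer and retrying
 * when the IOP reports more entries than the current allocation holds.
 */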
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);	/* Simple transaction */
		msg[5] = (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 * Query one scalar group value or a whole scalar group.
 * The operation block requests I2O_PARAMS_FIELD_GET for one field of
 * the given group; passing field == -1 requests every field in it.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8) * (8 + buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va + 8, buflen);  /* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
		resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}


/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 *	This function can be used for all UtilParamsGet/Set operations.
 *	The OperationBlock is given in opblk-buffer,
 *	and results are returned in resblk-buffer.
 *	Note that the minimum sized resblk is 8 bytes and contains
 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		  void *opblk_va, dma_addr_t opblk_pa, int oplen,
		  void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1] & 0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}


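/*
 * Quiesce the IOP (I2O_CMD_SYS_QUIESCE); the request is skipped unless
 * the IOP is in the READY or OPERATIONAL state.
 */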
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


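/*
 * Push the freshly built system table to the IOP with
 * I2O_CMD_SYS_TAB_SET; the private memory and private I/O space
 * declarations are sent as zero-length SGL elements.
 */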
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
3224 {
3225 int i;
3226 for (i = 0; i < millisec; i++) {
3227 udelay(1000); /* delay for one millisecond */
3228 }
3229 }
3230
3231 #endif
3232
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
};

static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");
