1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29 #include <linux/mm.h>
30 #include <linux/fs.h>
31 #include <linux/blkdev.h>
32 #include <linux/uaccess.h>
33 #include <asm/io.h>
34 #include <linux/completion.h>
35 #include <linux/delay.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/reboot.h>
39 #include <linux/module.h>
40 #include <linux/list.h>
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/mutex.h>
46 #include <linux/slab.h>
47 #include <scsi/scsicam.h>
48
49 #include "scsi.h"
50 #include <scsi/scsi_host.h>
51
52 #include "megaraid.h"
53
54 #define MEGARAID_MODULE_VERSION "2.00.4"
55
56 MODULE_AUTHOR ("sju@lsil.com");
57 MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
58 MODULE_LICENSE ("GPL");
59 MODULE_VERSION(MEGARAID_MODULE_VERSION);
60
61 static DEFINE_MUTEX(megadev_mutex);
62 static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
63 module_param(max_cmd_per_lun, uint, 0);
64 MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
65
66 static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
67 module_param(max_sectors_per_io, ushort, 0);
68 MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
69
70
71 static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
72 module_param(max_mbox_busy_wait, ushort, 0);
73 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
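/*
 * Example invocation (a sketch only; the module name "megaraid" and the
 * values shown are illustrative assumptions, not recommendations):
 *
 *	modprobe megaraid max_cmd_per_lun=16 max_sectors_per_io=64
 */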
74
75 #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
76 #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
77 #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
78 #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
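/*
 * Doorbell register accessors for memory-mapped controllers, at offsets 0x20
 * (inbound) and 0x2C (outbound) from mmio_base. The driver rings the inbound
 * doorbell to post a mailbox command and reads/clears the outbound doorbell
 * when servicing completions (see issue_scb() and megaraid_isr_memmapped()).
 */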
79
80 /*
81 * Global variables
82 */
83
84 static int hba_count;
85 static adapter_t *hba_soft_state[MAX_CONTROLLERS];
86 static struct proc_dir_entry *mega_proc_dir_entry;
87
88 /* For controller re-ordering */
89 static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
90
91 static long
92 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
93
94 /*
95 * The File Operations structure for the serial/ioctl interface of the driver
96 */
97 static const struct file_operations megadev_fops = {
98 .owner = THIS_MODULE,
99 .unlocked_ioctl = megadev_unlocked_ioctl,
100 .open = megadev_open,
101 .llseek = noop_llseek,
102 };
103
104 /*
105 * Array of structures for storing information about the controllers. This
106 * information is sent to user-level applications when they issue an ioctl
107 * for it.
108 */
109 static struct mcontroller mcontroller[MAX_CONTROLLERS];
110
111 /* The current driver version */
112 static u32 driver_ver = 0x02000000;
113
114 /* major number used by the device for character interface */
115 static int major;
116
117 #define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
118
119
120 /*
121 * Debug variable to print some diagnostic messages
122 */
123 static int trace_level;
124
125 /**
126 * mega_setup_mailbox()
127 * @adapter - pointer to our soft state
128 *
129 * Allocates 8-byte aligned memory for the handshake mailbox.
130 */
131 static int
132 mega_setup_mailbox(adapter_t *adapter)
133 {
134 unsigned long align;
135
136 adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
137 sizeof(mbox64_t), &adapter->una_mbox64_dma);
138
139 if( !adapter->una_mbox64 ) return -1;
140
141 adapter->mbox = &adapter->una_mbox64->mbox;
142
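	/*
	 * Round the mailbox address up to a 16-byte boundary inside the
	 * unaligned allocation. The 64-bit mailbox header then occupies the
	 * 8 bytes immediately before the aligned mailbox, and mbox_dma is
	 * offset by the same amount from the unaligned DMA address.
	 */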
143 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
144 (~0UL ^ 0xFUL));
145
146 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
147
148 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
149
150 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
151
152 /*
153 * Register the mailbox if the controller is an io-mapped controller
154 */
155 if( adapter->flag & BOARD_IOMAP ) {
156
157 outb(adapter->mbox_dma & 0xFF,
158 adapter->host->io_port + MBOX_PORT0);
159
160 outb((adapter->mbox_dma >> 8) & 0xFF,
161 adapter->host->io_port + MBOX_PORT1);
162
163 outb((adapter->mbox_dma >> 16) & 0xFF,
164 adapter->host->io_port + MBOX_PORT2);
165
166 outb((adapter->mbox_dma >> 24) & 0xFF,
167 adapter->host->io_port + MBOX_PORT3);
168
169 outb(ENABLE_MBOX_BYTE,
170 adapter->host->io_port + ENABLE_MBOX_REGION);
171
172 irq_ack(adapter);
173
174 irq_enable(adapter);
175 }
176
177 return 0;
178 }
179
180
181 /*
182 * mega_query_adapter()
183 * @adapter - pointer to our soft state
184 *
185 * Issue the adapter inquiry commands to the controller and find out
186 * information and parameters about the attached devices.
187 */
188 static int
189 mega_query_adapter(adapter_t *adapter)
190 {
191 dma_addr_t prod_info_dma_handle;
192 mega_inquiry3 *inquiry3;
193 u8 raw_mbox[sizeof(struct mbox_out)];
194 mbox_t *mbox;
195 int retval;
196
197 /* Initialize adapter inquiry mailbox */
198
199 mbox = (mbox_t *)raw_mbox;
200
201 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
202 memset(&mbox->m_out, 0, sizeof(raw_mbox));
203
204 /*
205 * Try to issue the Inquiry3 command; if it does not succeed,
206 * issue the older MEGA_MBOXCMD_ADPEXTINQ command and
207 * update the enquiry3 structure
208 */
209 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
210
211 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
212
213 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
214 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
215 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
216
217 /* Issue a blocking command to the card */
218 if ((retval = issue_scb_block(adapter, raw_mbox))) {
219 /* the adapter does not support 40ld */
220
221 mraid_ext_inquiry *ext_inq;
222 mraid_inquiry *inq;
223 dma_addr_t dma_handle;
224
225 ext_inq = pci_alloc_consistent(adapter->dev,
226 sizeof(mraid_ext_inquiry), &dma_handle);
227
228 if( ext_inq == NULL ) return -1;
229
230 inq = &ext_inq->raid_inq;
231
232 mbox->m_out.xferaddr = (u32)dma_handle;
233
234 /*issue old 0x04 command to adapter */
235 mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
236
237 issue_scb_block(adapter, raw_mbox);
238
239 /*
240 * update Enquiry3 and ProductInfo structures with
241 * mraid_inquiry structure
242 */
243 mega_8_to_40ld(inq, inquiry3,
244 (mega_product_info *)&adapter->product_info);
245
246 pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
247 ext_inq, dma_handle);
248
249 } else { /*adapter supports 40ld */
250 adapter->flag |= BOARD_40LD;
251
252 /*
253 * get product_info, which is static information and will be
254 * unchanged
255 */
256 prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
257 &adapter->product_info,
258 sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
259
260 mbox->m_out.xferaddr = prod_info_dma_handle;
261
262 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
263 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
264
265 if ((retval = issue_scb_block(adapter, raw_mbox)))
266 dev_warn(&adapter->dev->dev,
267 "Product_info cmd failed with error: %d\n",
268 retval);
269
270 pci_unmap_single(adapter->dev, prod_info_dma_handle,
271 sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
272 }
273
274
275 /*
276 * kernel scans the channels from 0 to <= max_channel
277 */
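	/*
	 * The physical channels are followed by NVIRT_CHAN virtual channel(s)
	 * on which the logical drives are exposed; max_channel is zero-based,
	 * hence the "- 1".
	 */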
278 adapter->host->max_channel =
279 adapter->product_info.nchannels + NVIRT_CHAN -1;
280
281 adapter->host->max_id = 16; /* max targets per channel */
282
283 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
284
285 adapter->host->cmd_per_lun = max_cmd_per_lun;
286
287 adapter->numldrv = inquiry3->num_ldrv;
288
289 adapter->max_cmds = adapter->product_info.max_commands;
290
291 if(adapter->max_cmds > MAX_COMMANDS)
292 adapter->max_cmds = MAX_COMMANDS;
293
294 adapter->host->can_queue = adapter->max_cmds - 1;
295
296 /*
297 * Get the maximum number of scatter-gather elements supported by this
298 * firmware
299 */
300 mega_get_max_sgl(adapter);
301
302 adapter->host->sg_tablesize = adapter->sglen;
303
304 /* use HP firmware and bios version encoding
305 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
306 right 8 bits making them zero. This 0 value was hardcoded to fix
307 sparse warnings. */
308 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
309 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
310 "%c%d%d.%d%d",
311 adapter->product_info.fw_version[2],
312 0,
313 adapter->product_info.fw_version[1] & 0x0f,
314 0,
315 adapter->product_info.fw_version[0] & 0x0f);
316 snprintf(adapter->bios_version, sizeof(adapter->fw_version),
317 "%c%d%d.%d%d",
318 adapter->product_info.bios_version[2],
319 0,
320 adapter->product_info.bios_version[1] & 0x0f,
321 0,
322 adapter->product_info.bios_version[0] & 0x0f);
323 } else {
324 memcpy(adapter->fw_version,
325 (char *)adapter->product_info.fw_version, 4);
326 adapter->fw_version[4] = 0;
327
328 memcpy(adapter->bios_version,
329 (char *)adapter->product_info.bios_version, 4);
330
331 adapter->bios_version[4] = 0;
332 }
333
334 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
335 adapter->fw_version, adapter->bios_version, adapter->numldrv);
336
337 /*
338 * Do we support extended (>10 bytes) cdbs
339 */
340 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
341 if (adapter->support_ext_cdb)
342 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
343
344
345 return 0;
346 }
347
348 /**
349 * mega_runpendq()
350 * @adapter - pointer to our soft state
351 *
352 * Runs through the list of pending requests.
353 */
354 static inline void
355 mega_runpendq(adapter_t *adapter)
356 {
357 if(!list_empty(&adapter->pending_list))
358 __mega_runpendq(adapter);
359 }
360
361 /*
362 * megaraid_queue()
363 * @scmd - Issue this scsi command
364 * @done - the callback hook into the scsi mid-layer
365 *
366 * The command queuing entry point for the mid-layer.
367 */
368 static int
369 megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
370 {
371 adapter_t *adapter;
372 scb_t *scb;
373 int busy=0;
374 unsigned long flags;
375
376 adapter = (adapter_t *)scmd->device->host->hostdata;
377
378 scmd->scsi_done = done;
379
380
381 /*
382 * Allocate and build a SCB request.
383 * The busy flag will be set if mega_build_cmd() could not
384 * allocate an scb; we return a non-zero status in that case.
385 * NOTE: scb can be NULL even though certain commands completed
386 * successfully, e.g. MODE_SENSE and TEST_UNIT_READY; we
387 * return 0 in that case.
388 */
389
390 spin_lock_irqsave(&adapter->lock, flags);
391 scb = mega_build_cmd(adapter, scmd, &busy);
392 if (!scb)
393 goto out;
394
395 scb->state |= SCB_PENDQ;
396 list_add_tail(&scb->list, &adapter->pending_list);
397
398 /*
399 * Check if the HBA is in quiescent state, e.g., during a
400 * delete logical drive operation. If it is, don't run
401 * the pending_list.
402 */
403 if (atomic_read(&adapter->quiescent) == 0)
404 mega_runpendq(adapter);
405
406 busy = 0;
407 out:
408 spin_unlock_irqrestore(&adapter->lock, flags);
409 return busy;
410 }
411
412 static DEF_SCSI_QCMD(megaraid_queue)
413
414 /**
415 * mega_allocate_scb()
416 * @adapter - pointer to our soft state
417 * @cmd - scsi command from the mid-layer
418 *
419 * Allocate a SCB structure. This is the central structure for controller
420 * commands.
421 */
422 static inline scb_t *
423 mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
424 {
425 struct list_head *head = &adapter->free_list;
426 scb_t *scb;
427
428 /* Unlink command from Free List */
429 if( !list_empty(head) ) {
430
431 scb = list_entry(head->next, scb_t, list);
432
433 list_del_init(head->next);
434
435 scb->state = SCB_ACTIVE;
436 scb->cmd = cmd;
437 scb->dma_type = MEGA_DMA_TYPE_NONE;
438
439 return scb;
440 }
441
442 return NULL;
443 }
444
445 /**
446 * mega_get_ldrv_num()
447 * @adapter - pointer to our soft state
448 * @cmd - scsi mid layer command
449 * @channel - channel on the controller
450 *
451 * Calculate the logical drive number based on the information in scsi command
452 * and the channel number.
453 */
454 static inline int
455 mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
456 {
457 int tgt;
458 int ldrv_num;
459
460 tgt = cmd->device->id;
461
462 if ( tgt > adapter->this_id )
463 tgt--; /* we do not get inquiries for the initiator id */
464
465 ldrv_num = (channel * 15) + tgt;
466
467
468 /*
469 * If we have a logical drive with boot enabled, present it first
470 */
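	/*
	 * For example, with boot_ldrv = 2 the drive that would otherwise be
	 * number 0 is reported as 2, drives 1 and 2 become 0 and 1, and
	 * drives above boot_ldrv keep their numbers.
	 */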
471 if( adapter->boot_ldrv_enabled ) {
472 if( ldrv_num == 0 ) {
473 ldrv_num = adapter->boot_ldrv;
474 }
475 else {
476 if( ldrv_num <= adapter->boot_ldrv ) {
477 ldrv_num--;
478 }
479 }
480 }
481
482 /*
483 * If "delete logical drive" feature is enabled on this controller.
484 * Do only if at least one delete logical drive operation was done.
485 *
486 * Also, after logical drive deletion, instead of logical drive number,
487 * the value returned should be 0x80+logical drive id.
488 *
489 * These is valid only for IO commands.
490 */
491
492 if (adapter->support_random_del && adapter->read_ldidmap )
493 switch (cmd->cmnd[0]) {
494 case READ_6: /* fall through */
495 case WRITE_6: /* fall through */
496 case READ_10: /* fall through */
497 case WRITE_10:
498 ldrv_num += 0x80;
499 }
500
501 return ldrv_num;
502 }
503
504 /**
505 * mega_build_cmd()
506 * @adapter - pointer to our soft state
507 * @cmd - Prepare using this scsi command
508 * @busy - busy flag if no resources
509 *
510 * Prepares a command and scatter gather list for the controller. This routine
511 * also finds out if the command is intended for a logical drive or a
512 * physical device and prepares the controller command accordingly.
513 *
514 * We also re-order the logical drives and physical devices based on their
515 * boot settings.
516 */
517 static scb_t *
518 mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
519 {
520 mega_ext_passthru *epthru;
521 mega_passthru *pthru;
522 scb_t *scb;
523 mbox_t *mbox;
524 u32 seg;
525 char islogical;
526 int max_ldrv_num;
527 int channel = 0;
528 int target = 0;
529 int ldrv_num = 0; /* logical drive number */
530
531 /*
532 * We know what channels our logical drives are on - mega_find_card()
533 */
534 islogical = adapter->logdrv_chan[cmd->device->channel];
535
536 /*
537 * The theory: if a physical drive is chosen for boot, all the physical
538 * devices are exported before the logical drives; otherwise the physical
539 * devices are pushed after the logical drives, in which case the kernel
540 * sees the physical devices on a virtual channel, which is converted
541 * to the actual channel on the HBA.
542 */
543 if( adapter->boot_pdrv_enabled ) {
544 if( islogical ) {
545 /* logical channel */
546 channel = cmd->device->channel -
547 adapter->product_info.nchannels;
548 }
549 else {
550 /* this is physical channel */
551 channel = cmd->device->channel;
552 target = cmd->device->id;
553
554 /*
555 * When booting from a physical disk, that disk needs to
556 * be exposed first. If both the channels are SCSI, then
557 * booting from the second channel is not allowed.
558 */
559 if( target == 0 ) {
560 target = adapter->boot_pdrv_tgt;
561 }
562 else if( target == adapter->boot_pdrv_tgt ) {
563 target = 0;
564 }
565 }
566 }
567 else {
568 if( islogical ) {
569 /* this is the logical channel */
570 channel = cmd->device->channel;
571 }
572 else {
573 /* physical channel */
574 channel = cmd->device->channel - NVIRT_CHAN;
575 target = cmd->device->id;
576 }
577 }
578
579
580 if(islogical) {
581
582 /* have just LUN 0 for each target on virtual channels */
583 if (cmd->device->lun) {
584 cmd->result = (DID_BAD_TARGET << 16);
585 cmd->scsi_done(cmd);
586 return NULL;
587 }
588
589 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
590
591
592 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
593 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
594
595 /*
596 * max_ldrv_num increases by 0x80 if some logical drive was
597 * deleted.
598 */
599 if(adapter->read_ldidmap)
600 max_ldrv_num += 0x80;
601
602 if(ldrv_num > max_ldrv_num ) {
603 cmd->result = (DID_BAD_TARGET << 16);
604 cmd->scsi_done(cmd);
605 return NULL;
606 }
607
608 }
609 else {
610 if( cmd->device->lun > 7) {
611 /*
612 * Do not support lun >7 for physically accessed
613 * devices
614 */
615 cmd->result = (DID_BAD_TARGET << 16);
616 cmd->scsi_done(cmd);
617 return NULL;
618 }
619 }
620
621 /*
622 *
623 * Logical drive commands
624 *
625 */
626 if(islogical) {
627 switch (cmd->cmnd[0]) {
628 case TEST_UNIT_READY:
629 #if MEGA_HAVE_CLUSTERING
630 /*
631 * Do we support clustering and is the support enabled?
632 * If not, always return success.
633 */
634 if( !adapter->has_cluster ) {
635 cmd->result = (DID_OK << 16);
636 cmd->scsi_done(cmd);
637 return NULL;
638 }
639
640 if(!(scb = mega_allocate_scb(adapter, cmd))) {
641 *busy = 1;
642 return NULL;
643 }
644
645 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
646 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
647 scb->raw_mbox[3] = ldrv_num;
648
649 scb->dma_direction = PCI_DMA_NONE;
650
651 return scb;
652 #else
653 cmd->result = (DID_OK << 16);
654 cmd->scsi_done(cmd);
655 return NULL;
656 #endif
657
658 case MODE_SENSE: {
659 char *buf;
660 struct scatterlist *sg;
661
662 sg = scsi_sglist(cmd);
663 buf = kmap_atomic(sg_page(sg)) + sg->offset;
664
665 memset(buf, 0, cmd->cmnd[4]);
666 kunmap_atomic(buf - sg->offset);
667
668 cmd->result = (DID_OK << 16);
669 cmd->scsi_done(cmd);
670 return NULL;
671 }
672
673 case READ_CAPACITY:
674 case INQUIRY:
675
676 if(!(adapter->flag & (1L << cmd->device->channel))) {
677
678 dev_notice(&adapter->dev->dev,
679 "scsi%d: scanning scsi channel %d "
680 "for logical drives\n",
681 adapter->host->host_no,
682 cmd->device->channel);
683
684 adapter->flag |= (1L << cmd->device->channel);
685 }
686
687 /* Allocate a SCB and initialize passthru */
688 if(!(scb = mega_allocate_scb(adapter, cmd))) {
689 *busy = 1;
690 return NULL;
691 }
692 pthru = scb->pthru;
693
694 mbox = (mbox_t *)scb->raw_mbox;
695 memset(mbox, 0, sizeof(scb->raw_mbox));
696 memset(pthru, 0, sizeof(mega_passthru));
697
698 pthru->timeout = 0;
699 pthru->ars = 1;
700 pthru->reqsenselen = 14;
701 pthru->islogical = 1;
702 pthru->logdrv = ldrv_num;
703 pthru->cdblen = cmd->cmd_len;
704 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
705
706 if( adapter->has_64bit_addr ) {
707 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
708 }
709 else {
710 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
711 }
712
713 scb->dma_direction = PCI_DMA_FROMDEVICE;
714
715 pthru->numsgelements = mega_build_sglist(adapter, scb,
716 &pthru->dataxferaddr, &pthru->dataxferlen);
717
718 mbox->m_out.xferaddr = scb->pthru_dma_addr;
719
720 return scb;
721
722 case READ_6:
723 case WRITE_6:
724 case READ_10:
725 case WRITE_10:
726 case READ_12:
727 case WRITE_12:
728
729 /* Allocate a SCB and initialize mailbox */
730 if(!(scb = mega_allocate_scb(adapter, cmd))) {
731 *busy = 1;
732 return NULL;
733 }
734 mbox = (mbox_t *)scb->raw_mbox;
735
736 memset(mbox, 0, sizeof(scb->raw_mbox));
737 mbox->m_out.logdrv = ldrv_num;
738
739 /*
740 * A little hack: 2nd bit is zero for all scsi read
741 * commands and is set for all scsi write commands
742 */
743 if( adapter->has_64bit_addr ) {
744 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
745 MEGA_MBOXCMD_LWRITE64:
746 MEGA_MBOXCMD_LREAD64 ;
747 }
748 else {
749 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
750 MEGA_MBOXCMD_LWRITE:
751 MEGA_MBOXCMD_LREAD ;
752 }
753
754 /*
755 * 6-byte READ(0x08) or WRITE(0x0A) cdb
756 */
757 if( cmd->cmd_len == 6 ) {
758 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
759 mbox->m_out.lba =
760 ((u32)cmd->cmnd[1] << 16) |
761 ((u32)cmd->cmnd[2] << 8) |
762 (u32)cmd->cmnd[3];
763
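				/*
				 * A 6-byte CDB carries only a 21-bit LBA, so
				 * mask the address down to 21 bits (0x1FFFFF).
				 */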
764 mbox->m_out.lba &= 0x1FFFFF;
765
766 #if MEGA_HAVE_STATS
767 /*
768 * Take modulo 0x80, since the logical drive
769 * number increases by 0x80 when a logical
770 * drive was deleted
771 */
772 if (*cmd->cmnd == READ_6) {
773 adapter->nreads[ldrv_num%0x80]++;
774 adapter->nreadblocks[ldrv_num%0x80] +=
775 mbox->m_out.numsectors;
776 } else {
777 adapter->nwrites[ldrv_num%0x80]++;
778 adapter->nwriteblocks[ldrv_num%0x80] +=
779 mbox->m_out.numsectors;
780 }
781 #endif
782 }
783
784 /*
785 * 10-byte READ(0x28) or WRITE(0x2A) cdb
786 */
787 if( cmd->cmd_len == 10 ) {
788 mbox->m_out.numsectors =
789 (u32)cmd->cmnd[8] |
790 ((u32)cmd->cmnd[7] << 8);
791 mbox->m_out.lba =
792 ((u32)cmd->cmnd[2] << 24) |
793 ((u32)cmd->cmnd[3] << 16) |
794 ((u32)cmd->cmnd[4] << 8) |
795 (u32)cmd->cmnd[5];
796
797 #if MEGA_HAVE_STATS
798 if (*cmd->cmnd == READ_10) {
799 adapter->nreads[ldrv_num%0x80]++;
800 adapter->nreadblocks[ldrv_num%0x80] +=
801 mbox->m_out.numsectors;
802 } else {
803 adapter->nwrites[ldrv_num%0x80]++;
804 adapter->nwriteblocks[ldrv_num%0x80] +=
805 mbox->m_out.numsectors;
806 }
807 #endif
808 }
809
810 /*
811 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
812 */
813 if( cmd->cmd_len == 12 ) {
814 mbox->m_out.lba =
815 ((u32)cmd->cmnd[2] << 24) |
816 ((u32)cmd->cmnd[3] << 16) |
817 ((u32)cmd->cmnd[4] << 8) |
818 (u32)cmd->cmnd[5];
819
820 mbox->m_out.numsectors =
821 ((u32)cmd->cmnd[6] << 24) |
822 ((u32)cmd->cmnd[7] << 16) |
823 ((u32)cmd->cmnd[8] << 8) |
824 (u32)cmd->cmnd[9];
825
826 #if MEGA_HAVE_STATS
827 if (*cmd->cmnd == READ_12) {
828 adapter->nreads[ldrv_num%0x80]++;
829 adapter->nreadblocks[ldrv_num%0x80] +=
830 mbox->m_out.numsectors;
831 } else {
832 adapter->nwrites[ldrv_num%0x80]++;
833 adapter->nwriteblocks[ldrv_num%0x80] +=
834 mbox->m_out.numsectors;
835 }
836 #endif
837 }
838
839 /*
840 * If it is a read command
841 */
842 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
843 scb->dma_direction = PCI_DMA_FROMDEVICE;
844 }
845 else {
846 scb->dma_direction = PCI_DMA_TODEVICE;
847 }
848
849 /* Calculate Scatter-Gather info */
850 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
851 (u32 *)&mbox->m_out.xferaddr, &seg);
852
853 return scb;
854
855 #if MEGA_HAVE_CLUSTERING
856 case RESERVE: /* Fall through */
857 case RELEASE:
858
859 /*
860 * Do we support clustering and is the support enabled?
861 */
862 if( ! adapter->has_cluster ) {
863
864 cmd->result = (DID_BAD_TARGET << 16);
865 cmd->scsi_done(cmd);
866 return NULL;
867 }
868
869 /* Allocate a SCB and initialize mailbox */
870 if(!(scb = mega_allocate_scb(adapter, cmd))) {
871 *busy = 1;
872 return NULL;
873 }
874
875 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
876 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
877 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
878
879 scb->raw_mbox[3] = ldrv_num;
880
881 scb->dma_direction = PCI_DMA_NONE;
882
883 return scb;
884 #endif
885
886 default:
887 cmd->result = (DID_BAD_TARGET << 16);
888 cmd->scsi_done(cmd);
889 return NULL;
890 }
891 }
892
893 /*
894 * Passthru drive commands
895 */
896 else {
897 /* Allocate a SCB and initialize passthru */
898 if(!(scb = mega_allocate_scb(adapter, cmd))) {
899 *busy = 1;
900 return NULL;
901 }
902
903 mbox = (mbox_t *)scb->raw_mbox;
904 memset(mbox, 0, sizeof(scb->raw_mbox));
905
906 if( adapter->support_ext_cdb ) {
907
908 epthru = mega_prepare_extpassthru(adapter, scb, cmd,
909 channel, target);
910
911 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
912
913 mbox->m_out.xferaddr = scb->epthru_dma_addr;
914
915 }
916 else {
917
918 pthru = mega_prepare_passthru(adapter, scb, cmd,
919 channel, target);
920
921 /* Initialize mailbox */
922 if( adapter->has_64bit_addr ) {
923 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
924 }
925 else {
926 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
927 }
928
929 mbox->m_out.xferaddr = scb->pthru_dma_addr;
930
931 }
932 return scb;
933 }
934 return NULL;
935 }
936
937
938 /**
939 * mega_prepare_passthru()
940 * @adapter - pointer to our soft state
941 * @scb - our scsi control block
942 * @cmd - scsi command from the mid-layer
943 * @channel - actual channel on the controller
944 * @target - actual id on the controller.
945 *
946 * prepare a command for the scsi physical devices.
947 */
948 static mega_passthru *
949 mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
950 int channel, int target)
951 {
952 mega_passthru *pthru;
953
954 pthru = scb->pthru;
955 memset(pthru, 0, sizeof (mega_passthru));
956
957 /* 0=6sec/1=60sec/2=10min/3=3hrs */
958 pthru->timeout = 2;
959
960 pthru->ars = 1;
961 pthru->reqsenselen = 14;
962 pthru->islogical = 0;
963
964 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
965
966 pthru->target = (adapter->flag & BOARD_40LD) ?
967 (channel << 4) | target : target;
968
969 pthru->cdblen = cmd->cmd_len;
970 pthru->logdrv = cmd->device->lun;
971
972 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
973
974 /* Not sure about the direction */
975 scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
976
977 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
978 switch (cmd->cmnd[0]) {
979 case INQUIRY:
980 case READ_CAPACITY:
981 if(!(adapter->flag & (1L << cmd->device->channel))) {
982
983 dev_notice(&adapter->dev->dev,
984 "scsi%d: scanning scsi channel %d [P%d] "
985 "for physical devices\n",
986 adapter->host->host_no,
987 cmd->device->channel, channel);
988
989 adapter->flag |= (1L << cmd->device->channel);
990 }
991 /* Fall through */
992 default:
993 pthru->numsgelements = mega_build_sglist(adapter, scb,
994 &pthru->dataxferaddr, &pthru->dataxferlen);
995 break;
996 }
997 return pthru;
998 }
999
1000
1001 /**
1002 * mega_prepare_extpassthru()
1003 * @adapter - pointer to our soft state
1004 * @scb - our scsi control block
1005 * @cmd - scsi command from the mid-layer
1006 * @channel - actual channel on the controller
1007 * @target - actual id on the controller.
1008 *
1009 * prepare a command for the scsi physical devices. This routine prepares
1010 * commands for devices which can take extended CDBs (>10 bytes)
1011 */
1012 static mega_ext_passthru *
1013 mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1014 struct scsi_cmnd *cmd,
1015 int channel, int target)
1016 {
1017 mega_ext_passthru *epthru;
1018
1019 epthru = scb->epthru;
1020 memset(epthru, 0, sizeof(mega_ext_passthru));
1021
1022 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1023 epthru->timeout = 2;
1024
1025 epthru->ars = 1;
1026 epthru->reqsenselen = 14;
1027 epthru->islogical = 0;
1028
1029 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1030 epthru->target = (adapter->flag & BOARD_40LD) ?
1031 (channel << 4) | target : target;
1032
1033 epthru->cdblen = cmd->cmd_len;
1034 epthru->logdrv = cmd->device->lun;
1035
1036 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1037
1038 /* Not sure about the direction */
1039 scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
1040
1041 switch(cmd->cmnd[0]) {
1042 case INQUIRY:
1043 case READ_CAPACITY:
1044 if(!(adapter->flag & (1L << cmd->device->channel))) {
1045
1046 dev_notice(&adapter->dev->dev,
1047 "scsi%d: scanning scsi channel %d [P%d] "
1048 "for physical devices\n",
1049 adapter->host->host_no,
1050 cmd->device->channel, channel);
1051
1052 adapter->flag |= (1L << cmd->device->channel);
1053 }
1054 /* Fall through */
1055 default:
1056 epthru->numsgelements = mega_build_sglist(adapter, scb,
1057 &epthru->dataxferaddr, &epthru->dataxferlen);
1058 break;
1059 }
1060
1061 return epthru;
1062 }
1063
1064 static void
1065 __mega_runpendq(adapter_t *adapter)
1066 {
1067 scb_t *scb;
1068 struct list_head *pos, *next;
1069
1070 /* Issue any pending commands to the card */
1071 list_for_each_safe(pos, next, &adapter->pending_list) {
1072
1073 scb = list_entry(pos, scb_t, list);
1074
1075 if( !(scb->state & SCB_ISSUED) ) {
1076
1077 if( issue_scb(adapter, scb) != 0 )
1078 return;
1079 }
1080 }
1081
1082 return;
1083 }
1084
1085
1086 /**
1087 * issue_scb()
1088 * @adapter - pointer to our soft state
1089 * @scb - scsi control block
1090 *
1091 * Post a command to the card if the mailbox is available, otherwise return
1092 * busy. We also take the scb from the pending list if the mailbox is
1093 * available.
1094 */
1095 static int
1096 issue_scb(adapter_t *adapter, scb_t *scb)
1097 {
1098 volatile mbox64_t *mbox64 = adapter->mbox64;
1099 volatile mbox_t *mbox = adapter->mbox;
1100 unsigned int i = 0;
1101
1102 if(unlikely(mbox->m_in.busy)) {
1103 do {
1104 udelay(1);
1105 i++;
1106 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1107
1108 if(mbox->m_in.busy) return -1;
1109 }
1110
1111 /* Copy mailbox data into host structure */
1112 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1113 sizeof(struct mbox_out));
1114
1115 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1116 mbox->m_in.busy = 1; /* Set busy */
1117
1118
1119 /*
1120 * Increment the pending queue counter
1121 */
1122 atomic_inc(&adapter->pend_cmds);
1123
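	/*
	 * For commands that use the extended (64-bit capable) mailbox, the
	 * data address goes into xfer_segment_lo/hi and the 32-bit xferaddr
	 * is set to the 0xFFFFFFFF sentinel; all other commands clear the
	 * extended fields.
	 */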
1124 switch (mbox->m_out.cmd) {
1125 case MEGA_MBOXCMD_LREAD64:
1126 case MEGA_MBOXCMD_LWRITE64:
1127 case MEGA_MBOXCMD_PASSTHRU64:
1128 case MEGA_MBOXCMD_EXTPTHRU:
1129 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1130 mbox64->xfer_segment_hi = 0;
1131 mbox->m_out.xferaddr = 0xFFFFFFFF;
1132 break;
1133 default:
1134 mbox64->xfer_segment_lo = 0;
1135 mbox64->xfer_segment_hi = 0;
1136 }
1137
1138 /*
1139 * post the command
1140 */
1141 scb->state |= SCB_ISSUED;
1142
1143 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1144 mbox->m_in.poll = 0;
1145 mbox->m_in.ack = 0;
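		/*
		 * Ring the inbound doorbell with the mailbox DMA address;
		 * bit 0 tells the firmware that a new mailbox command has
		 * been posted.
		 */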
1146 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1147 }
1148 else {
1149 irq_enable(adapter);
1150 issue_command(adapter);
1151 }
1152
1153 return 0;
1154 }
1155
1156 /*
1157 * Wait until the controller's mailbox is available
1158 */
1159 static inline int
1160 mega_busywait_mbox (adapter_t *adapter)
1161 {
1162 if (adapter->mbox->m_in.busy)
1163 return __mega_busywait_mbox(adapter);
1164 return 0;
1165 }
1166
1167 /**
1168 * issue_scb_block()
1169 * @adapter - pointer to our soft state
1170 * @raw_mbox - the mailbox
1171 *
1172 * Issue a scb in synchronous and non-interrupt mode
1173 */
1174 static int
1175 issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1176 {
1177 volatile mbox64_t *mbox64 = adapter->mbox64;
1178 volatile mbox_t *mbox = adapter->mbox;
1179 u8 byte;
1180
1181 /* Wait until mailbox is free */
1182 if(mega_busywait_mbox (adapter))
1183 goto bug_blocked_mailbox;
1184
1185 /* Copy mailbox data into host structure */
1186 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1187 mbox->m_out.cmdid = 0xFE;
1188 mbox->m_in.busy = 1;
1189
1190 switch (raw_mbox[0]) {
1191 case MEGA_MBOXCMD_LREAD64:
1192 case MEGA_MBOXCMD_LWRITE64:
1193 case MEGA_MBOXCMD_PASSTHRU64:
1194 case MEGA_MBOXCMD_EXTPTHRU:
1195 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1196 mbox64->xfer_segment_hi = 0;
1197 mbox->m_out.xferaddr = 0xFFFFFFFF;
1198 break;
1199 default:
1200 mbox64->xfer_segment_lo = 0;
1201 mbox64->xfer_segment_hi = 0;
1202 }
1203
1204 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1205 mbox->m_in.poll = 0;
1206 mbox->m_in.ack = 0;
1207 mbox->m_in.numstatus = 0xFF;
1208 mbox->m_in.status = 0xFF;
1209 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1210
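		/*
		 * Polled completion handshake: wait for the firmware to fill
		 * in numstatus, wait for the poll byte to become 0x77, then
		 * acknowledge with 0x77, ring doorbell bit 1 and wait for the
		 * firmware to clear it.
		 */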
1211 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1212 cpu_relax();
1213
1214 mbox->m_in.numstatus = 0xFF;
1215
1216 while( (volatile u8)mbox->m_in.poll != 0x77 )
1217 cpu_relax();
1218
1219 mbox->m_in.poll = 0;
1220 mbox->m_in.ack = 0x77;
1221
1222 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1223
1224 while(RDINDOOR(adapter) & 0x2)
1225 cpu_relax();
1226 }
1227 else {
1228 irq_disable(adapter);
1229 issue_command(adapter);
1230
1231 while (!((byte = irq_state(adapter)) & INTR_VALID))
1232 cpu_relax();
1233
1234 set_irq_state(adapter, byte);
1235 irq_enable(adapter);
1236 irq_ack(adapter);
1237 }
1238
1239 return mbox->m_in.status;
1240
1241 bug_blocked_mailbox:
1242 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1243 udelay (1000);
1244 return -1;
1245 }
1246
1247
1248 /**
1249 * megaraid_isr_iomapped()
1250 * @irq - irq
1251 * @devp - pointer to our soft state
1252 *
1253 * Interrupt service routine for io-mapped controllers.
1254 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1255 * and service the completed commands.
1256 */
1257 static irqreturn_t
1258 megaraid_isr_iomapped(int irq, void *devp)
1259 {
1260 adapter_t *adapter = devp;
1261 unsigned long flags;
1262 u8 status;
1263 u8 nstatus;
1264 u8 completed[MAX_FIRMWARE_STATUS];
1265 u8 byte;
1266 int handled = 0;
1267
1268
1269 /*
1270 * Loop as long as the F/W has more commands for us to complete.
1271 */
1272 spin_lock_irqsave(&adapter->lock, flags);
1273
1274 do {
1275 /* Check if a valid interrupt is pending */
1276 byte = irq_state(adapter);
1277 if( (byte & VALID_INTR_BYTE) == 0 ) {
1278 /*
1279 * No more pending commands
1280 */
1281 goto out_unlock;
1282 }
1283 set_irq_state(adapter, byte);
1284
1285 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1286 == 0xFF)
1287 cpu_relax();
1288 adapter->mbox->m_in.numstatus = 0xFF;
1289
1290 status = adapter->mbox->m_in.status;
1291
1292 /*
1293 * decrement the pending queue counter
1294 */
1295 atomic_sub(nstatus, &adapter->pend_cmds);
1296
1297 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1298 nstatus);
1299
1300 /* Acknowledge interrupt */
1301 irq_ack(adapter);
1302
1303 mega_cmd_done(adapter, completed, nstatus, status);
1304
1305 mega_rundoneq(adapter);
1306
1307 handled = 1;
1308
1309 /* Loop through any pending requests */
1310 if(atomic_read(&adapter->quiescent) == 0) {
1311 mega_runpendq(adapter);
1312 }
1313
1314 } while(1);
1315
1316 out_unlock:
1317
1318 spin_unlock_irqrestore(&adapter->lock, flags);
1319
1320 return IRQ_RETVAL(handled);
1321 }
1322
1323
1324 /**
1325 * megaraid_isr_memmapped()
1326 * @irq - irq
1327 * @devp - pointer to our soft state
1328 *
1329 * Interrupt service routine for memory-mapped controllers.
1330 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1331 * and service the completed commands.
1332 */
1333 static irqreturn_t
1334 megaraid_isr_memmapped(int irq, void *devp)
1335 {
1336 adapter_t *adapter = devp;
1337 unsigned long flags;
1338 u8 status;
1339 u32 dword = 0;
1340 u8 nstatus;
1341 u8 completed[MAX_FIRMWARE_STATUS];
1342 int handled = 0;
1343
1344
1345 /*
1346 * Loop as long as the F/W has more commands for us to complete.
1347 */
1348 spin_lock_irqsave(&adapter->lock, flags);
1349
1350 do {
1351 /* Check if a valid interrupt is pending */
1352 dword = RDOUTDOOR(adapter);
1353 if(dword != 0x10001234) {
1354 /*
1355 * No more pending commands
1356 */
1357 goto out_unlock;
1358 }
1359 WROUTDOOR(adapter, 0x10001234);
1360
1361 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1362 == 0xFF) {
1363 cpu_relax();
1364 }
1365 adapter->mbox->m_in.numstatus = 0xFF;
1366
1367 status = adapter->mbox->m_in.status;
1368
1369 /*
1370 * decrement the pending queue counter
1371 */
1372 atomic_sub(nstatus, &adapter->pend_cmds);
1373
1374 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1375 nstatus);
1376
1377 /* Acknowledge interrupt */
1378 WRINDOOR(adapter, 0x2);
1379
1380 handled = 1;
1381
1382 while( RDINDOOR(adapter) & 0x02 )
1383 cpu_relax();
1384
1385 mega_cmd_done(adapter, completed, nstatus, status);
1386
1387 mega_rundoneq(adapter);
1388
1389 /* Loop through any pending requests */
1390 if(atomic_read(&adapter->quiescent) == 0) {
1391 mega_runpendq(adapter);
1392 }
1393
1394 } while(1);
1395
1396 out_unlock:
1397
1398 spin_unlock_irqrestore(&adapter->lock, flags);
1399
1400 return IRQ_RETVAL(handled);
1401 }
1402 /**
1403 * mega_cmd_done()
1404 * @adapter - pointer to our soft state
1405 * @completed - array of ids of completed commands
1406 * @nstatus - number of completed commands
1407 * @status - status of the last command completed
1408 *
1409 * Complete the commands and call the scsi mid-layer callback hooks.
1410 */
1411 static void
1412 mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1413 {
1414 mega_ext_passthru *epthru = NULL;
1415 struct scatterlist *sgl;
1416 struct scsi_cmnd *cmd = NULL;
1417 mega_passthru *pthru = NULL;
1418 mbox_t *mbox = NULL;
1419 u8 c;
1420 scb_t *scb;
1421 int islogical;
1422 int cmdid;
1423 int i;
1424
1425 /*
1426 * for all the commands completed, call the mid-layer callback routine
1427 * and free the scb.
1428 */
1429 for( i = 0; i < nstatus; i++ ) {
1430
1431 cmdid = completed[i];
1432
1433 /*
1434 * Only free SCBs for the commands coming down from the
1435 * mid-layer, not for those issued internally.
1436 *
1437 * For internal commands, restore the status returned by the
1438 * firmware so that the user can interpret it.
1439 */
1440 if (cmdid == CMDID_INT_CMDS) {
1441 scb = &adapter->int_scb;
1442
1443 list_del_init(&scb->list);
1444 scb->state = SCB_FREE;
1445
1446 adapter->int_status = status;
1447 complete(&adapter->int_waitq);
1448 } else {
1449 scb = &adapter->scb_list[cmdid];
1450
1451 /*
1452 * Make sure f/w has completed a valid command
1453 */
1454 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1455 dev_crit(&adapter->dev->dev, "invalid command "
1456 "Id %d, scb->state:%x, scsi cmd:%p\n",
1457 cmdid, scb->state, scb->cmd);
1458
1459 continue;
1460 }
1461
1462 /*
1463 * Was an abort issued for this command?
1464 */
1465 if( scb->state & SCB_ABORT ) {
1466
1467 dev_warn(&adapter->dev->dev,
1468 "aborted cmd [%x] complete\n",
1469 scb->idx);
1470
1471 scb->cmd->result = (DID_ABORT << 16);
1472
1473 list_add_tail(SCSI_LIST(scb->cmd),
1474 &adapter->completed_list);
1475
1476 mega_free_scb(adapter, scb);
1477
1478 continue;
1479 }
1480
1481 /*
1482 * Was a reset issued for this command
1483 */
1484 if( scb->state & SCB_RESET ) {
1485
1486 dev_warn(&adapter->dev->dev,
1487 "reset cmd [%x] complete\n",
1488 scb->idx);
1489
1490 scb->cmd->result = (DID_RESET << 16);
1491
1492 list_add_tail(SCSI_LIST(scb->cmd),
1493 &adapter->completed_list);
1494
1495 mega_free_scb (adapter, scb);
1496
1497 continue;
1498 }
1499
1500 cmd = scb->cmd;
1501 pthru = scb->pthru;
1502 epthru = scb->epthru;
1503 mbox = (mbox_t *)scb->raw_mbox;
1504
1505 #if MEGA_HAVE_STATS
1506 {
1507
1508 int logdrv = mbox->m_out.logdrv;
1509
1510 islogical = adapter->logdrv_chan[cmd->channel];
1511 /*
1512 * Maintain an error counter for the logical drive.
1513 * Some application like SNMP agent need such
1514 * statistics
1515 */
1516 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1517 cmd->cmnd[0] == READ_10 ||
1518 cmd->cmnd[0] == READ_12)) {
1519 /*
1520 * Logical drive number increases by 0x80 when
1521 * a logical drive is deleted
1522 */
1523 adapter->rd_errors[logdrv%0x80]++;
1524 }
1525
1526 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1527 cmd->cmnd[0] == WRITE_10 ||
1528 cmd->cmnd[0] == WRITE_12)) {
1529 /*
1530 * Logical drive number increases by 0x80 when
1531 * a logical drive is deleted
1532 */
1533 adapter->wr_errors[logdrv%0x80]++;
1534 }
1535
1536 }
1537 #endif
1538 }
1539
1540 /*
1541 * Do not report the presence of hard disks on a RAID channel:
1542 * if an INQUIRY was sent to a non-logical device and the
1543 * returned data indicates a hard disk or removable hard disk,
1544 * the request should return failure! - PJ
1545 */
1546 islogical = adapter->logdrv_chan[cmd->device->channel];
1547 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1548
1549 sgl = scsi_sglist(cmd);
1550 if( sg_page(sgl) ) {
1551 c = *(unsigned char *) sg_virt(&sgl[0]);
1552 } else {
1553 dev_warn(&adapter->dev->dev, "invalid sg\n");
1554 c = 0;
1555 }
1556
1557 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1558 ((c & 0x1F ) == TYPE_DISK)) {
1559 status = 0xF0;
1560 }
1561 }
1562
1563 /* clear result; otherwise, success returns corrupt value */
1564 cmd->result = 0;
1565
1566 /* Convert MegaRAID status to Linux error code */
1567 switch (status) {
1568 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1569 cmd->result |= (DID_OK << 16);
1570 break;
1571
1572 case 0x02: /* ERROR_ABORTED, i.e.
1573 SCSI_STATUS_CHECK_CONDITION */
1574
1575 /* set sense_buffer and result fields */
1576 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1577 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1578
1579 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1580 14);
1581
1582 cmd->result = (DRIVER_SENSE << 24) |
1583 (DID_OK << 16) |
1584 (CHECK_CONDITION << 1);
1585 }
1586 else {
1587 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1588
1589 memcpy(cmd->sense_buffer,
1590 epthru->reqsensearea, 14);
1591
1592 cmd->result = (DRIVER_SENSE << 24) |
1593 (DID_OK << 16) |
1594 (CHECK_CONDITION << 1);
1595 } else {
1596 cmd->sense_buffer[0] = 0x70;
1597 cmd->sense_buffer[2] = ABORTED_COMMAND;
1598 cmd->result |= (CHECK_CONDITION << 1);
1599 }
1600 }
1601 break;
1602
1603 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1604 SCSI_STATUS_BUSY */
1605 cmd->result |= (DID_BUS_BUSY << 16) | status;
1606 break;
1607
1608 default:
1609 #if MEGA_HAVE_CLUSTERING
1610 /*
1611 * If TEST_UNIT_READY fails, we know
1612 * MEGA_RESERVATION_STATUS failed
1613 */
1614 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1615 cmd->result |= (DID_ERROR << 16) |
1616 (RESERVATION_CONFLICT << 1);
1617 }
1618 else
1619 /*
1620 * Error code returned is 1 if Reserve or Release
1621 * failed or the input parameter is invalid
1622 */
1623 if( status == 1 &&
1624 (cmd->cmnd[0] == RESERVE ||
1625 cmd->cmnd[0] == RELEASE) ) {
1626
1627 cmd->result |= (DID_ERROR << 16) |
1628 (RESERVATION_CONFLICT << 1);
1629 }
1630 else
1631 #endif
1632 cmd->result |= (DID_BAD_TARGET << 16)|status;
1633 }
1634
1635 mega_free_scb(adapter, scb);
1636
1637 /* Add Scsi_Command to end of completed queue */
1638 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1639 }
1640 }
1641
1642
1643 /*
1644 * mega_rundoneq()
1645 *
1646 * Run through the list of completed requests and finish them
1647 */
1648 static void
1649 mega_rundoneq (adapter_t *adapter)
1650 {
1651 struct scsi_cmnd *cmd;
1652 struct list_head *pos;
1653
1654 list_for_each(pos, &adapter->completed_list) {
1655
1656 struct scsi_pointer* spos = (struct scsi_pointer *)pos;
1657
1658 cmd = list_entry(spos, struct scsi_cmnd, SCp);
1659 cmd->scsi_done(cmd);
1660 }
1661
1662 INIT_LIST_HEAD(&adapter->completed_list);
1663 }
1664
1665
1666 /*
1667 * Free an SCB structure
1668 * Note: We assume the scsi command associated with this scb has not been freed yet.
1669 */
1670 static void
1671 mega_free_scb(adapter_t *adapter, scb_t *scb)
1672 {
1673 switch( scb->dma_type ) {
1674
1675 case MEGA_DMA_TYPE_NONE:
1676 break;
1677
1678 case MEGA_SGLIST:
1679 scsi_dma_unmap(scb->cmd);
1680 break;
1681 default:
1682 break;
1683 }
1684
1685 /*
1686 * Remove from the pending list
1687 */
1688 list_del_init(&scb->list);
1689
1690 /* Link the scb back into free list */
1691 scb->state = SCB_FREE;
1692 scb->cmd = NULL;
1693
1694 list_add(&scb->list, &adapter->free_list);
1695 }
1696
1697
1698 static int
1699 __mega_busywait_mbox (adapter_t *adapter)
1700 {
1701 volatile mbox_t *mbox = adapter->mbox;
1702 long counter;
1703
1704 for (counter = 0; counter < 10000; counter++) {
1705 if (!mbox->m_in.busy)
1706 return 0;
1707 udelay(100);
1708 cond_resched();
1709 }
1710 return -1; /* give up after 1 second */
1711 }
1712
1713 /*
1714 * Copy the scatter-gather list info into the controller SG list
1715 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
1716 */
1717 static int
1718 mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1719 {
1720 struct scatterlist *sg;
1721 struct scsi_cmnd *cmd;
1722 int sgcnt;
1723 int idx;
1724
1725 cmd = scb->cmd;
1726
1727 /*
1728 * Copy Scatter-Gather list info into controller structure.
1729 *
1730 * The number of sg elements returned must not exceed our limit
1731 */
1732 sgcnt = scsi_dma_map(cmd);
1733
1734 scb->dma_type = MEGA_SGLIST;
1735
1736 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1737
1738 *len = 0;
1739
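	/*
	 * Fast path: with a single scatter-gather element on a board without
	 * 64-bit addressing, the buffer address and length are passed
	 * directly in the mailbox and no SG list is needed (0 SG elements
	 * are returned).
	 */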
1740 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1741 sg = scsi_sglist(cmd);
1742 scb->dma_h_bulkdata = sg_dma_address(sg);
1743 *buf = (u32)scb->dma_h_bulkdata;
1744 *len = sg_dma_len(sg);
1745 return 0;
1746 }
1747
1748 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1749 if (adapter->has_64bit_addr) {
1750 scb->sgl64[idx].address = sg_dma_address(sg);
1751 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1752 } else {
1753 scb->sgl[idx].address = sg_dma_address(sg);
1754 *len += scb->sgl[idx].length = sg_dma_len(sg);
1755 }
1756 }
1757
1758 /* Reset pointer and length fields */
1759 *buf = scb->sgl_dma_addr;
1760
1761 /* Return count of SG requests */
1762 return sgcnt;
1763 }
1764
1765
1766 /*
1767 * mega_8_to_40ld()
1768 *
1769 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1770 * Enquiry3 structures for later use
1771 */
1772 static void
1773 mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1774 mega_product_info *product_info)
1775 {
1776 int i;
1777
1778 product_info->max_commands = inquiry->adapter_info.max_commands;
1779 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1780 product_info->nchannels = inquiry->adapter_info.nchannels;
1781
1782 for (i = 0; i < 4; i++) {
1783 product_info->fw_version[i] =
1784 inquiry->adapter_info.fw_version[i];
1785
1786 product_info->bios_version[i] =
1787 inquiry->adapter_info.bios_version[i];
1788 }
1789 enquiry3->cache_flush_interval =
1790 inquiry->adapter_info.cache_flush_interval;
1791
1792 product_info->dram_size = inquiry->adapter_info.dram_size;
1793
1794 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1795
1796 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1797 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1798 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1799 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1800 }
1801
1802 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1803 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1804 }
1805
1806 static inline void
1807 mega_free_sgl(adapter_t *adapter)
1808 {
1809 scb_t *scb;
1810 int i;
1811
1812 for(i = 0; i < adapter->max_cmds; i++) {
1813
1814 scb = &adapter->scb_list[i];
1815
1816 if( scb->sgl64 ) {
1817 pci_free_consistent(adapter->dev,
1818 sizeof(mega_sgl64) * adapter->sglen,
1819 scb->sgl64,
1820 scb->sgl_dma_addr);
1821
1822 scb->sgl64 = NULL;
1823 }
1824
1825 if( scb->pthru ) {
1826 pci_free_consistent(adapter->dev, sizeof(mega_passthru),
1827 scb->pthru, scb->pthru_dma_addr);
1828
1829 scb->pthru = NULL;
1830 }
1831
1832 if( scb->epthru ) {
1833 pci_free_consistent(adapter->dev,
1834 sizeof(mega_ext_passthru),
1835 scb->epthru, scb->epthru_dma_addr);
1836
1837 scb->epthru = NULL;
1838 }
1839
1840 }
1841 }
1842
1843
1844 /*
1845 * Get information about the card/driver
1846 */
1847 const char *
1848 megaraid_info(struct Scsi_Host *host)
1849 {
1850 static char buffer[512];
1851 adapter_t *adapter;
1852
1853 adapter = (adapter_t *)host->hostdata;
1854
1855 sprintf (buffer,
1856 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1857 adapter->fw_version, adapter->product_info.max_commands,
1858 adapter->host->max_id, adapter->host->max_channel,
1859 (u32)adapter->host->max_lun);
1860 return buffer;
1861 }
1862
1863 /*
1864 * Abort a previous SCSI request. Only commands on the pending list can be
1865 * aborted. All the commands issued to the F/W must complete.
1866 */
1867 static int
1868 megaraid_abort(struct scsi_cmnd *cmd)
1869 {
1870 adapter_t *adapter;
1871 int rval;
1872
1873 adapter = (adapter_t *)cmd->device->host->hostdata;
1874
1875 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1876
1877 /*
1878 * This is required here so that any completed requests are
1879 * communicated over to the mid layer.
1880 */
1881 mega_rundoneq(adapter);
1882
1883 return rval;
1884 }
1885
1886
1887 static int
1888 megaraid_reset(struct scsi_cmnd *cmd)
1889 {
1890 adapter_t *adapter;
1891 megacmd_t mc;
1892 int rval;
1893
1894 adapter = (adapter_t *)cmd->device->host->hostdata;
1895
1896 #if MEGA_HAVE_CLUSTERING
1897 mc.cmd = MEGA_CLUSTER_CMD;
1898 mc.opcode = MEGA_RESET_RESERVATIONS;
1899
1900 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1901 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1902 }
1903 else {
1904 dev_info(&adapter->dev->dev, "reservation reset\n");
1905 }
1906 #endif
1907
1908 spin_lock_irq(&adapter->lock);
1909
1910 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
1911
1912 /*
1913 * This is required here so that any completed requests are
1914 * communicated over to the mid layer.
1915 */
1916 mega_rundoneq(adapter);
1917 spin_unlock_irq(&adapter->lock);
1918
1919 return rval;
1920 }
1921
1922 /**
1923 * megaraid_abort_and_reset()
1924 * @adapter - megaraid soft state
1925 * @cmd - scsi command to be aborted or reset
1926 * @aor - abort or reset flag
1927 *
1928 * Try to locate the scsi command in the pending queue. If it is found and
1929 * has not been issued to the controller, abort/reset it. Otherwise return failure.
1930 */
1931 static int
1932 megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1933 {
1934 struct list_head *pos, *next;
1935 scb_t *scb;
1936
1937 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
1938 (aor == SCB_ABORT)? "ABORTING":"RESET",
1939 cmd->cmnd[0], cmd->device->channel,
1940 cmd->device->id, (u32)cmd->device->lun);
1941
1942 if(list_empty(&adapter->pending_list))
1943 return FAILED;
1944
1945 list_for_each_safe(pos, next, &adapter->pending_list) {
1946
1947 scb = list_entry(pos, scb_t, list);
1948
1949 if (scb->cmd == cmd) { /* Found command */
1950
1951 scb->state |= aor;
1952
1953 /*
1954 * Check if this command has firmware ownership. If
1955 * yes, we cannot reset this command. Whenever f/w
1956 * completes this command, we will return appropriate
1957 * status from ISR.
1958 */
1959 if( scb->state & SCB_ISSUED ) {
1960
1961 dev_warn(&adapter->dev->dev,
1962 "%s[%x], fw owner\n",
1963 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1964 scb->idx);
1965
1966 return FAILED;
1967 }
1968 else {
1969
1970 /*
1971 * Not yet issued! Remove from the pending
1972 * list
1973 */
1974 dev_warn(&adapter->dev->dev,
1975 "%s-[%x], driver owner\n",
1976 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1977 scb->idx);
1978
1979 mega_free_scb(adapter, scb);
1980
1981 if( aor == SCB_ABORT ) {
1982 cmd->result = (DID_ABORT << 16);
1983 }
1984 else {
1985 cmd->result = (DID_RESET << 16);
1986 }
1987
1988 list_add_tail(SCSI_LIST(cmd),
1989 &adapter->completed_list);
1990
1991 return SUCCESS;
1992 }
1993 }
1994 }
1995
1996 return FAILED;
1997 }
1998
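/*
 * A note on why a throwaway pci_dev copy is used below (inferred from usage,
 * not documented by the firmware interface): the mailbox carries 32-bit data
 * addresses, so buffers for adapter inquiries and ioctls are allocated
 * through a local copy of the pci_dev with a 32-bit DMA mask to keep them
 * 32-bit addressable.
 */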
1999 static inline int
2000 make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
2001 {
2002 *pdev = pci_alloc_dev(NULL);
2003
2004 if( *pdev == NULL ) return -1;
2005
2006 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
2007
2008 if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
2009 kfree(*pdev);
2010 return -1;
2011 }
2012
2013 return 0;
2014 }
2015
2016 static inline void
2017 free_local_pdev(struct pci_dev *pdev)
2018 {
2019 kfree(pdev);
2020 }
2021
2022 /**
2023 * mega_allocate_inquiry()
2024 * @dma_handle - handle returned for dma address
2025 * @pdev - handle to pci device
2026 *
2027 * allocates memory for inquiry structure
2028 */
2029 static inline void *
2030 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2031 {
2032 return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
2033 }
2034
2035
2036 static inline void
2037 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2038 {
2039 pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
2040 }
2041
2042
2043 #ifdef CONFIG_PROC_FS
2044 /* Following code handles /proc fs */
2045
2046 /**
2047 * proc_show_config()
2048 * @m - Synthetic file construction data
2049 * @v - File iterator
2050 *
2051 * Display configuration information about the controller.
2052 */
2053 static int
2054 proc_show_config(struct seq_file *m, void *v)
2055 {
2056
2057 adapter_t *adapter = m->private;
2058
2059 seq_puts(m, MEGARAID_VERSION);
2060 if(adapter->product_info.product_name[0])
2061 seq_printf(m, "%s\n", adapter->product_info.product_name);
2062
2063 seq_puts(m, "Controller Type: ");
2064
2065 if( adapter->flag & BOARD_MEMMAP )
2066 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2067 else
2068 seq_puts(m, "418/428/434\n");
2069
2070 if(adapter->flag & BOARD_40LD)
2071 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2072
2073 if(adapter->flag & BOARD_64BIT)
2074 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2075 if( adapter->has_64bit_addr )
2076 seq_puts(m, "Controller using 64-bit memory addressing\n");
2077 else
2078 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2079
2080 seq_printf(m, "Base = %08lx, Irq = %d, ",
2081 adapter->base, adapter->host->irq);
2082
2083 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2084 adapter->numldrv, adapter->product_info.nchannels);
2085
2086 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2087 adapter->fw_version, adapter->bios_version,
2088 adapter->product_info.dram_size);
2089
2090 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2091 adapter->product_info.max_commands, adapter->max_cmds);
2092
2093 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2094 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2095 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2096 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2097 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2098 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2099 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2100 seq_printf(m, "quiescent = %d\n",
2101 atomic_read(&adapter->quiescent));
2102 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2103
2104 seq_puts(m, "\nModule Parameters:\n");
2105 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2106 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2107 return 0;
2108 }
2109
2110 /**
2111 * proc_show_stat()
2112 * @m - Synthetic file construction data
2113 * @v - File iterator
2114 *
2115 * Display statistical information about the I/O activity.
2116 */
2117 static int
2118 proc_show_stat(struct seq_file *m, void *v)
2119 {
2120 adapter_t *adapter = m->private;
2121 #if MEGA_HAVE_STATS
2122 int i;
2123 #endif
2124
2125 seq_puts(m, "Statistical Information for this controller\n");
2126 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2127 #if MEGA_HAVE_STATS
2128 for(i = 0; i < adapter->numldrv; i++) {
2129 seq_printf(m, "Logical Drive %d:\n", i);
2130 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2131 adapter->nreads[i], adapter->nwrites[i]);
2132 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2133 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2134 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2135 adapter->rd_errors[i], adapter->wr_errors[i]);
2136 }
2137 #else
2138 seq_puts(m, "IO and error counters not compiled in driver.\n");
2139 #endif
2140 return 0;
2141 }
2142
2143
2144 /**
2145 * proc_show_mbox()
2146 * @m - Synthetic file construction data
2147 * @v - File iterator
2148 *
2149 * Display mailbox information for the last command issued. This information
2150 * is good for debugging.
2151 */
2152 static int
2153 proc_show_mbox(struct seq_file *m, void *v)
2154 {
2155 adapter_t *adapter = m->private;
2156 volatile mbox_t *mbox = adapter->mbox;
2157
2158 seq_puts(m, "Contents of Mail Box Structure\n");
2159 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2160 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2161 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2162 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2163 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2164 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2165 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2166 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2167 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2168 return 0;
2169 }
2170
2171
2172 /**
2173 * proc_show_rebuild_rate()
2174 * @m - Synthetic file construction data
2175 * @v - File iterator
2176 *
2177 * Display current rebuild rate
2178 */
2179 static int
2180 proc_show_rebuild_rate(struct seq_file *m, void *v)
2181 {
2182 adapter_t *adapter = m->private;
2183 dma_addr_t dma_handle;
2184 caddr_t inquiry;
2185 struct pci_dev *pdev;
2186
2187 if( make_local_pdev(adapter, &pdev) != 0 )
2188 return 0;
2189
2190 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2191 goto free_pdev;
2192
2193 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2194 seq_puts(m, "Adapter inquiry failed.\n");
2195 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2196 goto free_inquiry;
2197 }
2198
2199 if( adapter->flag & BOARD_40LD )
2200 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2201 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2202 else
2203 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2204 ((mraid_ext_inquiry *)
2205 inquiry)->raid_inq.adapter_info.rebuild_rate);
2206
2207 free_inquiry:
2208 mega_free_inquiry(inquiry, dma_handle, pdev);
2209 free_pdev:
2210 free_local_pdev(pdev);
2211 return 0;
2212 }
2213
2214
2215 /**
2216 * proc_show_battery()
2217 * @m - Synthetic file construction data
2218 * @v - File iterator
2219 *
2220 * Display information about the battery module on the controller.
2221 */
2222 static int
2223 proc_show_battery(struct seq_file *m, void *v)
2224 {
2225 adapter_t *adapter = m->private;
2226 dma_addr_t dma_handle;
2227 caddr_t inquiry;
2228 struct pci_dev *pdev;
2229 u8 battery_status;
2230
2231 if( make_local_pdev(adapter, &pdev) != 0 )
2232 return 0;
2233
2234 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2235 goto free_pdev;
2236
2237 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2238 seq_puts(m, "Adapter inquiry failed.\n");
2239 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2240 goto free_inquiry;
2241 }
2242
2243 if( adapter->flag & BOARD_40LD ) {
2244 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2245 }
2246 else {
2247 battery_status = ((mraid_ext_inquiry *)inquiry)->
2248 raid_inq.adapter_info.battery_status;
2249 }
2250
2251 /*
2252 * Decode the battery status
2253 */
2254 seq_printf(m, "Battery Status:[%d]", battery_status);
2255
2256 if(battery_status == MEGA_BATT_CHARGE_DONE)
2257 seq_puts(m, " Charge Done");
2258
2259 if(battery_status & MEGA_BATT_MODULE_MISSING)
2260 seq_puts(m, " Module Missing");
2261
2262 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2263 seq_puts(m, " Low Voltage");
2264
2265 if(battery_status & MEGA_BATT_TEMP_HIGH)
2266 seq_puts(m, " Temperature High");
2267
2268 if(battery_status & MEGA_BATT_PACK_MISSING)
2269 seq_puts(m, " Pack Missing");
2270
2271 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2272 seq_puts(m, " Charge In-progress");
2273
2274 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2275 seq_puts(m, " Charge Fail");
2276
2277 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2278 seq_puts(m, " Cycles Exceeded");
2279
2280 seq_putc(m, '\n');
2281
2282 free_inquiry:
2283 mega_free_inquiry(inquiry, dma_handle, pdev);
2284 free_pdev:
2285 free_local_pdev(pdev);
2286 return 0;
2287 }
2288
2289
2290 /*
2291 * Display scsi inquiry
2292 */
2293 static void
2294 mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2295 {
2296 int i;
2297
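/*
 * Standard INQUIRY data layout: bytes 8-15 carry the vendor id,
 * bytes 16-31 the product id and bytes 32-35 the revision level.
 */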
2298 seq_puts(m, " Vendor: ");
2299 seq_write(m, scsi_inq + 8, 8);
2300 seq_puts(m, " Model: ");
2301 seq_write(m, scsi_inq + 16, 16);
2302 seq_puts(m, " Rev: ");
2303 seq_write(m, scsi_inq + 32, 4);
2304 seq_putc(m, '\n');
2305
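/*
 * The peripheral device type is in the low 5 bits of byte 0;
 * byte 2 carries the ANSI SCSI version in its low 3 bits.
 */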
2306 i = scsi_inq[0] & 0x1f;
2307 seq_printf(m, " Type: %s ", scsi_device_type(i));
2308
2309 seq_printf(m, " ANSI SCSI revision: %02x",
2310 scsi_inq[2] & 0x07);
2311
2312 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2313 seq_puts(m, " CCS\n");
2314 else
2315 seq_putc(m, '\n');
2316 }
2317
2318 /**
2319 * proc_show_pdrv()
2320 * @m - Synthetic file construction data
2321 * @adapter - pointer to our soft state
2322 * @channel - channel whose physical drives are displayed
2323 *
2324 * Display information about the physical drives.
2325 */
2326 static int
2327 proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2328 {
2329 dma_addr_t dma_handle;
2330 char *scsi_inq;
2331 dma_addr_t scsi_inq_dma_handle;
2332 caddr_t inquiry;
2333 struct pci_dev *pdev;
2334 u8 *pdrv_state;
2335 u8 state;
2336 int tgt;
2337 int max_channels;
2338 int i;
2339
2340 if( make_local_pdev(adapter, &pdev) != 0 )
2341 return 0;
2342
2343 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2344 goto free_pdev;
2345
2346 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2347 seq_puts(m, "Adapter inquiry failed.\n");
2348 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2349 goto free_inquiry;
2350 }
2351
2352
2353 scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
2354 if( scsi_inq == NULL ) {
2355 seq_puts(m, "memory not available for scsi inq.\n");
2356 goto free_inquiry;
2357 }
2358
2359 if( adapter->flag & BOARD_40LD ) {
2360 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2361 }
2362 else {
2363 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2364 raid_inq.pdrv_info.pdrv_state;
2365 }
2366
2367 max_channels = adapter->product_info.nchannels;
2368
2369 if( channel >= max_channels ) {
2370 goto free_pci;
2371 }
2372
2373 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2374
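/*
 * pdrv_state[] holds one status byte per physical device,
 * 16 targets per channel; the low nibble is the drive state.
 */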
2375 i = channel*16 + tgt;
2376
2377 state = *(pdrv_state + i);
2378 switch( state & 0x0F ) {
2379 case PDRV_ONLINE:
2380 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2381 channel, tgt);
2382 break;
2383
2384 case PDRV_FAILED:
2385 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2386 channel, tgt);
2387 break;
2388
2389 case PDRV_RBLD:
2390 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2391 channel, tgt);
2392 break;
2393
2394 case PDRV_HOTSPARE:
2395 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2396 channel, tgt);
2397 break;
2398
2399 default:
2400 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2401 channel, tgt);
2402 break;
2403 }
2404
2405 /*
2406 * This interface displays inquiries for disk drives
2407 * only. Inquiries for logical drives and non-disk
2408 * devices are available through /proc/scsi/scsi
2409 */
2410 memset(scsi_inq, 0, 256);
2411 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2412 scsi_inq_dma_handle) ||
2413 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2414 continue;
2415 }
2416
2417 /*
2418 * Check for overflow. We print less than 240
2419 * characters for inquiry
2420 */
2421 seq_puts(m, ".\n");
2422 mega_print_inquiry(m, scsi_inq);
2423 }
2424
2425 free_pci:
2426 pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
2427 free_inquiry:
2428 mega_free_inquiry(inquiry, dma_handle, pdev);
2429 free_pdev:
2430 free_local_pdev(pdev);
2431 return 0;
2432 }
2433
2434 /**
2435 * proc_show_pdrv_ch0()
2436 * @m - Synthetic file construction data
2437 * @v - File iterator
2438 *
2439 * Display information about the physical drives on physical channel 0.
2440 */
2441 static int
2442 proc_show_pdrv_ch0(struct seq_file *m, void *v)
2443 {
2444 return proc_show_pdrv(m, m->private, 0);
2445 }
2446
2447
2448 /**
2449 * proc_show_pdrv_ch1()
2450 * @m - Synthetic file construction data
2451 * @v - File iterator
2452 *
2453 * Display information about the physical drives on physical channel 1.
2454 */
2455 static int
2456 proc_show_pdrv_ch1(struct seq_file *m, void *v)
2457 {
2458 return proc_show_pdrv(m, m->private, 1);
2459 }
2460
2461
2462 /**
2463 * proc_show_pdrv_ch2()
2464 * @m - Synthetic file construction data
2465 * @v - File iterator
2466 *
2467 * Display information about the physical drives on physical channel 2.
2468 */
2469 static int
2470 proc_show_pdrv_ch2(struct seq_file *m, void *v)
2471 {
2472 return proc_show_pdrv(m, m->private, 2);
2473 }
2474
2475
2476 /**
2477 * proc_show_pdrv_ch3()
2478 * @m - Synthetic file construction data
2479 * @v - File iterator
2480 *
2481 * Display information about the physical drives on physical channel 3.
2482 */
2483 static int
2484 proc_show_pdrv_ch3(struct seq_file *m, void *v)
2485 {
2486 return proc_show_pdrv(m, m->private, 3);
2487 }
2488
2489
2490 /**
2491 * proc_show_rdrv()
2492 * @m - Synthetic file construction data
2493 * @adapter - pointer to our soft state
2494 * @start - starting logical drive to display
2495 * @end - ending logical drive to display
2496 *
2497 * We do not print the inquiry information since it's already available through
2498 * /proc/scsi/scsi interface
2499 */
2500 static int
2501 proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end)
2502 {
2503 dma_addr_t dma_handle;
2504 logdrv_param *lparam;
2505 megacmd_t mc;
2506 char *disk_array;
2507 dma_addr_t disk_array_dma_handle;
2508 caddr_t inquiry;
2509 struct pci_dev *pdev;
2510 u8 *rdrv_state;
2511 int num_ldrv;
2512 u32 array_sz;
2513 int i;
2514
2515 if( make_local_pdev(adapter, &pdev) != 0 )
2516 return 0;
2517
2518 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2519 goto free_pdev;
2520
2521 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2522 seq_puts(m, "Adapter inquiry failed.\n");
2523 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2524 goto free_inquiry;
2525 }
2526
2527 memset(&mc, 0, sizeof(megacmd_t));
2528
2529 if( adapter->flag & BOARD_40LD ) {
2530 array_sz = sizeof(disk_array_40ld);
2531
2532 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2533
2534 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2535 }
2536 else {
2537 array_sz = sizeof(disk_array_8ld);
2538
2539 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2540 raid_inq.logdrv_info.ldrv_state;
2541
2542 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2543 raid_inq.logdrv_info.num_ldrv;
2544 }
2545
2546 disk_array = pci_alloc_consistent(pdev, array_sz,
2547 &disk_array_dma_handle);
2548
2549 if( disk_array == NULL ) {
2550 seq_puts(m, "memory not available.\n");
2551 goto free_inquiry;
2552 }
2553
2554 mc.xferaddr = (u32)disk_array_dma_handle;
2555
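/*
 * 40LD firmware reads the configuration through the new-config
 * DCMD; 8LD firmware is tried with the newer 8LD read-config
 * command first, falling back to the original one.
 */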
2556 if( adapter->flag & BOARD_40LD ) {
2557 mc.cmd = FC_NEW_CONFIG;
2558 mc.opcode = OP_DCMD_READ_CONFIG;
2559
2560 if( mega_internal_command(adapter, &mc, NULL) ) {
2561 seq_puts(m, "40LD read config failed.\n");
2562 goto free_pci;
2563 }
2564
2565 }
2566 else {
2567 mc.cmd = NEW_READ_CONFIG_8LD;
2568
2569 if( mega_internal_command(adapter, &mc, NULL) ) {
2570 mc.cmd = READ_CONFIG_8LD;
2571 if( mega_internal_command(adapter, &mc, NULL) ) {
2572 seq_puts(m, "8LD read config failed.\n");
2573 goto free_pci;
2574 }
2575 }
2576 }
2577
2578 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2579
2580 if( adapter->flag & BOARD_40LD ) {
2581 lparam =
2582 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2583 }
2584 else {
2585 lparam =
2586 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2587 }
2588
2589 /*
2590 * Check for overflow. We print less than 240 characters for
2591 * information about each logical drive.
2592 */
2593 seq_printf(m, "Logical drive:%2d:, ", i);
2594
2595 switch( rdrv_state[i] & 0x0F ) {
2596 case RDRV_OFFLINE:
2597 seq_puts(m, "state: offline");
2598 break;
2599 case RDRV_DEGRADED:
2600 seq_puts(m, "state: degraded");
2601 break;
2602 case RDRV_OPTIMAL:
2603 seq_puts(m, "state: optimal");
2604 break;
2605 case RDRV_DELETED:
2606 seq_puts(m, "state: deleted");
2607 break;
2608 default:
2609 seq_puts(m, "state: unknown");
2610 break;
2611 }
2612
2613 /*
2614 * Check if check consistency or initialization is going on
2615 * for this logical drive.
2616 */
2617 if( (rdrv_state[i] & 0xF0) == 0x20 )
2618 seq_puts(m, ", check-consistency in progress");
2619 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2620 seq_puts(m, ", initialization in progress");
2621
2622 seq_putc(m, '\n');
2623
2624 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2625 seq_printf(m, "RAID level:%3d, ", lparam->level);
2626 seq_printf(m, "Stripe size:%3d, ",
2627 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2628 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2629
2630 seq_puts(m, "Read Policy: ");
2631 switch(lparam->read_ahead) {
2632 case NO_READ_AHEAD:
2633 seq_puts(m, "No read ahead, ");
2634 break;
2635 case READ_AHEAD:
2636 seq_puts(m, "Read ahead, ");
2637 break;
2638 case ADAP_READ_AHEAD:
2639 seq_puts(m, "Adaptive, ");
2640 break;
2641
2642 }
2643
2644 seq_puts(m, "Write Policy: ");
2645 switch(lparam->write_mode) {
2646 case WRMODE_WRITE_THRU:
2647 seq_puts(m, "Write thru, ");
2648 break;
2649 case WRMODE_WRITE_BACK:
2650 seq_puts(m, "Write back, ");
2651 break;
2652 }
2653
2654 seq_puts(m, "Cache Policy: ");
2655 switch(lparam->direct_io) {
2656 case CACHED_IO:
2657 seq_puts(m, "Cached IO\n\n");
2658 break;
2659 case DIRECT_IO:
2660 seq_puts(m, "Direct IO\n\n");
2661 break;
2662 }
2663 }
2664
2665 free_pci:
2666 pci_free_consistent(pdev, array_sz, disk_array,
2667 disk_array_dma_handle);
2668 free_inquiry:
2669 mega_free_inquiry(inquiry, dma_handle, pdev);
2670 free_pdev:
2671 free_local_pdev(pdev);
2672 return 0;
2673 }
2674
2675 /**
2676 * proc_show_rdrv_10()
2677 * @m - Synthetic file construction data
2678 * @v - File iterator
2679 *
2680 * Display real time information about the logical drives 0 through 9.
2681 */
2682 static int
2683 proc_show_rdrv_10(struct seq_file *m, void *v)
2684 {
2685 return proc_show_rdrv(m, m->private, 0, 9);
2686 }
2687
2688
2689 /**
2690 * proc_show_rdrv_20()
2691 * @m - Synthetic file construction data
2692 * @v - File iterator
2693 *
2694 * Display real time information about the logical drives 10 through 19.
2695 */
2696 static int
2697 proc_show_rdrv_20(struct seq_file *m, void *v)
2698 {
2699 return proc_show_rdrv(m, m->private, 10, 19);
2700 }
2701
2702
2703 /**
2704 * proc_show_rdrv_30()
2705 * @m - Synthetic file construction data
2706 * @v - File iterator
2707 *
2708 * Display real time information about the logical drives 20 through 29.
2709 */
2710 static int
2711 proc_show_rdrv_30(struct seq_file *m, void *v)
2712 {
2713 return proc_show_rdrv(m, m->private, 20, 29);
2714 }
2715
2716
2717 /**
2718 * proc_show_rdrv_40()
2719 * @m - Synthetic file construction data
2720 * @v - File iterator
2721 *
2722 * Display real time information about the logical drives 30 through 39.
2723 */
2724 static int
2725 proc_show_rdrv_40(struct seq_file *m, void *v)
2726 {
2727 return proc_show_rdrv(m, m->private, 30, 39);
2728 }
2729
2730 /**
2731 * mega_create_proc_entry()
2732 * @index - index in soft state array
2733 * @parent - parent node for this /proc entry
2734 *
2735 * Creates /proc entries for our controllers.
2736 */
2737 static void
2738 mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2739 {
2740 adapter_t *adapter = hba_soft_state[index];
2741 struct proc_dir_entry *dir;
2742 u8 string[16];
2743
2744 sprintf(string, "hba%d", adapter->host->host_no);
2745 dir = proc_mkdir_data(string, 0, parent, adapter);
2746 if (!dir) {
2747 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2748 return;
2749 }
2750
2751 proc_create_single_data("config", S_IRUSR, dir,
2752 proc_show_config, adapter);
2753 proc_create_single_data("stat", S_IRUSR, dir,
2754 proc_show_stat, adapter);
2755 proc_create_single_data("mailbox", S_IRUSR, dir,
2756 proc_show_mbox, adapter);
2757 #if MEGA_HAVE_ENH_PROC
2758 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2759 proc_show_rebuild_rate, adapter);
2760 proc_create_single_data("battery-status", S_IRUSR, dir,
2761 proc_show_battery, adapter);
2762 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2763 proc_show_pdrv_ch0, adapter);
2764 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2765 proc_show_pdrv_ch1, adapter);
2766 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2767 proc_show_pdrv_ch2, adapter);
2768 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2769 proc_show_pdrv_ch3, adapter);
2770 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2771 proc_show_rdrv_10, adapter);
2772 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2773 proc_show_rdrv_20, adapter);
2774 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2775 proc_show_rdrv_30, adapter);
2776 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2777 proc_show_rdrv_40, adapter);
2778 #endif
2779 }
2780
2781 #else
2782 static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2783 {
2784 }
2785 #endif
2786
2787
2788 /**
2789 * megaraid_biosparam()
2790 *
2791 * Return the disk geometry for a particular disk
2792 */
2793 static int
2794 megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2795 sector_t capacity, int geom[])
2796 {
2797 adapter_t *adapter;
2798 unsigned char *bh;
2799 int heads;
2800 int sectors;
2801 int cylinders;
2802 int rval;
2803
2804 /* Get pointer to host config structure */
2805 adapter = (adapter_t *)sdev->host->hostdata;
2806
2807 if (IS_RAID_CH(adapter, sdev->channel)) {
2808 /* Default heads (64) & sectors (32) */
2809 heads = 64;
2810 sectors = 32;
2811 cylinders = (ulong)capacity / (heads * sectors);
2812
2813 /*
2814 * Handle extended translation size for logical drives
2815 * > 1Gb
2816 */
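/* e.g. 0x400000 sectors (2GB): 255 heads * 63 sectors = 16065, so 261 cylinders */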
2817 if ((ulong)capacity >= 0x200000) {
2818 heads = 255;
2819 sectors = 63;
2820 cylinders = (ulong)capacity / (heads * sectors);
2821 }
2822
2823 /* return result */
2824 geom[0] = heads;
2825 geom[1] = sectors;
2826 geom[2] = cylinders;
2827 }
2828 else {
2829 bh = scsi_bios_ptable(bdev);
2830
2831 if( bh ) {
2832 rval = scsi_partsize(bh, capacity,
2833 &geom[2], &geom[0], &geom[1]);
2834 kfree(bh);
2835 if( rval != -1 )
2836 return rval;
2837 }
2838
2839 dev_info(&adapter->dev->dev,
2840 "invalid partition on this disk on channel %d\n",
2841 sdev->channel);
2842
2843 /* Default heads (64) & sectors (32) */
2844 heads = 64;
2845 sectors = 32;
2846 cylinders = (ulong)capacity / (heads * sectors);
2847
2848 /* Handle extended translation size for logical drives > 1Gb */
2849 if ((ulong)capacity >= 0x200000) {
2850 heads = 255;
2851 sectors = 63;
2852 cylinders = (ulong)capacity / (heads * sectors);
2853 }
2854
2855 /* return result */
2856 geom[0] = heads;
2857 geom[1] = sectors;
2858 geom[2] = cylinders;
2859 }
2860
2861 return 0;
2862 }
2863
2864 /**
2865 * mega_init_scb()
2866 * @adapter - pointer to our soft state
2867 *
2868 * Allocate memory for the various pointers in the scb structures:
2869 * scatter-gather list pointer, passthru and extended passthru structure
2870 * pointers.
2871 */
2872 static int
2873 mega_init_scb(adapter_t *adapter)
2874 {
2875 scb_t *scb;
2876 int i;
2877
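/*
 * Clear all the per-scb pointers first so that mega_free_sgl()
 * can safely be called if any of the allocations below fail.
 */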
2878 for( i = 0; i < adapter->max_cmds; i++ ) {
2879
2880 scb = &adapter->scb_list[i];
2881
2882 scb->sgl64 = NULL;
2883 scb->sgl = NULL;
2884 scb->pthru = NULL;
2885 scb->epthru = NULL;
2886 }
2887
2888 for( i = 0; i < adapter->max_cmds; i++ ) {
2889
2890 scb = &adapter->scb_list[i];
2891
2892 scb->idx = i;
2893
2894 scb->sgl64 = pci_alloc_consistent(adapter->dev,
2895 sizeof(mega_sgl64) * adapter->sglen,
2896 &scb->sgl_dma_addr);
2897
2898 scb->sgl = (mega_sglist *)scb->sgl64;
2899
2900 if( !scb->sgl ) {
2901 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2902 mega_free_sgl(adapter);
2903 return -1;
2904 }
2905
2906 scb->pthru = pci_alloc_consistent(adapter->dev,
2907 sizeof(mega_passthru),
2908 &scb->pthru_dma_addr);
2909
2910 if( !scb->pthru ) {
2911 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2912 mega_free_sgl(adapter);
2913 return -1;
2914 }
2915
2916 scb->epthru = pci_alloc_consistent(adapter->dev,
2917 sizeof(mega_ext_passthru),
2918 &scb->epthru_dma_addr);
2919
2920 if( !scb->epthru ) {
2921 dev_warn(&adapter->dev->dev,
2922 "Can't allocate extended passthru\n");
2923 mega_free_sgl(adapter);
2924 return -1;
2925 }
2926
2927
2928 scb->dma_type = MEGA_DMA_TYPE_NONE;
2929
2930 /*
2931 * Link to free list
2932 * lock not required since we are loading the driver, so no
2933 * commands are possible right now.
2934 */
2935 scb->state = SCB_FREE;
2936 scb->cmd = NULL;
2937 list_add(&scb->list, &adapter->free_list);
2938 }
2939
2940 return 0;
2941 }
2942
2943
2944 /**
2945 * megadev_open()
2946 * @inode - unused
2947 * @filep - unused
2948 *
2949 * Routines for the character/ioctl interface to the driver. Find out if this
2950 * is a valid open.
2951 */
2952 static int
2953 megadev_open (struct inode *inode, struct file *filep)
2954 {
2955 /*
2956 * Only allow superuser to access private ioctl interface
2957 */
2958 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2959
2960 return 0;
2961 }
2962
2963
2964 /**
2965 * megadev_ioctl()
2966 * @inode - Our device inode
2967 * @filep - unused
2968 * @cmd - ioctl command
2969 * @arg - user buffer
2970 *
2971 * ioctl entry point for our private ioctl interface. We move the data in from
2972 * the user space, prepare the command (if necessary, convert the old MIMD
2973 * ioctl to new ioctl command), and issue a synchronous command to the
2974 * controller.
2975 */
2976 static int
2977 megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2978 {
2979 adapter_t *adapter;
2980 nitioctl_t uioc;
2981 int adapno;
2982 int rval;
2983 mega_passthru __user *upthru; /* user address for passthru */
2984 mega_passthru *pthru; /* copy user passthru here */
2985 dma_addr_t pthru_dma_hndl;
2986 void *data = NULL; /* data to be transferred */
2987 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2988 megacmd_t mc;
2989 megastat_t __user *ustats;
2990 int num_ldrv;
2991 u32 uxferaddr = 0;
2992 struct pci_dev *pdev;
2993
2994 ustats = NULL; /* avoid compilation warnings */
2995 num_ldrv = 0;
2996
2997 /*
2998 * Make sure only USCSICMD is issued through this interface.
2999 * MIMD applications would still fire different commands.
3000 */
3001 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
3002 return -EINVAL;
3003 }
3004
3005 /*
3006 * Check and convert a possible MIMD command to NIT command.
3007 * mega_m_to_n() copies the data from the user space, so we do not
3008 * have to do it here.
3009 * NOTE: We will need some user address to copy out the data, therefore
3010 * the interface layer will also provide us with the required user
3011 * addresses.
3012 */
3013 memset(&uioc, 0, sizeof(nitioctl_t));
3014 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
3015 return rval;
3016
3017
3018 switch( uioc.opcode ) {
3019
3020 case GET_DRIVER_VER:
3021 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3022 return (-EFAULT);
3023
3024 break;
3025
3026 case GET_N_ADAP:
3027 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3028 return (-EFAULT);
3029
3030 /*
3031 * Shucks. MIMD interface returns a positive value for number
3032 * of adapters. TODO: Change it to return 0 when there is no
3033 * application using the mimd interface.
3034 */
3035 return hba_count;
3036
3037 case GET_ADAP_INFO:
3038
3039 /*
3040 * Which adapter
3041 */
3042 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3043 return (-ENODEV);
3044
3045 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3046 sizeof(struct mcontroller)) )
3047 return (-EFAULT);
3048 break;
3049
3050 #if MEGA_HAVE_STATS
3051
3052 case GET_STATS:
3053 /*
3054 * Which adapter
3055 */
3056 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3057 return (-ENODEV);
3058
3059 adapter = hba_soft_state[adapno];
3060
3061 ustats = uioc.uioc_uaddr;
3062
3063 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3064 return (-EFAULT);
3065
3066 /*
3067 * Check for the validity of the logical drive number
3068 */
3069 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3070
3071 if( copy_to_user(ustats->nreads, adapter->nreads,
3072 num_ldrv*sizeof(u32)) )
3073 return -EFAULT;
3074
3075 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3076 num_ldrv*sizeof(u32)) )
3077 return -EFAULT;
3078
3079 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3080 num_ldrv*sizeof(u32)) )
3081 return -EFAULT;
3082
3083 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3084 num_ldrv*sizeof(u32)) )
3085 return -EFAULT;
3086
3087 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3088 num_ldrv*sizeof(u32)) )
3089 return -EFAULT;
3090
3091 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3092 num_ldrv*sizeof(u32)) )
3093 return -EFAULT;
3094
3095 return 0;
3096
3097 #endif
3098 case MBOX_CMD:
3099
3100 /*
3101 * Which adapter
3102 */
3103 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3104 return (-ENODEV);
3105
3106 adapter = hba_soft_state[adapno];
3107
3108 /*
3109 * Deletion of logical drive is a special case. The adapter
3110 * should be quiescent before this command is issued.
3111 */
3112 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3113 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3114
3115 /*
3116 * Do we support this feature
3117 */
3118 if( !adapter->support_random_del ) {
3119 dev_warn(&adapter->dev->dev, "logdrv "
3120 "delete on non-supporting F/W\n");
3121
3122 return (-EINVAL);
3123 }
3124
3125 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3126
3127 if( rval == 0 ) {
3128 memset(&mc, 0, sizeof(megacmd_t));
3129
3130 mc.status = rval;
3131
3132 rval = mega_n_to_m((void __user *)arg, &mc);
3133 }
3134
3135 return rval;
3136 }
3137 /*
3138 * This interface only supports the regular passthru commands.
3139 * Reject extended passthru and 64-bit passthru
3140 */
3141 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3142 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3143
3144 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3145
3146 return (-EINVAL);
3147 }
3148
3149 /*
3150 * For all internal commands, the buffer must be allocated in
3151 * <4GB address range
3152 */
3153 if( make_local_pdev(adapter, &pdev) != 0 )
3154 return -EIO;
3155
3156 /* Is it a passthru command or a DCMD */
3157 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3158 /* Passthru commands */
3159
3160 pthru = pci_alloc_consistent(pdev,
3161 sizeof(mega_passthru),
3162 &pthru_dma_hndl);
3163
3164 if( pthru == NULL ) {
3165 free_local_pdev(pdev);
3166 return (-ENOMEM);
3167 }
3168
3169 /*
3170 * The user passthru structure
3171 */
3172 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3173
3174 /*
3175 * Copy in the user passthru here.
3176 */
3177 if( copy_from_user(pthru, upthru,
3178 sizeof(mega_passthru)) ) {
3179
3180 pci_free_consistent(pdev,
3181 sizeof(mega_passthru), pthru,
3182 pthru_dma_hndl);
3183
3184 free_local_pdev(pdev);
3185
3186 return (-EFAULT);
3187 }
3188
3189 /*
3190 * Is there a data transfer
3191 */
3192 if( pthru->dataxferlen ) {
3193 data = pci_alloc_consistent(pdev,
3194 pthru->dataxferlen,
3195 &data_dma_hndl);
3196
3197 if( data == NULL ) {
3198 pci_free_consistent(pdev,
3199 sizeof(mega_passthru),
3200 pthru,
3201 pthru_dma_hndl);
3202
3203 free_local_pdev(pdev);
3204
3205 return (-ENOMEM);
3206 }
3207
3208 /*
3209 * Save the user address and point the kernel
3210 * address at just allocated memory
3211 */
3212 uxferaddr = pthru->dataxferaddr;
3213 pthru->dataxferaddr = data_dma_hndl;
3214 }
3215
3216
3217 /*
3218 * Is data coming down-stream
3219 */
3220 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3221 /*
3222 * Get the user data
3223 */
3224 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3225 pthru->dataxferlen) ) {
3226 rval = (-EFAULT);
3227 goto freemem_and_return;
3228 }
3229 }
3230
3231 memset(&mc, 0, sizeof(megacmd_t));
3232
3233 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3234 mc.xferaddr = (u32)pthru_dma_hndl;
3235
3236 /*
3237 * Issue the command
3238 */
3239 mega_internal_command(adapter, &mc, pthru);
3240
3241 rval = mega_n_to_m((void __user *)arg, &mc);
3242
3243 if( rval ) goto freemem_and_return;
3244
3245
3246 /*
3247 * Is data going up-stream
3248 */
3249 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3250 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3251 pthru->dataxferlen) ) {
3252 rval = (-EFAULT);
3253 }
3254 }
3255
3256 /*
3257 * Send the request sense data also, irrespective of
3258 * whether the user has asked for it or not.
3259 */
3260 if (copy_to_user(upthru->reqsensearea,
3261 pthru->reqsensearea, 14))
3262 rval = -EFAULT;
3263
3264 freemem_and_return:
3265 if( pthru->dataxferlen ) {
3266 pci_free_consistent(pdev,
3267 pthru->dataxferlen, data,
3268 data_dma_hndl);
3269 }
3270
3271 pci_free_consistent(pdev, sizeof(mega_passthru),
3272 pthru, pthru_dma_hndl);
3273
3274 free_local_pdev(pdev);
3275
3276 return rval;
3277 }
3278 else {
3279 /* DCMD commands */
3280
3281 /*
3282 * Is there a data transfer
3283 */
3284 if( uioc.xferlen ) {
3285 data = pci_alloc_consistent(pdev,
3286 uioc.xferlen, &data_dma_hndl);
3287
3288 if( data == NULL ) {
3289 free_local_pdev(pdev);
3290 return (-ENOMEM);
3291 }
3292
3293 uxferaddr = MBOX(uioc)->xferaddr;
3294 }
3295
3296 /*
3297 * Is data coming down-stream
3298 */
3299 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3300 /*
3301 * Get the user data
3302 */
3303 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3304 uioc.xferlen) ) {
3305
3306 pci_free_consistent(pdev,
3307 uioc.xferlen,
3308 data, data_dma_hndl);
3309
3310 free_local_pdev(pdev);
3311
3312 return (-EFAULT);
3313 }
3314 }
3315
3316 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3317
3318 mc.xferaddr = (u32)data_dma_hndl;
3319
3320 /*
3321 * Issue the command
3322 */
3323 mega_internal_command(adapter, &mc, NULL);
3324
3325 rval = mega_n_to_m((void __user *)arg, &mc);
3326
3327 if( rval ) {
3328 if( uioc.xferlen ) {
3329 pci_free_consistent(pdev,
3330 uioc.xferlen, data,
3331 data_dma_hndl);
3332 }
3333
3334 free_local_pdev(pdev);
3335
3336 return rval;
3337 }
3338
3339 /*
3340 * Is data going up-stream
3341 */
3342 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3343 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3344 uioc.xferlen) ) {
3345
3346 rval = (-EFAULT);
3347 }
3348 }
3349
3350 if( uioc.xferlen ) {
3351 pci_free_consistent(pdev,
3352 uioc.xferlen, data,
3353 data_dma_hndl);
3354 }
3355
3356 free_local_pdev(pdev);
3357
3358 return rval;
3359 }
3360
3361 default:
3362 return (-EINVAL);
3363 }
3364
3365 return 0;
3366 }
3367
3368 static long
3369 megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3370 {
3371 int ret;
3372
3373 mutex_lock(&megadev_mutex);
3374 ret = megadev_ioctl(filep, cmd, arg);
3375 mutex_unlock(&megadev_mutex);
3376
3377 return ret;
3378 }
3379
3380 /**
3381 * mega_m_to_n()
3382 * @arg - user address
3383 * @uioc - new ioctl structure
3384 *
3385 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl
3386 * structure
3387 *
3388 * Converts the older mimd ioctl structure to newer NIT structure
3389 */
3390 static int
3391 mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3392 {
3393 struct uioctl_t uioc_mimd;
3394 char signature[8] = {0};
3395 u8 opcode;
3396 u8 subopcode;
3397
3398
3399 /*
3400 * check if the application conforms to NIT. We do not have to do much
3401 * in that case.
3402 * We exploit the fact that the signature is stored in the very
3403 * beginning of the structure.
3404 */
3405
3406 if( copy_from_user(signature, arg, 7) )
3407 return (-EFAULT);
3408
3409 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3410
3411 /*
3412 * NOTE NOTE: The nit ioctl is still under flux because of
3413 * change of mailbox definition, in HPE. No applications yet
3414 * use this interface and let's not have applications use this
3415 * interface till the new specifications are in place.
3416 */
3417 return -EINVAL;
3418 #if 0
3419 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3420 return (-EFAULT);
3421 return 0;
3422 #endif
3423 }
3424
3425 /*
3426 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3427 *
3428 * Get the user ioctl structure
3429 */
3430 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3431 return (-EFAULT);
3432
3433
3434 /*
3435 * Get the opcode and subopcode for the commands
3436 */
3437 opcode = uioc_mimd.ui.fcs.opcode;
3438 subopcode = uioc_mimd.ui.fcs.subopcode;
3439
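/*
 * Opcode 0x82 carries the driver/adapter information queries;
 * opcodes 0x81 and 0x80 are mailbox commands, 0x81 with an
 * explicit transfer length and 0x80 using the larger of the
 * input and output lengths.
 */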
3440 switch (opcode) {
3441 case 0x82:
3442
3443 switch (subopcode) {
3444
3445 case MEGAIOC_QDRVRVER: /* Query driver version */
3446 uioc->opcode = GET_DRIVER_VER;
3447 uioc->uioc_uaddr = uioc_mimd.data;
3448 break;
3449
3450 case MEGAIOC_QNADAP: /* Get # of adapters */
3451 uioc->opcode = GET_N_ADAP;
3452 uioc->uioc_uaddr = uioc_mimd.data;
3453 break;
3454
3455 case MEGAIOC_QADAPINFO: /* Get adapter information */
3456 uioc->opcode = GET_ADAP_INFO;
3457 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3458 uioc->uioc_uaddr = uioc_mimd.data;
3459 break;
3460
3461 default:
3462 return(-EINVAL);
3463 }
3464
3465 break;
3466
3467
3468 case 0x81:
3469
3470 uioc->opcode = MBOX_CMD;
3471 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3472
3473 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3474
3475 uioc->xferlen = uioc_mimd.ui.fcs.length;
3476
3477 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3478 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3479
3480 break;
3481
3482 case 0x80:
3483
3484 uioc->opcode = MBOX_CMD;
3485 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3486
3487 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3488
3489 /*
3490 * Use the larger of the input and output data lengths as xferlen
3491 */
3492 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3493 uioc_mimd.outlen : uioc_mimd.inlen;
3494
3495 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3496 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3497
3498 break;
3499
3500 default:
3501 return (-EINVAL);
3502
3503 }
3504
3505 return 0;
3506 }
3507
3508 /*
3509 * mega_n_to_m()
3510 * @arg - user address
3511 * @mc - mailbox command
3512 *
3513 * Updates the status information to the application, depending on whether the
3514 * application conforms to the older mimd ioctl interface or the newer NIT ioctl interface
3515 */
3516 static int
3517 mega_n_to_m(void __user *arg, megacmd_t *mc)
3518 {
3519 nitioctl_t __user *uiocp;
3520 megacmd_t __user *umc;
3521 mega_passthru __user *upthru;
3522 struct uioctl_t __user *uioc_mimd;
3523 char signature[8] = {0};
3524
3525 /*
3526 * check if the application conforms to NIT.
3527 */
3528 if( copy_from_user(signature, arg, 7) )
3529 return -EFAULT;
3530
3531 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3532
3533 uiocp = arg;
3534
3535 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3536 return (-EFAULT);
3537
3538 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3539
3540 umc = MBOX_P(uiocp);
3541
3542 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3543 return -EFAULT;
3544
3545 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3546 return (-EFAULT);
3547 }
3548 }
3549 else {
3550 uioc_mimd = arg;
3551
3552 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3553 return (-EFAULT);
3554
3555 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3556
3557 umc = (megacmd_t __user *)uioc_mimd->mbox;
3558
3559 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3560 return (-EFAULT);
3561
3562 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3563 return (-EFAULT);
3564 }
3565 }
3566
3567 return 0;
3568 }
3569
3570
3571 /*
3572 * MEGARAID 'FW' commands.
3573 */
3574
3575 /**
3576 * mega_is_bios_enabled()
3577 * @adapter - pointer to our soft state
3578 *
3579 * issue command to find out if the BIOS is enabled for this controller
3580 */
3581 static int
3582 mega_is_bios_enabled(adapter_t *adapter)
3583 {
3584 unsigned char raw_mbox[sizeof(struct mbox_out)];
3585 mbox_t *mbox;
3586 int ret;
3587
3588 mbox = (mbox_t *)raw_mbox;
3589
3590 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3591
3592 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3593
3594 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3595
3596 raw_mbox[0] = IS_BIOS_ENABLED;
3597 raw_mbox[2] = GET_BIOS;
3598
3599
3600 ret = issue_scb_block(adapter, raw_mbox);
3601
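/* the firmware returns the answer in the first byte of the scratch buffer */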
3602 return *(char *)adapter->mega_buffer;
3603 }
3604
3605
3606 /**
3607 * mega_enum_raid_scsi()
3608 * @adapter - pointer to our soft state
3609 *
3610 * Find out what channels are RAID/SCSI. This information is used to
3611 * differentiate the virtual channels and physical channels and to support
3612 * the ROMB feature and non-disk devices.
3613 */
3614 static void
3615 mega_enum_raid_scsi(adapter_t *adapter)
3616 {
3617 unsigned char raw_mbox[sizeof(struct mbox_out)];
3618 mbox_t *mbox;
3619 int i;
3620
3621 mbox = (mbox_t *)raw_mbox;
3622
3623 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3624
3625 /*
3626 * issue command to find out what channels are raid/scsi
3627 */
3628 raw_mbox[0] = CHNL_CLASS;
3629 raw_mbox[2] = GET_CHNL_CLASS;
3630
3631 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3632
3633 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3634
3635 /*
3636 * Non-ROMB firmware fails this command, so all channels
3637 * must be reported as RAID
3638 */
3639 adapter->mega_ch_class = 0xFF;
3640
3641 if(!issue_scb_block(adapter, raw_mbox)) {
3642 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3643
3644 }
3645
3646 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3647 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3648 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3649 i);
3650 }
3651 else {
3652 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3653 i);
3654 }
3655 }
3656
3657 return;
3658 }
3659
3660
3661 /**
3662 * mega_get_boot_drv()
3663 * @adapter - pointer to our soft state
3664 *
3665 * Find out which device is the boot device. Note, any logical drive or any
3666 * physical device (e.g., a CDROM) can be designated as a boot device.
3667 */
3668 static void
3669 mega_get_boot_drv(adapter_t *adapter)
3670 {
3671 struct private_bios_data *prv_bios_data;
3672 unsigned char raw_mbox[sizeof(struct mbox_out)];
3673 mbox_t *mbox;
3674 u16 cksum = 0;
3675 u8 *cksum_p;
3676 u8 boot_pdrv;
3677 int i;
3678
3679 mbox = (mbox_t *)raw_mbox;
3680
3681 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3682
3683 raw_mbox[0] = BIOS_PVT_DATA;
3684 raw_mbox[2] = GET_BIOS_PVT_DATA;
3685
3686 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3687
3688 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3689
3690 adapter->boot_ldrv_enabled = 0;
3691 adapter->boot_ldrv = 0;
3692
3693 adapter->boot_pdrv_enabled = 0;
3694 adapter->boot_pdrv_ch = 0;
3695 adapter->boot_pdrv_tgt = 0;
3696
3697 if(issue_scb_block(adapter, raw_mbox) == 0) {
3698 prv_bios_data =
3699 (struct private_bios_data *)adapter->mega_buffer;
3700
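/*
 * Validate the private BIOS data: the sum of the first 14 bytes
 * plus the stored checksum must be zero (mod 2^16).
 */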
3701 cksum = 0;
3702 cksum_p = (char *)prv_bios_data;
3703 for (i = 0; i < 14; i++ ) {
3704 cksum += (u16)(*cksum_p++);
3705 }
3706
3707 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3708
3709 /*
3710 * If MSB is set, a physical drive is set as boot
3711 * device
3712 */
3713 if( prv_bios_data->boot_drv & 0x80 ) {
3714 adapter->boot_pdrv_enabled = 1;
3715 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3716 adapter->boot_pdrv_ch = boot_pdrv / 16;
3717 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3718 }
3719 else {
3720 adapter->boot_ldrv_enabled = 1;
3721 adapter->boot_ldrv = prv_bios_data->boot_drv;
3722 }
3723 }
3724 }
3725
3726 }
3727
3728 /**
3729 * mega_support_random_del()
3730 * @adapter - pointer to our soft state
3731 *
3732 * Find out if this controller supports random deletion and addition of
3733 * logical drives
3734 */
3735 static int
3736 mega_support_random_del(adapter_t *adapter)
3737 {
3738 unsigned char raw_mbox[sizeof(struct mbox_out)];
3739 mbox_t *mbox;
3740 int rval;
3741
3742 mbox = (mbox_t *)raw_mbox;
3743
3744 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3745
3746 /*
3747 * issue command
3748 */
3749 raw_mbox[0] = FC_DEL_LOGDRV;
3750 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3751
3752 rval = issue_scb_block(adapter, raw_mbox);
3753
3754 return !rval;
3755 }
3756
3757
3758 /**
3759 * mega_support_ext_cdb()
3760 * @adapter - pointer to our soft state
3761 *
3762 * Find out if this firmware supports cdblen > 10
3763 */
3764 static int
3765 mega_support_ext_cdb(adapter_t *adapter)
3766 {
3767 unsigned char raw_mbox[sizeof(struct mbox_out)];
3768 mbox_t *mbox;
3769 int rval;
3770
3771 mbox = (mbox_t *)raw_mbox;
3772
3773 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3774 /*
3775 * issue command to find out if controller supports extended CDBs.
3776 */
3777 raw_mbox[0] = 0xA4;
3778 raw_mbox[2] = 0x16;
3779
3780 rval = issue_scb_block(adapter, raw_mbox);
3781
3782 return !rval;
3783 }
3784
3785
3786 /**
3787 * mega_del_logdrv()
3788 * @adapter - pointer to our soft state
3789 * @logdrv - logical drive to be deleted
3790 *
3791 * Delete the specified logical drive. It is the responsibility of the user
3792 * app to let the OS know about this operation.
3793 */
3794 static int
3795 mega_del_logdrv(adapter_t *adapter, int logdrv)
3796 {
3797 unsigned long flags;
3798 scb_t *scb;
3799 int rval;
3800
3801 /*
3802 * Stop sending commands to the controller, queue them internally.
3803 * When deletion is complete, ISR will flush the queue.
3804 */
3805 atomic_set(&adapter->quiescent, 1);
3806
3807 /*
3808 * Wait till all the issued commands are complete and there are no
3809 * commands in the pending queue
3810 */
3811 while (atomic_read(&adapter->pend_cmds) > 0 ||
3812 !list_empty(&adapter->pending_list))
3813 msleep(1000); /* sleep for 1s */
3814
3815 rval = mega_do_del_logdrv(adapter, logdrv);
3816
3817 spin_lock_irqsave(&adapter->lock, flags);
3818
3819 /*
3820 * If delete operation was successful, add 0x80 to the logical drive
3821 * ids for commands in the pending queue.
3822 */
3823 if (adapter->read_ldidmap) {
3824 struct list_head *pos;
3825 list_for_each(pos, &adapter->pending_list) {
3826 scb = list_entry(pos, scb_t, list);
3827 if (scb->pthru->logdrv < 0x80 )
3828 scb->pthru->logdrv += 0x80;
3829 }
3830 }
3831
3832 atomic_set(&adapter->quiescent, 0);
3833
3834 mega_runpendq(adapter);
3835
3836 spin_unlock_irqrestore(&adapter->lock, flags);
3837
3838 return rval;
3839 }
3840
3841
3842 static int
3843 mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3844 {
3845 megacmd_t mc;
3846 int rval;
3847
3848 memset( &mc, 0, sizeof(megacmd_t));
3849
3850 mc.cmd = FC_DEL_LOGDRV;
3851 mc.opcode = OP_DEL_LOGDRV;
3852 mc.subopcode = logdrv;
3853
3854 rval = mega_internal_command(adapter, &mc, NULL);
3855
3856 /* log this event */
3857 if(rval) {
3858 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3859 return rval;
3860 }
3861
3862 /*
3863 * After deleting first logical drive, the logical drives must be
3864 * addressed by adding 0x80 to the logical drive id.
3865 */
3866 adapter->read_ldidmap = 1;
3867
3868 return rval;
3869 }
3870
3871
3872 /**
3873 * mega_get_max_sgl()
3874 * @adapter - pointer to our soft state
3875 *
3876 * Find out the maximum number of scatter-gather elements supported by this
3877 * version of the firmware
3878 */
3879 static void
3880 mega_get_max_sgl(adapter_t *adapter)
3881 {
3882 unsigned char raw_mbox[sizeof(struct mbox_out)];
3883 mbox_t *mbox;
3884
3885 mbox = (mbox_t *)raw_mbox;
3886
3887 memset(mbox, 0, sizeof(raw_mbox));
3888
3889 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3890
3891 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3892
3893 raw_mbox[0] = MAIN_MISC_OPCODE;
3894 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3895
3896
3897 if( issue_scb_block(adapter, raw_mbox) ) {
3898 /*
3899 * f/w does not support this command. Choose the default value
3900 */
3901 adapter->sglen = MIN_SGLIST;
3902 }
3903 else {
3904 adapter->sglen = *((char *)adapter->mega_buffer);
3905
3906 /*
3907 * Make sure this is not more than the resources we are
3908 * planning to allocate
3909 */
3910 if ( adapter->sglen > MAX_SGLIST )
3911 adapter->sglen = MAX_SGLIST;
3912 }
3913
3914 return;
3915 }
3916
3917
3918 /**
3919 * mega_support_cluster()
3920 * @adapter - pointer to our soft state
3921 *
3922 * Find out if this firmware supports cluster calls.
3923 */
3924 static int
3925 mega_support_cluster(adapter_t *adapter)
3926 {
3927 unsigned char raw_mbox[sizeof(struct mbox_out)];
3928 mbox_t *mbox;
3929
3930 mbox = (mbox_t *)raw_mbox;
3931
3932 memset(mbox, 0, sizeof(raw_mbox));
3933
3934 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3935
3936 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3937
3938 /*
3939 * Try to get the initiator id. This command will succeed iff the
3940 * clustering is available on this HBA.
3941 */
3942 raw_mbox[0] = MEGA_GET_TARGET_ID;
3943
3944 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3945
3946 /*
3947 * Cluster support available. Get the initiator target id.
3948 * Tell our id to mid-layer too.
3949 */
3950 adapter->this_id = *(u32 *)adapter->mega_buffer;
3951 adapter->host->this_id = adapter->this_id;
3952
3953 return 1;
3954 }
3955
3956 return 0;
3957 }
3958
3959 #ifdef CONFIG_PROC_FS
3960 /**
3961 * mega_adapinq()
3962 * @adapter - pointer to our soft state
3963 * @dma_handle - DMA address of the buffer
3964 *
3965 * Issue internal commands while interrupts are available.
3966 * We only issue direct mailbox commands from within the driver; the ioctl()
3967 * interface can issue passthru commands using these routines.
3968 */
3969 static int
3970 mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3971 {
3972 megacmd_t mc;
3973
3974 memset(&mc, 0, sizeof(megacmd_t));
3975
3976 if( adapter->flag & BOARD_40LD ) {
3977 mc.cmd = FC_NEW_CONFIG;
3978 mc.opcode = NC_SUBOP_ENQUIRY3;
3979 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3980 }
3981 else {
3982 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3983 }
3984
3985 mc.xferaddr = (u32)dma_handle;
3986
3987 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3988 return -1;
3989 }
3990
3991 return 0;
3992 }
3993
3994
3995 /** mega_internal_dev_inquiry()
3996 * @adapter - pointer to our soft state
3997 * @ch - channel for this device
3998 * @tgt - ID of this device
3999 * @buf_dma_handle - DMA address of the buffer
4000 *
4001 * Issue the scsi inquiry for the specified device.
4002 */
4003 static int
4004 mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
4005 dma_addr_t buf_dma_handle)
4006 {
4007 mega_passthru *pthru;
4008 dma_addr_t pthru_dma_handle;
4009 megacmd_t mc;
4010 int rval;
4011 struct pci_dev *pdev;
4012
4013
4014 /*
4015 * For all internal commands, the buffer must be allocated in <4GB
4016 * address range
4017 */
4018 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
4019
4020 pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
4021 &pthru_dma_handle);
4022
4023 if( pthru == NULL ) {
4024 free_local_pdev(pdev);
4025 return -1;
4026 }
4027
4028 pthru->timeout = 2;
4029 pthru->ars = 1;
4030 pthru->reqsenselen = 14;
4031 pthru->islogical = 0;
4032
4033 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
4034
4035 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
4036
4037 pthru->cdblen = 6;
4038
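/* build a standard 6-byte INQUIRY CDB; byte 4 is the allocation length (255) */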
4039 pthru->cdb[0] = INQUIRY;
4040 pthru->cdb[1] = 0;
4041 pthru->cdb[2] = 0;
4042 pthru->cdb[3] = 0;
4043 pthru->cdb[4] = 255;
4044 pthru->cdb[5] = 0;
4045
4046
4047 pthru->dataxferaddr = (u32)buf_dma_handle;
4048 pthru->dataxferlen = 256;
4049
4050 memset(&mc, 0, sizeof(megacmd_t));
4051
4052 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4053 mc.xferaddr = (u32)pthru_dma_handle;
4054
4055 rval = mega_internal_command(adapter, &mc, pthru);
4056
4057 pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
4058 pthru_dma_handle);
4059
4060 free_local_pdev(pdev);
4061
4062 return rval;
4063 }
4064 #endif
4065
4066 /**
4067 * mega_internal_command()
4068 * @adapter - pointer to our soft state
4069 * @mc - the mailbox command
4070 * @pthru - Passthru structure for DCDB commands
4071 *
4072 * Issue the internal commands in interrupt mode.
4073 * The last argument is the address of the passthru structure if the command
4074 * to be fired is a passthru command
4075 *
4076 * Note: parameter 'pthru' is null for non-passthru commands.
4077 */
4078 static int
4079 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4080 {
4081 unsigned long flags;
4082 scb_t *scb;
4083 int rval;
4084
4085 /*
4086 * The internal commands share one command id and hence are
4087 * serialized. This is so because we want to reserve the maximum number of
4088 * available command ids for the I/O commands.
4089 */
4090 mutex_lock(&adapter->int_mtx);
4091
4092 scb = &adapter->int_scb;
4093 memset(scb, 0, sizeof(scb_t));
4094
4095 scb->idx = CMDID_INT_CMDS;
4096 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4097
4098 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4099
4100 /*
4101 * Is it a passthru command
4102 */
4103 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4104 scb->pthru = pthru;
4105
4106 spin_lock_irqsave(&adapter->lock, flags);
4107 list_add_tail(&scb->list, &adapter->pending_list);
4108 /*
4109 * Check if the HBA is in quiescent state, e.g., during a
4110 * delete logical drive operation. If it is, don't run
4111 * the pending_list.
4112 */
4113 if (atomic_read(&adapter->quiescent) == 0)
4114 mega_runpendq(adapter);
4115 spin_unlock_irqrestore(&adapter->lock, flags);
4116
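/* sleep until the completion handler signals int_waitq from the ISR path */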
4117 wait_for_completion(&adapter->int_waitq);
4118
4119 mc->status = rval = adapter->int_status;
4120
4121 /*
4122 * Print a debug message for all failed commands. Applications can use
4123 * this information.
4124 */
4125 if (rval && trace_level) {
4126 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4127 mc->cmd, mc->opcode, mc->subopcode, rval);
4128 }
4129
4130 mutex_unlock(&adapter->int_mtx);
4131 return rval;
4132 }
4133
4134 static struct scsi_host_template megaraid_template = {
4135 .module = THIS_MODULE,
4136 .name = "MegaRAID",
4137 .proc_name = "megaraid_legacy",
4138 .info = megaraid_info,
4139 .queuecommand = megaraid_queue,
4140 .bios_param = megaraid_biosparam,
4141 .max_sectors = MAX_SECTORS_PER_IO,
4142 .can_queue = MAX_COMMANDS,
4143 .this_id = DEFAULT_INITIATOR_ID,
4144 .sg_tablesize = MAX_SGLIST,
4145 .cmd_per_lun = DEF_CMD_PER_LUN,
4146 .eh_abort_handler = megaraid_abort,
4147 .eh_device_reset_handler = megaraid_reset,
4148 .eh_bus_reset_handler = megaraid_reset,
4149 .eh_host_reset_handler = megaraid_reset,
4150 .no_write_same = 1,
4151 };
4152
4153 static int
4154 megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4155 {
4156 struct Scsi_Host *host;
4157 adapter_t *adapter;
4158 unsigned long mega_baseport, tbase, flag = 0;
4159 u16 subsysid, subsysvid;
4160 u8 pci_bus, pci_dev_func;
4161 int irq, i, j;
4162 int error = -ENODEV;
4163
4164 if (hba_count >= MAX_CONTROLLERS)
4165 goto out;
4166
4167 if (pci_enable_device(pdev))
4168 goto out;
4169 pci_set_master(pdev);
4170
4171 pci_bus = pdev->bus->number;
4172 pci_dev_func = pdev->devfn;
4173
4174 /*
4175 * The megaraid3 stuff reports the ID of the Intel part which is not
4176 * remotely specific to the megaraid
4177 */
4178 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4179 u16 magic;
4180 /*
4181 * Don't fall over the Compaq management cards using the same
4182 * PCI identifier
4183 */
4184 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4185 pdev->subsystem_device == 0xC000)
4186 goto out_disable_device;
4187 /* Now check the magic signature byte */
4188 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4189 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4190 goto out_disable_device;
4191 /* Ok it is probably a megaraid */
4192 }
4193
4194 /*
4195 * For these vendor and device ids, signature offsets are not
4196 * valid and 64 bit is implicit
4197 */
4198 if (id->driver_data & BOARD_64BIT)
4199 flag |= BOARD_64BIT;
4200 else {
4201 u32 magic64;
4202
4203 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4204 if (magic64 == HBA_SIGNATURE_64BIT)
4205 flag |= BOARD_64BIT;
4206 }
4207
4208 subsysvid = pdev->subsystem_vendor;
4209 subsysid = pdev->subsystem_device;
4210
4211 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4212 id->vendor, id->device);
4213
4214 /* Read the base port and IRQ from PCI */
4215 mega_baseport = pci_resource_start(pdev, 0);
4216 irq = pdev->irq;
4217
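/*
 * Remember the raw resource address; mega_baseport is remapped
 * for memory-mapped controllers and offset by 0x10 for I/O
 * mapped ones below.
 */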
4218 tbase = mega_baseport;
4219 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4220 flag |= BOARD_MEMMAP;
4221
4222 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4223 dev_warn(&pdev->dev, "mem region busy!\n");
4224 goto out_disable_device;
4225 }
4226
4227 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4228 if (!mega_baseport) {
4229 dev_warn(&pdev->dev, "could not map hba memory\n");
4230 goto out_release_region;
4231 }
4232 } else {
4233 flag |= BOARD_IOMAP;
4234 mega_baseport += 0x10;
4235
4236 if (!request_region(mega_baseport, 16, "megaraid"))
4237 goto out_disable_device;
4238 }
4239
4240 /* Initialize SCSI Host structure */
4241 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4242 if (!host)
4243 goto out_iounmap;
4244
4245 adapter = (adapter_t *)host->hostdata;
4246 memset(adapter, 0, sizeof(adapter_t));
4247
4248 dev_notice(&pdev->dev,
4249 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4250 host->host_no, mega_baseport, irq);
4251
4252 adapter->base = mega_baseport;
4253 if (flag & BOARD_MEMMAP)
4254 adapter->mmio_base = (void __iomem *) mega_baseport;
4255
4256 INIT_LIST_HEAD(&adapter->free_list);
4257 INIT_LIST_HEAD(&adapter->pending_list);
4258 INIT_LIST_HEAD(&adapter->completed_list);
4259
4260 adapter->flag = flag;
4261 spin_lock_init(&adapter->lock);
4262
4263 host->cmd_per_lun = max_cmd_per_lun;
4264 host->max_sectors = max_sectors_per_io;
4265
4266 adapter->dev = pdev;
4267 adapter->host = host;
4268
4269 adapter->host->irq = irq;
4270
4271 if (flag & BOARD_MEMMAP)
4272 adapter->host->base = tbase;
4273 else {
4274 adapter->host->io_port = tbase;
4275 adapter->host->n_io_port = 16;
4276 }
4277
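/*
 * Encode the PCI location as the host's unique id: bus number in
 * the upper byte, devfn in the lower byte.
 */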
4278 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4279
4280 /*
4281 * Allocate buffer to issue internal commands.
4282 */
4283 adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
4284 MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
4285 if (!adapter->mega_buffer) {
4286 dev_warn(&pdev->dev, "out of RAM\n");
4287 goto out_host_put;
4288 }
4289
4290 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4291 GFP_KERNEL);
4292 if (!adapter->scb_list) {
4293 dev_warn(&pdev->dev, "out of RAM\n");
4294 goto out_free_cmd_buffer;
4295 }
4296
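/*
 * The interrupt handler matches the addressing mode chosen above:
 * megaraid_isr_memmapped for memory-mapped boards, otherwise
 * megaraid_isr_iomapped. IRQF_SHARED allows the interrupt line to be
 * shared with other devices.
 */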
4297 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4298 megaraid_isr_memmapped : megaraid_isr_iomapped,
4299 IRQF_SHARED, "megaraid", adapter)) {
4300 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4301 goto out_free_scb_list;
4302 }
4303
4304 if (mega_setup_mailbox(adapter))
4305 goto out_free_irq;
4306
4307 if (mega_query_adapter(adapter))
4308 goto out_free_mbox;
4309
4310 /*
4311 * Check for some known-buggy firmware revisions
4312 */
4313 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4314 /*
4315 * Which firmware
4316 */
4317 if (!strcmp(adapter->fw_version, "3.00") ||
4318 !strcmp(adapter->fw_version, "3.01")) {
4319
4320 dev_warn(&pdev->dev,
4321 "Your card is a Dell PERC "
4322 "2/SC RAID controller with "
4323 "firmware\nmegaraid: 3.00 or 3.01. "
4324 "This driver is known to have "
4325 "corruption issues\nmegaraid: with "
4326 "those firmware versions on this "
4327 "specific card. In order\nmegaraid: "
4328 "to protect your data, please upgrade "
4329 "your firmware to version\nmegaraid: "
4330 "3.10 or later, available from the "
4331 "Dell Technical Support web\n"
4332 "megaraid: site at\nhttp://support."
4333 "dell.com/us/en/filelib/download/"
4334 "index.asp?fileid=2940\n"
4335 );
4336 }
4337 }
4338
4339 /*
4340 * If we have an HP 1M (0x60E7) or 2M (0x60E8) controller with
4341 * firmware H.01.07, H.01.08, or H.01.09, disable 64-bit
4342 * support, since these firmware revisions cannot handle
4343 * 64-bit addressing
4344 */
4345 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4346 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4347 /*
4348 * which firmware
4349 */
4350 if (!strcmp(adapter->fw_version, "H01.07") ||
4351 !strcmp(adapter->fw_version, "H01.08") ||
4352 !strcmp(adapter->fw_version, "H01.09") ) {
4353 dev_warn(&pdev->dev,
4354 "Firmware H.01.07, "
4355 "H.01.08, and H.01.09 on 1M/2M "
4356 "controllers\n"
4357 "do not support 64 bit "
4358 "addressing.\nDISABLING "
4359 "64 bit support.\n");
4360 adapter->flag &= ~BOARD_64BIT;
4361 }
4362 }
4363
4364 if (mega_is_bios_enabled(adapter))
4365 mega_hbas[hba_count].is_bios_enabled = 1;
4366 mega_hbas[hba_count].hostdata_addr = adapter;
4367
4368 /*
4369 * Find out which channel is raid and which is scsi. This is
4370 * for ROMB support.
4371 */
4372 mega_enum_raid_scsi(adapter);
4373
4374 /*
4375 * Find out if a logical drive is set as the boot drive. If
4376 * there is one, make it the first logical drive.
4377 * ROMB: if we have to boot from a physical drive, all the
4378 * physical drives appear before the logical disks.
4379 * Otherwise, all the physical drives are exported to the mid
4380 * layer after the logical drives.
4381 */
4382 mega_get_boot_drv(adapter);
4383
4384 if (adapter->boot_pdrv_enabled) {
4385 j = adapter->product_info.nchannels;
4386 for( i = 0; i < j; i++ )
4387 adapter->logdrv_chan[i] = 0;
4388 for( i = j; i < NVIRT_CHAN + j; i++ )
4389 adapter->logdrv_chan[i] = 1;
4390 } else {
4391 for (i = 0; i < NVIRT_CHAN; i++)
4392 adapter->logdrv_chan[i] = 1;
4393 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4394 adapter->logdrv_chan[i] = 0;
4395 adapter->mega_ch_class <<= NVIRT_CHAN;
4396 }
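/*
 * logdrv_chan[] now marks which channels carry logical drives: when
 * booting from a physical drive, the real SCSI channels come first
 * and the NVIRT_CHAN virtual channels follow; otherwise the virtual
 * channels are presented first and mega_ch_class is shifted up
 * accordingly.
 */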
4397
4398 /*
4399 * Does the firmware support random deletion and addition of
4400 * logical drives?
4401 */
4402 adapter->read_ldidmap = 0; /* set it after first logdrv
4403 delete cmd */
4404 adapter->support_random_del = mega_support_random_del(adapter);
4405
4406 /* Initialize SCBs */
4407 if (mega_init_scb(adapter))
4408 goto out_free_mbox;
4409
4410 /*
4411 * Reset the pending commands counter
4412 */
4413 atomic_set(&adapter->pend_cmds, 0);
4414
4415 /*
4416 * Reset the adapter quiescent flag
4417 */
4418 atomic_set(&adapter->quiescent, 0);
4419
4420 hba_soft_state[hba_count] = adapter;
4421
4422 /*
4423 * Fill in the structure which needs to be passed back to the
4424 * application when it does an ioctl() for controller related
4425 * information.
4426 */
4427 i = hba_count;
4428
4429 mcontroller[i].base = mega_baseport;
4430 mcontroller[i].irq = irq;
4431 mcontroller[i].numldrv = adapter->numldrv;
4432 mcontroller[i].pcibus = pci_bus;
4433 mcontroller[i].pcidev = id->device;
4434 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4435 mcontroller[i].pciid = -1;
4436 mcontroller[i].pcivendor = id->vendor;
4437 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4438 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4439
4440
4441 /* Set the Mode of addressing to 64 bit if we can */
4442 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4443 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4444 adapter->has_64bit_addr = 1;
4445 } else {
4446 pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4447 adapter->has_64bit_addr = 0;
4448 }
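/*
 * 64-bit DMA is enabled only when both the board advertises it
 * (BOARD_64BIT) and the platform's dma_addr_t is 8 bytes wide.
 */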
4449
4450 mutex_init(&adapter->int_mtx);
4451 init_completion(&adapter->int_waitq);
4452
4453 adapter->this_id = DEFAULT_INITIATOR_ID;
4454 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4455
4456 #if MEGA_HAVE_CLUSTERING
4457 /*
4458 * Is cluster support enabled on this controller?
4459 * Note: in a cluster the HBAs (the initiators) will have
4460 * different target IDs and we cannot assume them to be 7.
4461 * mega_support_cluster() also retrieves the target id when
4462 * cluster support is available.
4463 */
4464 adapter->has_cluster = mega_support_cluster(adapter);
4465 if (adapter->has_cluster) {
4466 dev_notice(&pdev->dev,
4467 "Cluster driver, initiator id:%d\n",
4468 adapter->this_id);
4469 }
4470 #endif
4471
4472 pci_set_drvdata(pdev, host);
4473
4474 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4475
4476 error = scsi_add_host(host, &pdev->dev);
4477 if (error)
4478 goto out_free_mbox;
4479
4480 scsi_scan_host(host);
4481 hba_count++;
4482 return 0;
4483
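/*
 * The error labels below fall through: each one undoes the
 * allocation made just before the corresponding failure point,
 * ending with pci_disable_device().
 */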
4484 out_free_mbox:
4485 pci_free_consistent(adapter->dev, sizeof(mbox64_t),
4486 adapter->una_mbox64, adapter->una_mbox64_dma);
4487 out_free_irq:
4488 free_irq(adapter->host->irq, adapter);
4489 out_free_scb_list:
4490 kfree(adapter->scb_list);
4491 out_free_cmd_buffer:
4492 pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
4493 adapter->mega_buffer, adapter->buf_dma_handle);
4494 out_host_put:
4495 scsi_host_put(host);
4496 out_iounmap:
4497 if (flag & BOARD_MEMMAP)
4498 iounmap((void *)mega_baseport);
4499 out_release_region:
4500 if (flag & BOARD_MEMMAP)
4501 release_mem_region(tbase, 128);
4502 else
4503 release_region(mega_baseport, 16);
4504 out_disable_device:
4505 pci_disable_device(pdev);
4506 out:
4507 return error;
4508 }
4509
4510 static void
4511 __megaraid_shutdown(adapter_t *adapter)
4512 {
4513 u_char raw_mbox[sizeof(struct mbox_out)];
4514 mbox_t *mbox = (mbox_t *)raw_mbox;
4515 int i;
4516
4517 /* Flush adapter cache */
4518 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4519 raw_mbox[0] = FLUSH_ADAPTER;
4520
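/*
 * Free the IRQ first: the flush commands below are issued with
 * issue_scb_block() in polled mode, so interrupt delivery is not
 * needed from here on.
 */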
4521 free_irq(adapter->host->irq, adapter);
4522
4523 /* Issue a blocking (interrupts disabled) command to the card */
4524 issue_scb_block(adapter, raw_mbox);
4525
4526 /* Flush disks cache */
4527 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4528 raw_mbox[0] = FLUSH_SYSTEM;
4529
4530 /* Issue a blocking (interrupts disabled) command to the card */
4531 issue_scb_block(adapter, raw_mbox);
4532
4533 if (atomic_read(&adapter->pend_cmds) > 0)
4534 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4535
4536 /*
4537 * Have a deliberate delay to make sure all the caches are
4538 * actually flushed.
4539 */
4540 for (i = 0; i <= 10; i++)
4541 mdelay(1000);
4542 }
4543
4544 static void
4545 megaraid_remove_one(struct pci_dev *pdev)
4546 {
4547 struct Scsi_Host *host = pci_get_drvdata(pdev);
4548 adapter_t *adapter = (adapter_t *)host->hostdata;
4549 char buf[12] = { 0 };
4550
4551 scsi_remove_host(host);
4552
4553 __megaraid_shutdown(adapter);
4554
4555 /* Free our resources */
4556 if (adapter->flag & BOARD_MEMMAP) {
4557 iounmap((void *)adapter->base);
4558 release_mem_region(adapter->host->base, 128);
4559 } else
4560 release_region(adapter->base, 16);
4561
4562 mega_free_sgl(adapter);
4563
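/*
 * Remove the per-adapter "hba<N>" proc subtree that
 * mega_create_proc_entry() set up during probe.
 */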
4564 sprintf(buf, "hba%d", adapter->host->host_no);
4565 remove_proc_subtree(buf, mega_proc_dir_entry);
4566
4567 pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
4568 adapter->mega_buffer, adapter->buf_dma_handle);
4569 kfree(adapter->scb_list);
4570 pci_free_consistent(adapter->dev, sizeof(mbox64_t),
4571 adapter->una_mbox64, adapter->una_mbox64_dma);
4572
4573 scsi_host_put(host);
4574 pci_disable_device(pdev);
4575
4576 hba_count--;
4577 }
4578
4579 static void
4580 megaraid_shutdown(struct pci_dev *pdev)
4581 {
4582 struct Scsi_Host *host = pci_get_drvdata(pdev);
4583 adapter_t *adapter = (adapter_t *)host->hostdata;
4584
4585 __megaraid_shutdown(adapter);
4586 }
4587
4588 static struct pci_device_id megaraid_pci_tbl[] = {
4589 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4590 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4591 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4592 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4593 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4594 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4595 {0,}
4596 };
4597 MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
4598
4599 static struct pci_driver megaraid_pci_driver = {
4600 .name = "megaraid_legacy",
4601 .id_table = megaraid_pci_tbl,
4602 .probe = megaraid_probe_one,
4603 .remove = megaraid_remove_one,
4604 .shutdown = megaraid_shutdown,
4605 };
4606
4607 static int __init megaraid_init(void)
4608 {
4609 int error;
4610
4611 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4612 max_cmd_per_lun = MAX_CMD_PER_LUN;
4613 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4614 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4615
4616 #ifdef CONFIG_PROC_FS
4617 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4618 if (!mega_proc_dir_entry) {
4619 printk(KERN_WARNING
4620 "megaraid: failed to create megaraid root\n");
4621 }
4622 #endif
4623 error = pci_register_driver(&megaraid_pci_driver);
4624 if (error) {
4625 #ifdef CONFIG_PROC_FS
4626 remove_proc_entry("megaraid", NULL);
4627 #endif
4628 return error;
4629 }
4630
4631 /*
4632 * Register the driver as a character device, for applications
4633 * to access it for ioctls.
4634 * Passing 0 as the first (major) argument asks
4635 * register_chrdev() to allocate a major number dynamically.
4636 */
4637 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
4638 if (major < 0) {
4639 printk(KERN_WARNING
4640 "megaraid: failed to register char device\n");
4641 }
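/*
 * A failed character-device registration is not fatal: the driver
 * still works as a SCSI host, only the management ioctl interface
 * is unavailable.
 */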
4642
4643 return 0;
4644 }
4645
4646 static void __exit megaraid_exit(void)
4647 {
4648 /*
4649 * Unregister the character device interface to the driver.
4650 */
4651 unregister_chrdev(major, "megadev_legacy");
4652
4653 pci_unregister_driver(&megaraid_pci_driver);
4654
4655 #ifdef CONFIG_PROC_FS
4656 remove_proc_entry("megaraid", NULL);
4657 #endif
4658 }
4659
4660 module_init(megaraid_init);
4661 module_exit(megaraid_exit);
4662
4663 /* vi: set ts=8 sw=8 tw=78: */
4664