1 /*
2 *******************************************************************************
3 ** O.S : Linux
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Nick Cheng, C.L. Huang
6 ** Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
9 **
10 ** Web site: www.areca.com.tw
11 ** E-mail: support@areca.com.tw
12 **
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
23 ** are met:
24 ** 1. Redistributions of source code must retain the above copyright
25 ** notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 ** notice, this list of conditions and the following disclaimer in the
28 ** documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 ** derived from this software without specific prior written permission.
31 **
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
45 *******************************************************************************
46 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/aer.h>
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
73 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
/*
 * Module parameters: all are read-only after load (S_IRUGO) and consumed
 * during probe/init; they cannot be changed at runtime via sysfs.
 */
static int msix_enable = 1;
module_param(msix_enable, int, S_IRUGO);
MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");

static int msi_enable = 1;
module_param(msi_enable, int, S_IRUGO);
MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");

static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
module_param(host_can_queue, int, S_IRUGO);
MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");

static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
module_param(cmd_per_lun, int, S_IRUGO);
MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");

static int dma_mask_64 = 0;
module_param(dma_mask_64, int, S_IRUGO);
MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");

static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");

static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
module_param(cmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");

/* Bus-reset retry pacing: ARCMSR_RETRYCOUNT waits of ARCMSR_SLEEPTIME seconds */
#define	ARCMSR_SLEEPTIME	10
#define	ARCMSR_RETRYCOUNT	12
108
109 static wait_queue_head_t wait_q;
110 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
111 struct scsi_cmnd *cmd);
112 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
113 static int arcmsr_abort(struct scsi_cmnd *);
114 static int arcmsr_bus_reset(struct scsi_cmnd *);
115 static int arcmsr_bios_param(struct scsi_device *sdev,
116 struct block_device *bdev, sector_t capacity, int *info);
117 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
118 static int arcmsr_probe(struct pci_dev *pdev,
119 const struct pci_device_id *id);
120 static int __maybe_unused arcmsr_suspend(struct device *dev);
121 static int __maybe_unused arcmsr_resume(struct device *dev);
122 static void arcmsr_remove(struct pci_dev *pdev);
123 static void arcmsr_shutdown(struct pci_dev *pdev);
124 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
125 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
126 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
127 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
128 u32 intmask_org);
129 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
130 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
131 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
132 static void arcmsr_request_device_map(struct timer_list *t);
133 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
134 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
135 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
136 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
137 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
138 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
139 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
140 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
141 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
142 static const char *arcmsr_info(struct Scsi_Host *);
143 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
144 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
145 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
146 static void arcmsr_set_iop_datetime(struct timer_list *);
147 static int arcmsr_slave_config(struct scsi_device *sdev);
arcmsr_adjust_disk_queue_depth(struct scsi_device * sdev,int queue_depth)148 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
149 {
150 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
151 queue_depth = ARCMSR_MAX_CMD_PERLUN;
152 return scsi_change_queue_depth(sdev, queue_depth);
153 }
154
/*
 * SCSI mid-layer host template.  can_queue, sg_tablesize and max_sectors
 * are defaults only; they are recomputed from the firmware configuration
 * later (see arcmsr_alloc_ccb_pool, which rewrites host->max_sectors,
 * host->sg_tablesize and host->can_queue).
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "Areca SAS/SATA RAID driver",
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.slave_configure = arcmsr_slave_config,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_attrs = arcmsr_host_attrs,
	.no_write_same = 1,
};
173
/*
 * Supported PCI device IDs.  driver_data carries the message-unit
 * generation (ACB_ADAPTER_TYPE_A..F) that selects the register layout
 * and queue model used throughout the driver.
 */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
228
/* System suspend/resume hooks wrapped into a dev_pm_ops structure. */
static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);

/* PCI driver glue: binds the ID table above to the probe/remove paths. */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.driver.pm = &arcmsr_pm_ops,
	.shutdown = arcmsr_shutdown,
};
239 /*
240 ****************************************************************************
241 ****************************************************************************
242 */
243
arcmsr_free_io_queue(struct AdapterControlBlock * acb)244 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
245 {
246 switch (acb->adapter_type) {
247 case ACB_ADAPTER_TYPE_B:
248 case ACB_ADAPTER_TYPE_D:
249 case ACB_ADAPTER_TYPE_E:
250 case ACB_ADAPTER_TYPE_F:
251 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
252 acb->dma_coherent2, acb->dma_coherent_handle2);
253 break;
254 }
255 }
256
/*
 * arcmsr_remap_pciregion - ioremap the controller's message-unit BAR(s).
 *
 * Which BAR holds the message unit differs per adapter generation, and
 * some generations additionally require clearing/synchronizing doorbell
 * state right after mapping.  Returns true on success, false if any
 * mapping failed (partial mappings are undone before returning).
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		/* type A: message unit lives in BAR 0 */
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/* type B: doorbell registers in BAR 0, message buffers in BAR 2 */
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* second mapping failed: undo the first to avoid a leak */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		/* type C: message unit in BAR 1 */
		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* ack any message interrupt left pending from before the remap */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		/* type D: message unit in BAR 0 */
		void __iomem *mem_base0;
		unsigned long addr, range;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		/* type E: message unit in BAR 1; reset doorbell bookkeeping to 0 */
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		/* type F: message unit in BAR 0; reset doorbell bookkeeping to 0 */
		acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuF) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	}
	return true;
}
343
/*
 * arcmsr_unmap_pciregion - undo arcmsr_remap_pciregion().
 * Unmaps exactly the BAR mapping(s) that were established for this
 * adapter type (type B is the only one with two mappings).
 */
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		iounmap(acb->pmuA);
		break;
	case ACB_ADAPTER_TYPE_B:
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		break;
	case ACB_ADAPTER_TYPE_C:
		iounmap(acb->pmuC);
		break;
	case ACB_ADAPTER_TYPE_D:
		iounmap(acb->mem_base0);
		break;
	case ACB_ADAPTER_TYPE_E:
		iounmap(acb->pmuE);
		break;
	case ACB_ADAPTER_TYPE_F:
		iounmap(acb->pmuF);
		break;
	}
}
368
arcmsr_do_interrupt(int irq,void * dev_id)369 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
370 {
371 irqreturn_t handle_state;
372 struct AdapterControlBlock *acb = dev_id;
373
374 handle_state = arcmsr_interrupt(acb);
375 return handle_state;
376 }
377
/*
 * arcmsr_bios_param - report disk geometry for the BIOS.
 * @sdev:     scsi device (unused)
 * @bdev:     block device that may carry a partition table
 * @capacity: device size in 512-byte sectors
 * @geom:     out: geom[0]=heads, geom[1]=sectors/track, geom[2]=cylinders
 *
 * Prefer the geometry implied by an existing partition table; otherwise
 * fall back to the conventional 64/32 translation, or 255/63 for disks
 * whose 64/32 cylinder count exceeds 1024.  Always returns 0.
 *
 * Fix: the previous code truncated the 64-bit sector_t capacity into an
 * int before dividing, so the fallback geometry was wrong for devices of
 * 2^31 sectors (1 TB) or more.  Use sector_t arithmetic with sector_div()
 * so the division is also safe on 32-bit kernels.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int heads, sectors;
	sector_t cylinders;

	if (scsi_partsize(bdev, capacity, geom))
		return 0;

	heads = 64;
	sectors = 32;
	cylinders = capacity;
	sector_div(cylinders, heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = capacity;
		sector_div(cylinders, heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = (int)cylinders;
	return 0;
}
400
arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock * acb)401 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
402 {
403 struct MessageUnit_A __iomem *reg = acb->pmuA;
404 int i;
405
406 for (i = 0; i < 2000; i++) {
407 if (readl(®->outbound_intstatus) &
408 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
409 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
410 ®->outbound_intstatus);
411 return true;
412 }
413 msleep(10);
414 } /* max 20 seconds */
415
416 return false;
417 }
418
arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock * acb)419 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
420 {
421 struct MessageUnit_B *reg = acb->pmuB;
422 int i;
423
424 for (i = 0; i < 2000; i++) {
425 if (readl(reg->iop2drv_doorbell)
426 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
427 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
428 reg->iop2drv_doorbell);
429 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
430 reg->drv2iop_doorbell);
431 return true;
432 }
433 msleep(10);
434 } /* max 20 seconds */
435
436 return false;
437 }
438
arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock * pACB)439 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
440 {
441 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
442 int i;
443
444 for (i = 0; i < 2000; i++) {
445 if (readl(&phbcmu->outbound_doorbell)
446 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
447 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
448 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
449 return true;
450 }
451 msleep(10);
452 } /* max 20 seconds */
453
454 return false;
455 }
456
arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock * pACB)457 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
458 {
459 struct MessageUnit_D *reg = pACB->pmuD;
460 int i;
461
462 for (i = 0; i < 2000; i++) {
463 if (readl(reg->outbound_doorbell)
464 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
465 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
466 reg->outbound_doorbell);
467 return true;
468 }
469 msleep(10);
470 } /* max 20 seconds */
471 return false;
472 }
473
arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock * pACB)474 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
475 {
476 int i;
477 uint32_t read_doorbell;
478 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
479
480 for (i = 0; i < 2000; i++) {
481 read_doorbell = readl(&phbcmu->iobound_doorbell);
482 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
483 writel(0, &phbcmu->host_int_status); /*clear interrupt*/
484 pACB->in_doorbell = read_doorbell;
485 return true;
486 }
487 msleep(10);
488 } /* max 20 seconds */
489 return false;
490 }
491
arcmsr_hbaA_flush_cache(struct AdapterControlBlock * acb)492 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
493 {
494 struct MessageUnit_A __iomem *reg = acb->pmuA;
495 int retry_count = 30;
496 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
497 do {
498 if (arcmsr_hbaA_wait_msgint_ready(acb))
499 break;
500 else {
501 retry_count--;
502 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
503 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
504 }
505 } while (retry_count != 0);
506 }
507
arcmsr_hbaB_flush_cache(struct AdapterControlBlock * acb)508 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
509 {
510 struct MessageUnit_B *reg = acb->pmuB;
511 int retry_count = 30;
512 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
513 do {
514 if (arcmsr_hbaB_wait_msgint_ready(acb))
515 break;
516 else {
517 retry_count--;
518 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
519 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
520 }
521 } while (retry_count != 0);
522 }
523
arcmsr_hbaC_flush_cache(struct AdapterControlBlock * pACB)524 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
525 {
526 struct MessageUnit_C __iomem *reg = pACB->pmuC;
527 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
528 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
529 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
530 do {
531 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
532 break;
533 } else {
534 retry_count--;
535 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
536 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
537 }
538 } while (retry_count != 0);
539 return;
540 }
541
arcmsr_hbaD_flush_cache(struct AdapterControlBlock * pACB)542 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
543 {
544 int retry_count = 15;
545 struct MessageUnit_D *reg = pACB->pmuD;
546
547 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
548 do {
549 if (arcmsr_hbaD_wait_msgint_ready(pACB))
550 break;
551
552 retry_count--;
553 pr_notice("arcmsr%d: wait 'flush adapter "
554 "cache' timeout, retry count down = %d\n",
555 pACB->host->host_no, retry_count);
556 } while (retry_count != 0);
557 }
558
arcmsr_hbaE_flush_cache(struct AdapterControlBlock * pACB)559 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
560 {
561 int retry_count = 30;
562 struct MessageUnit_E __iomem *reg = pACB->pmuE;
563
564 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
565 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
566 writel(pACB->out_doorbell, ®->iobound_doorbell);
567 do {
568 if (arcmsr_hbaE_wait_msgint_ready(pACB))
569 break;
570 retry_count--;
571 pr_notice("arcmsr%d: wait 'flush adapter "
572 "cache' timeout, retry count down = %d\n",
573 pACB->host->host_no, retry_count);
574 } while (retry_count != 0);
575 }
576
/*
 * arcmsr_flush_adapter_cache - dispatch the cache-flush request to the
 * message-unit specific implementation for this adapter generation.
 * Type F shares the type E message interface.
 */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_flush_cache(acb);
		break;
	}
}
599
arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock * acb)600 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
601 {
602 struct MessageUnit_B *reg = acb->pmuB;
603
604 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
605 reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
606 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
607 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
608 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
609 } else {
610 reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
611 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
612 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
613 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
614 }
615 reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
616 reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
617 reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
618 }
619
/*
 * arcmsr_hbaD_assign_regAddr - resolve type D (ARC-1214) register
 * pointers.  All registers live at fixed offsets inside the single
 * mem_base0 mapping; this simply materialises each offset as a pointer
 * in the MessageUnit_D bookkeeping structure.
 */
static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
	reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
	reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
	reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
	reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
	reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
	reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
	reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
	reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
	reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
	reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
	reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
	reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
	reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
	reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
	reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
	reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
	reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
	reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
	reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
	reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
	reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
	reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
	reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
	reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
	reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
651
/*
 * arcmsr_hbaF_assign_regAddr - carve the host<->IOP message buffers out
 * of the type F coherent allocation and publish their bus address to the
 * IOP.
 *
 * Layout of dma_coherent2 (as used here): the completion queue occupies
 * the first completeQ_size bytes, followed (4-byte aligned) by the
 * write buffer, with the read buffer at +0x100 and the rw buffer at
 * +0x200 from the write buffer.
 */
static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
{
	dma_addr_t host_buffer_dma;
	struct MessageUnit_F __iomem *pmuF;

	/* fill the completion queue with 0xff so stale entries are recognisable */
	memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
	acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
		acb->completeQ_size, 4);
	acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
	acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
	memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
	/* bus address of the message area: same offset as in the CPU mapping */
	host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
	pmuF = acb->pmuF;
	/* host buffer low address, bit0:1 all buffer active */
	writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
	/* host buffer high address */
	writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
	/* set host buffer physical address */
	writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
}
672
arcmsr_alloc_io_queue(struct AdapterControlBlock * acb)673 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
674 {
675 bool rtn = true;
676 void *dma_coherent;
677 dma_addr_t dma_coherent_handle;
678 struct pci_dev *pdev = acb->pdev;
679
680 switch (acb->adapter_type) {
681 case ACB_ADAPTER_TYPE_B: {
682 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
683 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
684 &dma_coherent_handle, GFP_KERNEL);
685 if (!dma_coherent) {
686 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
687 return false;
688 }
689 acb->dma_coherent_handle2 = dma_coherent_handle;
690 acb->dma_coherent2 = dma_coherent;
691 acb->pmuB = (struct MessageUnit_B *)dma_coherent;
692 arcmsr_hbaB_assign_regAddr(acb);
693 }
694 break;
695 case ACB_ADAPTER_TYPE_D: {
696 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
697 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
698 &dma_coherent_handle, GFP_KERNEL);
699 if (!dma_coherent) {
700 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
701 return false;
702 }
703 acb->dma_coherent_handle2 = dma_coherent_handle;
704 acb->dma_coherent2 = dma_coherent;
705 acb->pmuD = (struct MessageUnit_D *)dma_coherent;
706 arcmsr_hbaD_assign_regAddr(acb);
707 }
708 break;
709 case ACB_ADAPTER_TYPE_E: {
710 uint32_t completeQ_size;
711 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
712 acb->ioqueue_size = roundup(completeQ_size, 32);
713 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
714 &dma_coherent_handle, GFP_KERNEL);
715 if (!dma_coherent){
716 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
717 return false;
718 }
719 acb->dma_coherent_handle2 = dma_coherent_handle;
720 acb->dma_coherent2 = dma_coherent;
721 acb->pCompletionQ = dma_coherent;
722 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
723 acb->doneq_index = 0;
724 }
725 break;
726 case ACB_ADAPTER_TYPE_F: {
727 uint32_t QueueDepth;
728 uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
729
730 arcmsr_wait_firmware_ready(acb);
731 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
732 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
733 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
734 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
735 &dma_coherent_handle, GFP_KERNEL);
736 if (!dma_coherent) {
737 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
738 return false;
739 }
740 acb->dma_coherent_handle2 = dma_coherent_handle;
741 acb->dma_coherent2 = dma_coherent;
742 acb->pCompletionQ = dma_coherent;
743 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
744 acb->doneq_index = 0;
745 arcmsr_hbaF_assign_regAddr(acb);
746 }
747 break;
748 default:
749 break;
750 }
751 return rtn;
752 }
753
/*
 * arcmsr_alloc_ccb_pool - carve the per-adapter pool of CommandControlBlocks
 * (CCBs) out of a single DMA-coherent allocation.
 *
 * Sizes each CCB for the firmware-reported scatter/gather limit, allocates
 * uncached memory for the whole pool (plus the message-unit I/O queue for
 * all adapter types except F, which allocates it separately), records the
 * virtual-to-physical offset the ISR uses to map completion addresses back
 * to CCBs, and links every CCB onto acb->ccb_free_list.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	unsigned long cdb_phyaddr, next_ccb_phy;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version, curr_phy_upper32;

	/* Until the firmware reports devices, mark every (id, lun) as gone. */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	/* Config version >= 3 encodes a transfer-length shift in bits 8..15. */
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* One CCB plus its variable-length SG table, rounded up to 32 bytes. */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		acb->uncache_size += acb->ioqueue_size;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	/* Offset that converts a CDB bus address back to its kernel address. */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			/* Types A/B post the CDB bus address shifted right by 5. */
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
		case ACB_ADAPTER_TYPE_F:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/*
		 * The pool must not cross a 4 GiB boundary: truncate it at the
		 * first CCB whose address would change the upper 32 bits.
		 */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		/* Memory after the last CCB holds the message-unit I/O queue. */
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	return 0;
}
846
arcmsr_message_isr_bh_fn(struct work_struct * work)847 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
848 {
849 struct AdapterControlBlock *acb = container_of(work,
850 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
851 char *acb_dev_map = (char *)acb->device_map;
852 uint32_t __iomem *signature = NULL;
853 char __iomem *devicemap = NULL;
854 int target, lun;
855 struct scsi_device *psdev;
856 char diff, temp;
857
858 switch (acb->adapter_type) {
859 case ACB_ADAPTER_TYPE_A: {
860 struct MessageUnit_A __iomem *reg = acb->pmuA;
861
862 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
863 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
864 break;
865 }
866 case ACB_ADAPTER_TYPE_B: {
867 struct MessageUnit_B *reg = acb->pmuB;
868
869 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
870 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
871 break;
872 }
873 case ACB_ADAPTER_TYPE_C: {
874 struct MessageUnit_C __iomem *reg = acb->pmuC;
875
876 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
877 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
878 break;
879 }
880 case ACB_ADAPTER_TYPE_D: {
881 struct MessageUnit_D *reg = acb->pmuD;
882
883 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
884 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
885 break;
886 }
887 case ACB_ADAPTER_TYPE_E: {
888 struct MessageUnit_E __iomem *reg = acb->pmuE;
889
890 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
891 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
892 break;
893 }
894 case ACB_ADAPTER_TYPE_F: {
895 signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
896 devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
897 break;
898 }
899 }
900 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
901 return;
902 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
903 target++) {
904 temp = readb(devicemap);
905 diff = (*acb_dev_map) ^ temp;
906 if (diff != 0) {
907 *acb_dev_map = temp;
908 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
909 lun++) {
910 if ((diff & 0x01) == 1 &&
911 (temp & 0x01) == 1) {
912 scsi_add_device(acb->host,
913 0, target, lun);
914 } else if ((diff & 0x01) == 1
915 && (temp & 0x01) == 0) {
916 psdev = scsi_device_lookup(acb->host,
917 0, target, lun);
918 if (psdev != NULL) {
919 scsi_remove_device(psdev);
920 scsi_device_put(psdev);
921 }
922 }
923 temp >>= 1;
924 diff >>= 1;
925 }
926 }
927 devicemap++;
928 acb_dev_map++;
929 }
930 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
931 }
932
933 static int
arcmsr_request_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)934 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
935 {
936 unsigned long flags;
937 int nvec, i;
938
939 if (msix_enable == 0)
940 goto msi_int0;
941 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
942 PCI_IRQ_MSIX);
943 if (nvec > 0) {
944 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
945 flags = 0;
946 } else {
947 msi_int0:
948 if (msi_enable == 1) {
949 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
950 if (nvec == 1) {
951 dev_info(&pdev->dev, "msi enabled\n");
952 goto msi_int1;
953 }
954 }
955 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
956 if (nvec < 1)
957 return FAILED;
958 msi_int1:
959 flags = IRQF_SHARED;
960 }
961
962 acb->vector_count = nvec;
963 for (i = 0; i < nvec; i++) {
964 if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
965 flags, "arcmsr", acb)) {
966 pr_warn("arcmsr%d: request_irq =%d failed!\n",
967 acb->host->host_no, pci_irq_vector(pdev, i));
968 goto out_free_irq;
969 }
970 }
971
972 return SUCCESS;
973 out_free_irq:
974 while (--i >= 0)
975 free_irq(pci_irq_vector(pdev, i), acb);
976 pci_free_irq_vectors(pdev);
977 return FAILED;
978 }
979
arcmsr_init_get_devmap_timer(struct AdapterControlBlock * pacb)980 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
981 {
982 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
983 pacb->fw_flag = FW_NORMAL;
984 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
985 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
986 add_timer(&pacb->eternal_timer);
987 }
988
arcmsr_init_set_datetime_timer(struct AdapterControlBlock * pacb)989 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
990 {
991 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
992 pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
993 add_timer(&pacb->refresh_timer);
994 }
995
arcmsr_set_dma_mask(struct AdapterControlBlock * acb)996 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
997 {
998 struct pci_dev *pcidev = acb->pdev;
999
1000 if (IS_DMA64) {
1001 if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
1002 dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
1003 goto dma32;
1004 if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
1005 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
1006 printk("arcmsr: set DMA 64 mask failed\n");
1007 return -ENXIO;
1008 }
1009 } else {
1010 dma32:
1011 if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1012 dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1013 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1014 printk("arcmsr: set DMA 32-bit mask failed\n");
1015 return -ENXIO;
1016 }
1017 }
1018 return 0;
1019 }
1020
/*
 * arcmsr_probe - PCI probe: bring one Areca adapter fully online.
 *
 * Enables the PCI device, allocates the Scsi_Host with the adapter control
 * block in hostdata, configures DMA masks and SCSI host limits, maps BARs,
 * allocates the message-unit I/O queue and CCB pool, registers the host,
 * requests IRQs, initializes the IOP and starts the periodic timers.
 *
 * Returns 0 on success or -ENODEV, unwinding via the goto ladder at the
 * bottom (each label undoes one setup stage, in reverse order).
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* driver_data in the PCI id table encodes the adapter type (A..F). */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
	/* Clamp out-of-range module parameters back to the defaults. */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	/* NOTE: these three helpers return nonzero on success, so !error
	 * (and the plain error check after alloc_ccb_pool) selects failure. */
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	/* Non-F types re-allocate the queue inside arcmsr_alloc_ccb_pool(). */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
1129
arcmsr_free_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)1130 static void arcmsr_free_irq(struct pci_dev *pdev,
1131 struct AdapterControlBlock *acb)
1132 {
1133 int i;
1134
1135 for (i = 0; i < acb->vector_count; i++)
1136 free_irq(pci_irq_vector(pdev, i), acb);
1137 pci_free_irq_vectors(pdev);
1138 }
1139
arcmsr_suspend(struct device * dev)1140 static int __maybe_unused arcmsr_suspend(struct device *dev)
1141 {
1142 struct pci_dev *pdev = to_pci_dev(dev);
1143 struct Scsi_Host *host = pci_get_drvdata(pdev);
1144 struct AdapterControlBlock *acb =
1145 (struct AdapterControlBlock *)host->hostdata;
1146
1147 arcmsr_disable_outbound_ints(acb);
1148 arcmsr_free_irq(pdev, acb);
1149 del_timer_sync(&acb->eternal_timer);
1150 if (set_date_time)
1151 del_timer_sync(&acb->refresh_timer);
1152 flush_work(&acb->arcmsr_do_message_isr_bh);
1153 arcmsr_stop_adapter_bgrb(acb);
1154 arcmsr_flush_adapter_cache(acb);
1155 return 0;
1156 }
1157
/*
 * arcmsr_resume - PM resume: restore DMA masks and IRQs, reset the
 * per-chip queue/doorbell bookkeeping that was lost across suspend, then
 * re-initialize the IOP and restart the periodic timers.
 *
 * On failure the SCSI host is torn down and -ENODEV is returned.
 */
static int __maybe_unused arcmsr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		/* Type B: clear both post/done ring buffers and indices. */
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_E:
		/* Type E: clear interrupt status and resync the doorbell. */
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		/* Type F additionally re-derives its register addresses. */
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	scsi_host_put(host);
	return -ENODEV;
}
1214
arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock * acb)1215 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1216 {
1217 struct MessageUnit_A __iomem *reg = acb->pmuA;
1218 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1219 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1220 printk(KERN_NOTICE
1221 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1222 , acb->host->host_no);
1223 return false;
1224 }
1225 return true;
1226 }
1227
arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock * acb)1228 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1229 {
1230 struct MessageUnit_B *reg = acb->pmuB;
1231
1232 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1233 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1234 printk(KERN_NOTICE
1235 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1236 , acb->host->host_no);
1237 return false;
1238 }
1239 return true;
1240 }
arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock * pACB)1241 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1242 {
1243 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1244 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1245 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1246 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1247 printk(KERN_NOTICE
1248 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1249 , pACB->host->host_no);
1250 return false;
1251 }
1252 return true;
1253 }
1254
arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock * pACB)1255 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1256 {
1257 struct MessageUnit_D *reg = pACB->pmuD;
1258
1259 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1260 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1261 pr_notice("arcmsr%d: wait 'abort all outstanding "
1262 "command' timeout\n", pACB->host->host_no);
1263 return false;
1264 }
1265 return true;
1266 }
1267
arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock * pACB)1268 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1269 {
1270 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1271
1272 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1273 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1274 writel(pACB->out_doorbell, ®->iobound_doorbell);
1275 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1276 pr_notice("arcmsr%d: wait 'abort all outstanding "
1277 "command' timeout\n", pACB->host->host_no);
1278 return false;
1279 }
1280 return true;
1281 }
1282
arcmsr_abort_allcmd(struct AdapterControlBlock * acb)1283 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1284 {
1285 uint8_t rtnval = 0;
1286 switch (acb->adapter_type) {
1287 case ACB_ADAPTER_TYPE_A:
1288 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1289 break;
1290 case ACB_ADAPTER_TYPE_B:
1291 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1292 break;
1293 case ACB_ADAPTER_TYPE_C:
1294 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1295 break;
1296 case ACB_ADAPTER_TYPE_D:
1297 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1298 break;
1299 case ACB_ADAPTER_TYPE_E:
1300 case ACB_ADAPTER_TYPE_F:
1301 rtnval = arcmsr_hbaE_abort_allcmd(acb);
1302 break;
1303 }
1304 return rtnval;
1305 }
1306
arcmsr_pci_unmap_dma(struct CommandControlBlock * ccb)1307 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
1308 {
1309 struct scsi_cmnd *pcmd = ccb->pcmd;
1310
1311 scsi_dma_unmap(pcmd);
1312 }
1313
arcmsr_ccb_complete(struct CommandControlBlock * ccb)1314 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
1315 {
1316 struct AdapterControlBlock *acb = ccb->acb;
1317 struct scsi_cmnd *pcmd = ccb->pcmd;
1318 unsigned long flags;
1319 atomic_dec(&acb->ccboutstandingcount);
1320 arcmsr_pci_unmap_dma(ccb);
1321 ccb->startdone = ARCMSR_CCB_DONE;
1322 spin_lock_irqsave(&acb->ccblist_lock, flags);
1323 list_add_tail(&ccb->list, &acb->ccb_free_list);
1324 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1325 pcmd->scsi_done(pcmd);
1326 }
1327
arcmsr_report_sense_info(struct CommandControlBlock * ccb)1328 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1329 {
1330 struct scsi_cmnd *pcmd = ccb->pcmd;
1331
1332 pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1333 if (pcmd->sense_buffer) {
1334 struct SENSE_DATA *sensebuffer;
1335
1336 memcpy_and_pad(pcmd->sense_buffer,
1337 SCSI_SENSE_BUFFERSIZE,
1338 ccb->arcmsr_cdb.SenseData,
1339 sizeof(ccb->arcmsr_cdb.SenseData),
1340 0);
1341
1342 sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1343 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1344 sensebuffer->Valid = 1;
1345 }
1346 }
1347
arcmsr_disable_outbound_ints(struct AdapterControlBlock * acb)1348 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1349 {
1350 u32 orig_mask = 0;
1351 switch (acb->adapter_type) {
1352 case ACB_ADAPTER_TYPE_A : {
1353 struct MessageUnit_A __iomem *reg = acb->pmuA;
1354 orig_mask = readl(®->outbound_intmask);
1355 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1356 ®->outbound_intmask);
1357 }
1358 break;
1359 case ACB_ADAPTER_TYPE_B : {
1360 struct MessageUnit_B *reg = acb->pmuB;
1361 orig_mask = readl(reg->iop2drv_doorbell_mask);
1362 writel(0, reg->iop2drv_doorbell_mask);
1363 }
1364 break;
1365 case ACB_ADAPTER_TYPE_C:{
1366 struct MessageUnit_C __iomem *reg = acb->pmuC;
1367 /* disable all outbound interrupt */
1368 orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
1369 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
1370 }
1371 break;
1372 case ACB_ADAPTER_TYPE_D: {
1373 struct MessageUnit_D *reg = acb->pmuD;
1374 /* disable all outbound interrupt */
1375 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1376 }
1377 break;
1378 case ACB_ADAPTER_TYPE_E:
1379 case ACB_ADAPTER_TYPE_F: {
1380 struct MessageUnit_E __iomem *reg = acb->pmuE;
1381 orig_mask = readl(®->host_int_mask);
1382 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask);
1383 readl(®->host_int_mask); /* Dummy readl to force pci flush */
1384 }
1385 break;
1386 }
1387 return orig_mask;
1388 }
1389
/*
 * arcmsr_report_ccb_state - translate a finished CCB's firmware
 * DeviceStatus into a SCSI midlayer result and complete the command,
 * updating acb->devstate[][] so the driver tracks which (id, lun) pairs
 * are alive.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		/* A successful command resurrects a device marked gone. */
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			/* Device did not respond: mark gone, no connect. */
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:
		/* fallthrough: aborted and init-failed both mean bad target */
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			/* Device is alive but reports sense data. */
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			/* Unrecognized status: log it and treat as gone. */
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
1441
/*
 * arcmsr_drain_donequeue - validate a CCB taken off the adapter's done
 * queue and route it to completion.
 *
 * A CCB that was aborted while in flight is completed with DID_ABORT;
 * a CCB that does not belong to this adapter or was never started is only
 * logged.  Everything else goes through arcmsr_report_ccb_state().
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		/* Bogus completion: not ours, or never started — log only. */
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
			done acb = '0x%p'"
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
			, acb->host->host_no
			, acb
			, pCCB
			, pCCB->acb
			, pCCB->startdone
			, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
1469
arcmsr_done4abort_postqueue(struct AdapterControlBlock * acb)1470 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1471 {
1472 int i = 0;
1473 uint32_t flag_ccb;
1474 struct ARCMSR_CDB *pARCMSR_CDB;
1475 bool error;
1476 struct CommandControlBlock *pCCB;
1477 unsigned long ccb_cdb_phy;
1478
1479 switch (acb->adapter_type) {
1480
1481 case ACB_ADAPTER_TYPE_A: {
1482 struct MessageUnit_A __iomem *reg = acb->pmuA;
1483 uint32_t outbound_intstatus;
1484 outbound_intstatus = readl(®->outbound_intstatus) &
1485 acb->outbound_int_enable;
1486 /*clear and abort all outbound posted Q*/
1487 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
1488 while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
1489 && (i++ < acb->maxOutstanding)) {
1490 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1491 if (acb->cdb_phyadd_hipart)
1492 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1493 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1494 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1495 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1496 arcmsr_drain_donequeue(acb, pCCB, error);
1497 }
1498 }
1499 break;
1500
1501 case ACB_ADAPTER_TYPE_B: {
1502 struct MessageUnit_B *reg = acb->pmuB;
1503 /*clear all outbound posted Q*/
1504 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1505 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1506 flag_ccb = reg->done_qbuffer[i];
1507 if (flag_ccb != 0) {
1508 reg->done_qbuffer[i] = 0;
1509 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1510 if (acb->cdb_phyadd_hipart)
1511 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1512 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1513 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1514 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1515 arcmsr_drain_donequeue(acb, pCCB, error);
1516 }
1517 reg->post_qbuffer[i] = 0;
1518 }
1519 reg->doneq_index = 0;
1520 reg->postq_index = 0;
1521 }
1522 break;
1523 case ACB_ADAPTER_TYPE_C: {
1524 struct MessageUnit_C __iomem *reg = acb->pmuC;
1525 while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1526 /*need to do*/
1527 flag_ccb = readl(®->outbound_queueport_low);
1528 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1529 if (acb->cdb_phyadd_hipart)
1530 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1531 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1532 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1533 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1534 arcmsr_drain_donequeue(acb, pCCB, error);
1535 }
1536 }
1537 break;
1538 case ACB_ADAPTER_TYPE_D: {
1539 struct MessageUnit_D *pmu = acb->pmuD;
1540 uint32_t outbound_write_pointer;
1541 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1542 unsigned long flags;
1543
1544 residual = atomic_read(&acb->ccboutstandingcount);
1545 for (i = 0; i < residual; i++) {
1546 spin_lock_irqsave(&acb->doneq_lock, flags);
1547 outbound_write_pointer =
1548 pmu->done_qbuffer[0].addressLow + 1;
1549 doneq_index = pmu->doneq_index;
1550 if ((doneq_index & 0xFFF) !=
1551 (outbound_write_pointer & 0xFFF)) {
1552 toggle = doneq_index & 0x4000;
1553 index_stripped = (doneq_index & 0xFFF) + 1;
1554 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1555 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1556 ((toggle ^ 0x4000) + 1);
1557 doneq_index = pmu->doneq_index;
1558 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1559 addressLow = pmu->done_qbuffer[doneq_index &
1560 0xFFF].addressLow;
1561 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1562 if (acb->cdb_phyadd_hipart)
1563 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1564 pARCMSR_CDB = (struct ARCMSR_CDB *)
1565 (acb->vir2phy_offset + ccb_cdb_phy);
1566 pCCB = container_of(pARCMSR_CDB,
1567 struct CommandControlBlock, arcmsr_cdb);
1568 error = (addressLow &
1569 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1570 true : false;
1571 arcmsr_drain_donequeue(acb, pCCB, error);
1572 writel(doneq_index,
1573 pmu->outboundlist_read_pointer);
1574 } else {
1575 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1576 mdelay(10);
1577 }
1578 }
1579 pmu->postq_index = 0;
1580 pmu->doneq_index = 0x40FF;
1581 }
1582 break;
1583 case ACB_ADAPTER_TYPE_E:
1584 arcmsr_hbaE_postqueue_isr(acb);
1585 break;
1586 case ACB_ADAPTER_TYPE_F:
1587 arcmsr_hbaF_postqueue_isr(acb);
1588 break;
1589 }
1590 }
1591
arcmsr_remove_scsi_devices(struct AdapterControlBlock * acb)1592 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1593 {
1594 char *acb_dev_map = (char *)acb->device_map;
1595 int target, lun, i;
1596 struct scsi_device *psdev;
1597 struct CommandControlBlock *ccb;
1598 char temp;
1599
1600 for (i = 0; i < acb->maxFreeCCB; i++) {
1601 ccb = acb->pccb_pool[i];
1602 if (ccb->startdone == ARCMSR_CCB_START) {
1603 ccb->pcmd->result = DID_NO_CONNECT << 16;
1604 arcmsr_pci_unmap_dma(ccb);
1605 ccb->pcmd->scsi_done(ccb->pcmd);
1606 }
1607 }
1608 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1609 temp = *acb_dev_map;
1610 if (temp) {
1611 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1612 if (temp & 1) {
1613 psdev = scsi_device_lookup(acb->host,
1614 0, target, lun);
1615 if (psdev != NULL) {
1616 scsi_remove_device(psdev);
1617 scsi_device_put(psdev);
1618 }
1619 }
1620 temp >>= 1;
1621 }
1622 *acb_dev_map = 0;
1623 }
1624 acb_dev_map++;
1625 }
1626 }
1627
/*
 * arcmsr_free_pcidev - teardown path for a surprise-removed adapter:
 * detach sysfs and the SCSI host, quiesce deferred work and timers, then
 * release IRQs, CCB pool, the type F I/O queue, BAR mappings, PCI regions
 * and the device itself — strictly in that order.
 */
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev;
	struct Scsi_Host *host;

	host = acb->host;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	pdev = acb->pdev;
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F keeps its I/O queue in a separate allocation. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1650
/*
 * arcmsr_remove - PCI remove: shut the adapter down cleanly.
 *
 * If a PCI config read returns all-ones the card has been surprise-removed,
 * so skip all hardware access and only clean up software state.  Otherwise
 * stop timers and background activity, give outstanding commands a chance
 * to drain, forcibly abort whatever remains, and release every resource
 * acquired in arcmsr_probe().
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	if (dev_id == 0xffff) {
		/* 0xffff device id: the card is physically gone. */
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* Poll for completions until outstanding commands drain. */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* Drain timed out: abort everything still in flight. */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F keeps its I/O queue in a separate allocation. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1709
/*
 * arcmsr_shutdown - PCI shutdown: quiesce the adapter for reboot/poweroff.
 * Stops timers, masks interrupts, releases IRQs, drains the bottom half,
 * then halts background rebuild and flushes the adapter cache.  A
 * surprise-removed adapter is skipped entirely.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
1726
arcmsr_module_init(void)1727 static int arcmsr_module_init(void)
1728 {
1729 int error = 0;
1730 error = pci_register_driver(&arcmsr_pci_driver);
1731 return error;
1732 }
1733
/* Module exit point: unregister the PCI driver from the core. */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
1740
/*
 * Re-enable the adapter's outbound interrupt sources, using the mask value
 * @intmask_org that was saved when interrupts were disabled.  Register
 * layout and bit polarity differ per adapter generation.
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* Clear the mask bits for post-queue, doorbell and message0. */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		/* Type B: set the doorbell bits to enable those sources. */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR |
			ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
	}
	}
}
1791
arcmsr_build_ccb(struct AdapterControlBlock * acb,struct CommandControlBlock * ccb,struct scsi_cmnd * pcmd)1792 static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1793 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
1794 {
1795 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1796 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
1797 __le32 address_lo, address_hi;
1798 int arccdbsize = 0x30;
1799 __le32 length = 0;
1800 int i;
1801 struct scatterlist *sg;
1802 int nseg;
1803 ccb->pcmd = pcmd;
1804 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
1805 arcmsr_cdb->TargetID = pcmd->device->id;
1806 arcmsr_cdb->LUN = pcmd->device->lun;
1807 arcmsr_cdb->Function = 1;
1808 arcmsr_cdb->msgContext = 0;
1809 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1810
1811 nseg = scsi_dma_map(pcmd);
1812 if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1813 return FAILED;
1814 scsi_for_each_sg(pcmd, sg, nseg, i) {
1815 /* Get the physical address of the current data pointer */
1816 length = cpu_to_le32(sg_dma_len(sg));
1817 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1818 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
1819 if (address_hi == 0) {
1820 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1821
1822 pdma_sg->address = address_lo;
1823 pdma_sg->length = length;
1824 psge += sizeof (struct SG32ENTRY);
1825 arccdbsize += sizeof (struct SG32ENTRY);
1826 } else {
1827 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1828
1829 pdma_sg->addresshigh = address_hi;
1830 pdma_sg->address = address_lo;
1831 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1832 psge += sizeof (struct SG64ENTRY);
1833 arccdbsize += sizeof (struct SG64ENTRY);
1834 }
1835 }
1836 arcmsr_cdb->sgcount = (uint8_t)nseg;
1837 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1838 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1839 if ( arccdbsize > 256)
1840 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1841 if (pcmd->sc_data_direction == DMA_TO_DEVICE)
1842 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1843 ccb->arc_cdb_size = arccdbsize;
1844 return SUCCESS;
1845 }
1846
arcmsr_post_ccb(struct AdapterControlBlock * acb,struct CommandControlBlock * ccb)1847 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1848 {
1849 uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
1850 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1851 atomic_inc(&acb->ccboutstandingcount);
1852 ccb->startdone = ARCMSR_CCB_START;
1853 switch (acb->adapter_type) {
1854 case ACB_ADAPTER_TYPE_A: {
1855 struct MessageUnit_A __iomem *reg = acb->pmuA;
1856
1857 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1858 writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1859 ®->inbound_queueport);
1860 else
1861 writel(cdb_phyaddr, ®->inbound_queueport);
1862 break;
1863 }
1864
1865 case ACB_ADAPTER_TYPE_B: {
1866 struct MessageUnit_B *reg = acb->pmuB;
1867 uint32_t ending_index, index = reg->postq_index;
1868
1869 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1870 reg->post_qbuffer[ending_index] = 0;
1871 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1872 reg->post_qbuffer[index] =
1873 cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
1874 } else {
1875 reg->post_qbuffer[index] = cdb_phyaddr;
1876 }
1877 index++;
1878 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
1879 reg->postq_index = index;
1880 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
1881 }
1882 break;
1883 case ACB_ADAPTER_TYPE_C: {
1884 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1885 uint32_t ccb_post_stamp, arc_cdb_size;
1886
1887 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1888 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1889 writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
1890 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1891 }
1892 break;
1893 case ACB_ADAPTER_TYPE_D: {
1894 struct MessageUnit_D *pmu = acb->pmuD;
1895 u16 index_stripped;
1896 u16 postq_index, toggle;
1897 unsigned long flags;
1898 struct InBound_SRB *pinbound_srb;
1899
1900 spin_lock_irqsave(&acb->postq_lock, flags);
1901 postq_index = pmu->postq_index;
1902 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1903 pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
1904 pinbound_srb->addressLow = cdb_phyaddr;
1905 pinbound_srb->length = ccb->arc_cdb_size >> 2;
1906 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
1907 toggle = postq_index & 0x4000;
1908 index_stripped = postq_index + 1;
1909 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
1910 pmu->postq_index = index_stripped ? (index_stripped | toggle) :
1911 (toggle ^ 0x4000);
1912 writel(postq_index, pmu->inboundlist_write_pointer);
1913 spin_unlock_irqrestore(&acb->postq_lock, flags);
1914 break;
1915 }
1916 case ACB_ADAPTER_TYPE_E: {
1917 struct MessageUnit_E __iomem *pmu = acb->pmuE;
1918 u32 ccb_post_stamp, arc_cdb_size;
1919
1920 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1921 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
1922 writel(0, &pmu->inbound_queueport_high);
1923 writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1924 break;
1925 }
1926 case ACB_ADAPTER_TYPE_F: {
1927 struct MessageUnit_F __iomem *pmu = acb->pmuF;
1928 u32 ccb_post_stamp, arc_cdb_size;
1929
1930 if (ccb->arc_cdb_size <= 0x300)
1931 arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
1932 else {
1933 arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
1934 if (arc_cdb_size > 0xF)
1935 arc_cdb_size = 0xF;
1936 arc_cdb_size = (arc_cdb_size << 1) | 1;
1937 }
1938 ccb_post_stamp = (ccb->smid | arc_cdb_size);
1939 writel(0, &pmu->inbound_queueport_high);
1940 writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1941 break;
1942 }
1943 }
1944 }
1945
arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock * acb)1946 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1947 {
1948 struct MessageUnit_A __iomem *reg = acb->pmuA;
1949 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1950 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1951 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1952 printk(KERN_NOTICE
1953 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1954 , acb->host->host_no);
1955 }
1956 }
1957
arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock * acb)1958 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1959 {
1960 struct MessageUnit_B *reg = acb->pmuB;
1961 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1962 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1963
1964 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1965 printk(KERN_NOTICE
1966 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1967 , acb->host->host_no);
1968 }
1969 }
1970
arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock * pACB)1971 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1972 {
1973 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1974 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1975 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1976 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1977 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1978 printk(KERN_NOTICE
1979 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1980 , pACB->host->host_no);
1981 }
1982 return;
1983 }
1984
arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock * pACB)1985 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1986 {
1987 struct MessageUnit_D *reg = pACB->pmuD;
1988
1989 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1990 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1991 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1992 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1993 "timeout\n", pACB->host->host_no);
1994 }
1995
arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock * pACB)1996 static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
1997 {
1998 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1999
2000 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
2001 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
2002 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
2003 writel(pACB->out_doorbell, ®->iobound_doorbell);
2004 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
2005 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
2006 "timeout\n", pACB->host->host_no);
2007 }
2008 }
2009
arcmsr_stop_adapter_bgrb(struct AdapterControlBlock * acb)2010 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2011 {
2012 switch (acb->adapter_type) {
2013 case ACB_ADAPTER_TYPE_A:
2014 arcmsr_hbaA_stop_bgrb(acb);
2015 break;
2016 case ACB_ADAPTER_TYPE_B:
2017 arcmsr_hbaB_stop_bgrb(acb);
2018 break;
2019 case ACB_ADAPTER_TYPE_C:
2020 arcmsr_hbaC_stop_bgrb(acb);
2021 break;
2022 case ACB_ADAPTER_TYPE_D:
2023 arcmsr_hbaD_stop_bgrb(acb);
2024 break;
2025 case ACB_ADAPTER_TYPE_E:
2026 case ACB_ADAPTER_TYPE_F:
2027 arcmsr_hbaE_stop_bgrb(acb);
2028 break;
2029 }
2030 }
2031
arcmsr_free_ccb_pool(struct AdapterControlBlock * acb)2032 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
2033 {
2034 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
2035 }
2036
arcmsr_iop_message_read(struct AdapterControlBlock * acb)2037 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
2038 {
2039 switch (acb->adapter_type) {
2040 case ACB_ADAPTER_TYPE_A: {
2041 struct MessageUnit_A __iomem *reg = acb->pmuA;
2042 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
2043 }
2044 break;
2045 case ACB_ADAPTER_TYPE_B: {
2046 struct MessageUnit_B *reg = acb->pmuB;
2047 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
2048 }
2049 break;
2050 case ACB_ADAPTER_TYPE_C: {
2051 struct MessageUnit_C __iomem *reg = acb->pmuC;
2052
2053 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
2054 }
2055 break;
2056 case ACB_ADAPTER_TYPE_D: {
2057 struct MessageUnit_D *reg = acb->pmuD;
2058 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
2059 reg->inbound_doorbell);
2060 }
2061 break;
2062 case ACB_ADAPTER_TYPE_E:
2063 case ACB_ADAPTER_TYPE_F: {
2064 struct MessageUnit_E __iomem *reg = acb->pmuE;
2065 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
2066 writel(acb->out_doorbell, ®->iobound_doorbell);
2067 }
2068 break;
2069 }
2070 }
2071
arcmsr_iop_message_wrote(struct AdapterControlBlock * acb)2072 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
2073 {
2074 switch (acb->adapter_type) {
2075 case ACB_ADAPTER_TYPE_A: {
2076 struct MessageUnit_A __iomem *reg = acb->pmuA;
2077 /*
2078 ** push inbound doorbell tell iop, driver data write ok
2079 ** and wait reply on next hwinterrupt for next Qbuffer post
2080 */
2081 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell);
2082 }
2083 break;
2084
2085 case ACB_ADAPTER_TYPE_B: {
2086 struct MessageUnit_B *reg = acb->pmuB;
2087 /*
2088 ** push inbound doorbell tell iop, driver data write ok
2089 ** and wait reply on next hwinterrupt for next Qbuffer post
2090 */
2091 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
2092 }
2093 break;
2094 case ACB_ADAPTER_TYPE_C: {
2095 struct MessageUnit_C __iomem *reg = acb->pmuC;
2096 /*
2097 ** push inbound doorbell tell iop, driver data write ok
2098 ** and wait reply on next hwinterrupt for next Qbuffer post
2099 */
2100 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell);
2101 }
2102 break;
2103 case ACB_ADAPTER_TYPE_D: {
2104 struct MessageUnit_D *reg = acb->pmuD;
2105 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
2106 reg->inbound_doorbell);
2107 }
2108 break;
2109 case ACB_ADAPTER_TYPE_E:
2110 case ACB_ADAPTER_TYPE_F: {
2111 struct MessageUnit_E __iomem *reg = acb->pmuE;
2112 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
2113 writel(acb->out_doorbell, ®->iobound_doorbell);
2114 }
2115 break;
2116 }
2117 }
2118
arcmsr_get_iop_rqbuffer(struct AdapterControlBlock * acb)2119 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2120 {
2121 struct QBUFFER __iomem *qbuffer = NULL;
2122 switch (acb->adapter_type) {
2123
2124 case ACB_ADAPTER_TYPE_A: {
2125 struct MessageUnit_A __iomem *reg = acb->pmuA;
2126 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2127 }
2128 break;
2129 case ACB_ADAPTER_TYPE_B: {
2130 struct MessageUnit_B *reg = acb->pmuB;
2131 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2132 }
2133 break;
2134 case ACB_ADAPTER_TYPE_C: {
2135 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2136 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2137 }
2138 break;
2139 case ACB_ADAPTER_TYPE_D: {
2140 struct MessageUnit_D *reg = acb->pmuD;
2141 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2142 }
2143 break;
2144 case ACB_ADAPTER_TYPE_E: {
2145 struct MessageUnit_E __iomem *reg = acb->pmuE;
2146 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2147 }
2148 break;
2149 case ACB_ADAPTER_TYPE_F: {
2150 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2151 }
2152 break;
2153 }
2154 return qbuffer;
2155 }
2156
arcmsr_get_iop_wqbuffer(struct AdapterControlBlock * acb)2157 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2158 {
2159 struct QBUFFER __iomem *pqbuffer = NULL;
2160 switch (acb->adapter_type) {
2161
2162 case ACB_ADAPTER_TYPE_A: {
2163 struct MessageUnit_A __iomem *reg = acb->pmuA;
2164 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer;
2165 }
2166 break;
2167 case ACB_ADAPTER_TYPE_B: {
2168 struct MessageUnit_B *reg = acb->pmuB;
2169 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2170 }
2171 break;
2172 case ACB_ADAPTER_TYPE_C: {
2173 struct MessageUnit_C __iomem *reg = acb->pmuC;
2174 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2175 }
2176 break;
2177 case ACB_ADAPTER_TYPE_D: {
2178 struct MessageUnit_D *reg = acb->pmuD;
2179 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2180 }
2181 break;
2182 case ACB_ADAPTER_TYPE_E: {
2183 struct MessageUnit_E __iomem *reg = acb->pmuE;
2184 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2185 }
2186 break;
2187 case ACB_ADAPTER_TYPE_F:
2188 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2189 break;
2190 }
2191 return pqbuffer;
2192 }
2193
2194 static uint32_t
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock * acb,struct QBUFFER __iomem * prbuffer)2195 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2196 struct QBUFFER __iomem *prbuffer)
2197 {
2198 uint8_t *pQbuffer;
2199 uint8_t *buf1 = NULL;
2200 uint32_t __iomem *iop_data;
2201 uint32_t iop_len, data_len, *buf2 = NULL;
2202
2203 iop_data = (uint32_t __iomem *)prbuffer->data;
2204 iop_len = readl(&prbuffer->data_len);
2205 if (iop_len > 0) {
2206 buf1 = kmalloc(128, GFP_ATOMIC);
2207 buf2 = (uint32_t *)buf1;
2208 if (buf1 == NULL)
2209 return 0;
2210 data_len = iop_len;
2211 while (data_len >= 4) {
2212 *buf2++ = readl(iop_data);
2213 iop_data++;
2214 data_len -= 4;
2215 }
2216 if (data_len)
2217 *buf2 = readl(iop_data);
2218 buf2 = (uint32_t *)buf1;
2219 }
2220 while (iop_len > 0) {
2221 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2222 *pQbuffer = *buf1;
2223 acb->rqbuf_putIndex++;
2224 /* if last, index number set it to 0 */
2225 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2226 buf1++;
2227 iop_len--;
2228 }
2229 kfree(buf2);
2230 /* let IOP know data has been read */
2231 arcmsr_iop_message_read(acb);
2232 return 1;
2233 }
2234
2235 uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock * acb,struct QBUFFER __iomem * prbuffer)2236 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
2237 struct QBUFFER __iomem *prbuffer) {
2238
2239 uint8_t *pQbuffer;
2240 uint8_t __iomem *iop_data;
2241 uint32_t iop_len;
2242
2243 if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
2244 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
2245 iop_data = (uint8_t __iomem *)prbuffer->data;
2246 iop_len = readl(&prbuffer->data_len);
2247 while (iop_len > 0) {
2248 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2249 *pQbuffer = readb(iop_data);
2250 acb->rqbuf_putIndex++;
2251 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2252 iop_data++;
2253 iop_len--;
2254 }
2255 arcmsr_iop_message_read(acb);
2256 return 1;
2257 }
2258
arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock * acb)2259 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
2260 {
2261 unsigned long flags;
2262 struct QBUFFER __iomem *prbuffer;
2263 int32_t buf_empty_len;
2264
2265 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2266 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2267 buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
2268 (ARCMSR_MAX_QBUFFER - 1);
2269 if (buf_empty_len >= readl(&prbuffer->data_len)) {
2270 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2271 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2272 } else
2273 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2274 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2275 }
2276
arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock * acb)2277 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
2278 {
2279 uint8_t *pQbuffer;
2280 struct QBUFFER __iomem *pwbuffer;
2281 uint8_t *buf1 = NULL;
2282 uint32_t __iomem *iop_data;
2283 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
2284
2285 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2286 buf1 = kmalloc(128, GFP_ATOMIC);
2287 buf2 = (uint32_t *)buf1;
2288 if (buf1 == NULL)
2289 return;
2290
2291 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2292 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2293 iop_data = (uint32_t __iomem *)pwbuffer->data;
2294 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2295 && (allxfer_len < 124)) {
2296 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2297 *buf1 = *pQbuffer;
2298 acb->wqbuf_getIndex++;
2299 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2300 buf1++;
2301 allxfer_len++;
2302 }
2303 data_len = allxfer_len;
2304 buf1 = (uint8_t *)buf2;
2305 while (data_len >= 4) {
2306 data = *buf2++;
2307 writel(data, iop_data);
2308 iop_data++;
2309 data_len -= 4;
2310 }
2311 if (data_len) {
2312 data = *buf2;
2313 writel(data, iop_data);
2314 }
2315 writel(allxfer_len, &pwbuffer->data_len);
2316 kfree(buf1);
2317 arcmsr_iop_message_wrote(acb);
2318 }
2319 }
2320
2321 void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock * acb)2322 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
2323 {
2324 uint8_t *pQbuffer;
2325 struct QBUFFER __iomem *pwbuffer;
2326 uint8_t __iomem *iop_data;
2327 int32_t allxfer_len = 0;
2328
2329 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
2330 arcmsr_write_ioctldata2iop_in_DWORD(acb);
2331 return;
2332 }
2333 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2334 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2335 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2336 iop_data = (uint8_t __iomem *)pwbuffer->data;
2337 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2338 && (allxfer_len < 124)) {
2339 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2340 writeb(*pQbuffer, iop_data);
2341 acb->wqbuf_getIndex++;
2342 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2343 iop_data++;
2344 allxfer_len++;
2345 }
2346 writel(allxfer_len, &pwbuffer->data_len);
2347 arcmsr_iop_message_wrote(acb);
2348 }
2349 }
2350
arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock * acb)2351 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
2352 {
2353 unsigned long flags;
2354
2355 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2356 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
2357 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2358 arcmsr_write_ioctldata2iop(acb);
2359 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
2360 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
2361 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2362 }
2363
arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock * acb)2364 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
2365 {
2366 uint32_t outbound_doorbell;
2367 struct MessageUnit_A __iomem *reg = acb->pmuA;
2368 outbound_doorbell = readl(®->outbound_doorbell);
2369 do {
2370 writel(outbound_doorbell, ®->outbound_doorbell);
2371 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
2372 arcmsr_iop2drv_data_wrote_handle(acb);
2373 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
2374 arcmsr_iop2drv_data_read_handle(acb);
2375 outbound_doorbell = readl(®->outbound_doorbell);
2376 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
2377 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
2378 }
arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock * pACB)2379 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
2380 {
2381 uint32_t outbound_doorbell;
2382 struct MessageUnit_C __iomem *reg = pACB->pmuC;
2383 /*
2384 *******************************************************************
2385 ** Maybe here we need to check wrqbuffer_lock is lock or not
2386 ** DOORBELL: din! don!
2387 ** check if there are any mail need to pack from firmware
2388 *******************************************************************
2389 */
2390 outbound_doorbell = readl(®->outbound_doorbell);
2391 do {
2392 writel(outbound_doorbell, ®->outbound_doorbell_clear);
2393 readl(®->outbound_doorbell_clear);
2394 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
2395 arcmsr_iop2drv_data_wrote_handle(pACB);
2396 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
2397 arcmsr_iop2drv_data_read_handle(pACB);
2398 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
2399 arcmsr_hbaC_message_isr(pACB);
2400 outbound_doorbell = readl(®->outbound_doorbell);
2401 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
2402 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
2403 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
2404 }
2405
arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock * pACB)2406 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
2407 {
2408 uint32_t outbound_doorbell;
2409 struct MessageUnit_D *pmu = pACB->pmuD;
2410
2411 outbound_doorbell = readl(pmu->outbound_doorbell);
2412 do {
2413 writel(outbound_doorbell, pmu->outbound_doorbell);
2414 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
2415 arcmsr_hbaD_message_isr(pACB);
2416 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
2417 arcmsr_iop2drv_data_wrote_handle(pACB);
2418 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
2419 arcmsr_iop2drv_data_read_handle(pACB);
2420 outbound_doorbell = readl(pmu->outbound_doorbell);
2421 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
2422 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
2423 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
2424 }
2425
arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock * pACB)2426 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
2427 {
2428 uint32_t outbound_doorbell, in_doorbell, tmp, i;
2429 struct MessageUnit_E __iomem *reg = pACB->pmuE;
2430
2431 if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
2432 for (i = 0; i < 5; i++) {
2433 in_doorbell = readl(®->iobound_doorbell);
2434 if (in_doorbell != 0)
2435 break;
2436 }
2437 } else
2438 in_doorbell = readl(®->iobound_doorbell);
2439 outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
2440 do {
2441 writel(0, ®->host_int_status); /* clear interrupt */
2442 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
2443 arcmsr_iop2drv_data_wrote_handle(pACB);
2444 }
2445 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
2446 arcmsr_iop2drv_data_read_handle(pACB);
2447 }
2448 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
2449 arcmsr_hbaE_message_isr(pACB);
2450 }
2451 tmp = in_doorbell;
2452 in_doorbell = readl(®->iobound_doorbell);
2453 outbound_doorbell = tmp ^ in_doorbell;
2454 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
2455 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
2456 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
2457 pACB->in_doorbell = in_doorbell;
2458 }
2459
arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock * acb)2460 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
2461 {
2462 uint32_t flag_ccb;
2463 struct MessageUnit_A __iomem *reg = acb->pmuA;
2464 struct ARCMSR_CDB *pARCMSR_CDB;
2465 struct CommandControlBlock *pCCB;
2466 bool error;
2467 unsigned long cdb_phy_addr;
2468
2469 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) {
2470 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2471 if (acb->cdb_phyadd_hipart)
2472 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2473 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2474 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2475 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2476 arcmsr_drain_donequeue(acb, pCCB, error);
2477 }
2478 }
arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock * acb)2479 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
2480 {
2481 uint32_t index;
2482 uint32_t flag_ccb;
2483 struct MessageUnit_B *reg = acb->pmuB;
2484 struct ARCMSR_CDB *pARCMSR_CDB;
2485 struct CommandControlBlock *pCCB;
2486 bool error;
2487 unsigned long cdb_phy_addr;
2488
2489 index = reg->doneq_index;
2490 while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
2491 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
2492 if (acb->cdb_phyadd_hipart)
2493 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2494 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2495 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
2496 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2497 arcmsr_drain_donequeue(acb, pCCB, error);
2498 reg->done_qbuffer[index] = 0;
2499 index++;
2500 index %= ARCMSR_MAX_HBB_POSTQUEUE;
2501 reg->doneq_index = index;
2502 }
2503 }
2504
arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock * acb)2505 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
2506 {
2507 struct MessageUnit_C __iomem *phbcmu;
2508 struct ARCMSR_CDB *arcmsr_cdb;
2509 struct CommandControlBlock *ccb;
2510 uint32_t flag_ccb, throttling = 0;
2511 unsigned long ccb_cdb_phy;
2512 int error;
2513
2514 phbcmu = acb->pmuC;
2515 /* areca cdb command done */
2516 /* Use correct offset and size for syncing */
2517
2518 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
2519 0xFFFFFFFF) {
2520 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2521 if (acb->cdb_phyadd_hipart)
2522 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2523 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2524 + ccb_cdb_phy);
2525 ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
2526 arcmsr_cdb);
2527 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2528 ? true : false;
2529 /* check if command done with no error */
2530 arcmsr_drain_donequeue(acb, ccb, error);
2531 throttling++;
2532 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
2533 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
2534 &phbcmu->inbound_doorbell);
2535 throttling = 0;
2536 }
2537 }
2538 }
2539
arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock * acb)2540 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
2541 {
2542 u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
2543 uint32_t addressLow;
2544 int error;
2545 struct MessageUnit_D *pmu;
2546 struct ARCMSR_CDB *arcmsr_cdb;
2547 struct CommandControlBlock *ccb;
2548 unsigned long flags, ccb_cdb_phy;
2549
2550 spin_lock_irqsave(&acb->doneq_lock, flags);
2551 pmu = acb->pmuD;
2552 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
2553 doneq_index = pmu->doneq_index;
2554 if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
2555 do {
2556 toggle = doneq_index & 0x4000;
2557 index_stripped = (doneq_index & 0xFFF) + 1;
2558 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
2559 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
2560 ((toggle ^ 0x4000) + 1);
2561 doneq_index = pmu->doneq_index;
2562 addressLow = pmu->done_qbuffer[doneq_index &
2563 0xFFF].addressLow;
2564 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
2565 if (acb->cdb_phyadd_hipart)
2566 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2567 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2568 + ccb_cdb_phy);
2569 ccb = container_of(arcmsr_cdb,
2570 struct CommandControlBlock, arcmsr_cdb);
2571 error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2572 ? true : false;
2573 arcmsr_drain_donequeue(acb, ccb, error);
2574 writel(doneq_index, pmu->outboundlist_read_pointer);
2575 } while ((doneq_index & 0xFFF) !=
2576 (outbound_write_pointer & 0xFFF));
2577 }
2578 writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
2579 pmu->outboundlist_interrupt_cause);
2580 readl(pmu->outboundlist_interrupt_cause);
2581 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2582 }
2583
/*
 * Drain the type-E adapter's completion queue: walk our consumer index
 * forward to the producer index published by the IOP, completing each
 * referenced CCB, then write the consumer index back to the controller.
 */
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_E __iomem *pmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	pmu = acb->pmuE;
	/* producer index lives in the low 16 bits of the register */
	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
		/* the SMID in the completion entry selects the CCB */
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag
			& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;	/* ring wrap */
	}
	acb->doneq_index = doneq_index;
	/* publish the new consumer index back to the IOP */
	writel(doneq_index, &pmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2610
/*
 * Drain the type-F adapter's completion queue. Unlike type E there is no
 * producer-index register read here: a queue entry whose cmdSMID is 0xffff
 * marks the end of valid completions, and each consumed slot is stamped
 * back to 0xffff so it is not processed twice.
 */
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_F __iomem *phbcmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	phbcmu = acb->pmuF;
	while (1) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		if (cmdSMID == 0xffff)
			break;	/* no more valid completions */
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		/* invalidate the slot for the next pass around the ring */
		acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;	/* ring wrap */
	}
	acb->doneq_index = doneq_index;
	/* publish the new consumer index back to the IOP */
	writel(doneq_index, &phbcmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2640
2641 /*
2642 **********************************************************************************
2643 ** Handle a message interrupt
2644 **
2645 ** The only message interrupt we expect is in response to a query for the current adapter config.
2646 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2647 **********************************************************************************
2648 */
arcmsr_hbaA_message_isr(struct AdapterControlBlock * acb)2649 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2650 {
2651 struct MessageUnit_A __iomem *reg = acb->pmuA;
2652 /*clear interrupt and message state*/
2653 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
2654 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2655 schedule_work(&acb->arcmsr_do_message_isr_bh);
2656 }
arcmsr_hbaB_message_isr(struct AdapterControlBlock * acb)2657 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2658 {
2659 struct MessageUnit_B *reg = acb->pmuB;
2660
2661 /*clear interrupt and message state*/
2662 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2663 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2664 schedule_work(&acb->arcmsr_do_message_isr_bh);
2665 }
2666 /*
2667 **********************************************************************************
2668 ** Handle a message interrupt
2669 **
2670 ** The only message interrupt we expect is in response to a query for the
2671 ** current adapter config.
2672 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2673 **********************************************************************************
2674 */
arcmsr_hbaC_message_isr(struct AdapterControlBlock * acb)2675 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2676 {
2677 struct MessageUnit_C __iomem *reg = acb->pmuC;
2678 /*clear interrupt and message state*/
2679 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
2680 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2681 schedule_work(&acb->arcmsr_do_message_isr_bh);
2682 }
2683
arcmsr_hbaD_message_isr(struct AdapterControlBlock * acb)2684 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2685 {
2686 struct MessageUnit_D *reg = acb->pmuD;
2687
2688 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2689 readl(reg->outbound_doorbell);
2690 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2691 schedule_work(&acb->arcmsr_do_message_isr_bh);
2692 }
2693
arcmsr_hbaE_message_isr(struct AdapterControlBlock * acb)2694 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2695 {
2696 struct MessageUnit_E __iomem *reg = acb->pmuE;
2697
2698 writel(0, ®->host_int_status);
2699 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2700 schedule_work(&acb->arcmsr_do_message_isr_bh);
2701 }
2702
arcmsr_hbaA_handle_isr(struct AdapterControlBlock * acb)2703 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2704 {
2705 uint32_t outbound_intstatus;
2706 struct MessageUnit_A __iomem *reg = acb->pmuA;
2707 outbound_intstatus = readl(®->outbound_intstatus) &
2708 acb->outbound_int_enable;
2709 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2710 return IRQ_NONE;
2711 do {
2712 writel(outbound_intstatus, ®->outbound_intstatus);
2713 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2714 arcmsr_hbaA_doorbell_isr(acb);
2715 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2716 arcmsr_hbaA_postqueue_isr(acb);
2717 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2718 arcmsr_hbaA_message_isr(acb);
2719 outbound_intstatus = readl(®->outbound_intstatus) &
2720 acb->outbound_int_enable;
2721 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2722 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2723 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2724 return IRQ_HANDLED;
2725 }
2726
/*
 * Top-level interrupt handler for type-B adapters. Samples the iop2drv
 * doorbell (masked by the driver's enable mask) and loops dispatching
 * data-write / data-read / CDB-done / message sub-handlers until no
 * enabled source remains pending.
 * Returns IRQ_HANDLED if any enabled source was pending, else IRQ_NONE.
 */
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;
	outbound_doorbell = readl(reg->iop2drv_doorbell) &
		acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		/* ack the bits being serviced, then signal end-of-interrupt */
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		/* re-sample: more work may have arrived while servicing */
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}
2754
/*
 * Top-level interrupt handler for type-C adapters. Samples the host
 * interrupt status, and loops dispatching doorbell / post-queue
 * sub-handlers until neither source remains pending.
 * Returns IRQ_HANDLED if a source was pending, else IRQ_NONE.
 */
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		/* re-sample: more work may have arrived while servicing */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2780
/*
 * Top-level interrupt handler for type-D (ARC1214) adapters. Samples the
 * host interrupt status and loops dispatching post-queue / doorbell
 * sub-handlers until neither source remains pending.
 * Returns IRQ_HANDLED if a source was pending, else IRQ_NONE.
 */
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		/* re-sample: more work may have arrived while servicing */
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2805
arcmsr_hbaE_handle_isr(struct AdapterControlBlock * pACB)2806 static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
2807 {
2808 uint32_t host_interrupt_status;
2809 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
2810
2811 host_interrupt_status = readl(&pmu->host_int_status) &
2812 (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2813 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2814 if (!host_interrupt_status)
2815 return IRQ_NONE;
2816 do {
2817 /* MU ioctl transfer doorbell interrupts*/
2818 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
2819 arcmsr_hbaE_doorbell_isr(pACB);
2820 }
2821 /* MU post queue interrupts*/
2822 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
2823 arcmsr_hbaE_postqueue_isr(pACB);
2824 }
2825 host_interrupt_status = readl(&pmu->host_int_status);
2826 } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2827 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2828 return IRQ_HANDLED;
2829 }
2830
/*
 * Top-level interrupt handler for type-F adapters. Note that type F
 * reuses the HBEMU interrupt bit definitions and the type-E doorbell
 * sub-handler; only the post-queue handler is type-F specific.
 * Returns IRQ_HANDLED if a source was pending, else IRQ_NONE.
 */
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		/* re-sample: more work may have arrived while servicing */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2855
arcmsr_interrupt(struct AdapterControlBlock * acb)2856 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2857 {
2858 switch (acb->adapter_type) {
2859 case ACB_ADAPTER_TYPE_A:
2860 return arcmsr_hbaA_handle_isr(acb);
2861 case ACB_ADAPTER_TYPE_B:
2862 return arcmsr_hbaB_handle_isr(acb);
2863 case ACB_ADAPTER_TYPE_C:
2864 return arcmsr_hbaC_handle_isr(acb);
2865 case ACB_ADAPTER_TYPE_D:
2866 return arcmsr_hbaD_handle_isr(acb);
2867 case ACB_ADAPTER_TYPE_E:
2868 return arcmsr_hbaE_handle_isr(acb);
2869 case ACB_ADAPTER_TYPE_F:
2870 return arcmsr_hbaF_handle_isr(acb);
2871 default:
2872 return IRQ_NONE;
2873 }
2874 }
2875
arcmsr_iop_parking(struct AdapterControlBlock * acb)2876 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2877 {
2878 if (acb) {
2879 /* stop adapter background rebuild */
2880 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2881 uint32_t intmask_org;
2882 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2883 intmask_org = arcmsr_disable_outbound_ints(acb);
2884 arcmsr_stop_adapter_bgrb(acb);
2885 arcmsr_flush_adapter_cache(acb);
2886 arcmsr_enable_outbound_ints(acb, intmask_org);
2887 }
2888 }
2889 }
2890
2891
/*
 * Drain and reset the IOP-to-driver request ring when it has overflowed.
 * Retries up to 15 times: while the overflow flag is set, reset the ring
 * indices and pull the next chunk from the IOP (which may set the flag
 * again); once only residual data remains, reset the indices; stop when
 * the ring is empty. The 30 ms delays give the IOP time between rounds.
 */
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				/* may re-set ACB_F_IOPDATA_OVERFLOW */
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				acb->rqbuf_putIndex) {
				/* leftover data only: just reset the ring */
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;	/* ring fully drained */
		}
	}
}
2914
/*
 * Service an ioctl-style message delivered through WRITE_BUFFER /
 * READ_BUFFER on the virtual device: decode the control code from CDB
 * bytes 5..8 and exchange data with the driver's IOP queue buffers.
 * The single scatterlist segment is kmapped for the duration and holds
 * a struct CMD_MESSAGE_FIELD.
 * Returns 0 on success, ARCMSR_MESSAGE_FAIL on any error.
 */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	/* control code is big-endian in CDB bytes 5..8 */
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		/* only single-segment requests are supported */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy queued IOP->driver data out to the caller's buffer */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* ring may wrap: copy in up to two chunks */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* ring space was freed: pull more data from the IOP */
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue caller data for transmission to the IOP */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			/* previous data still queued: flush it and report
			 * ILLEGAL_REQUEST to the caller */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			/* wrap the circular write buffer if needed */
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* reset the IOP->driver circular buffer */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* reset the driver->IOP circular buffer */
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* reset both circular buffers */
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
3168
arcmsr_get_freeccb(struct AdapterControlBlock * acb)3169 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3170 {
3171 struct list_head *head;
3172 struct CommandControlBlock *ccb = NULL;
3173 unsigned long flags;
3174
3175 spin_lock_irqsave(&acb->ccblist_lock, flags);
3176 head = &acb->ccb_free_list;
3177 if (!list_empty(head)) {
3178 ccb = list_entry(head->next, struct CommandControlBlock, list);
3179 list_del_init(&ccb->list);
3180 }else{
3181 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3182 return NULL;
3183 }
3184 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3185 return ccb;
3186 }
3187
arcmsr_handle_virtual_command(struct AdapterControlBlock * acb,struct scsi_cmnd * cmd)3188 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3189 struct scsi_cmnd *cmd)
3190 {
3191 switch (cmd->cmnd[0]) {
3192 case INQUIRY: {
3193 unsigned char inqdata[36];
3194 char *buffer;
3195 struct scatterlist *sg;
3196
3197 if (cmd->device->lun) {
3198 cmd->result = (DID_TIME_OUT << 16);
3199 cmd->scsi_done(cmd);
3200 return;
3201 }
3202 inqdata[0] = TYPE_PROCESSOR;
3203 /* Periph Qualifier & Periph Dev Type */
3204 inqdata[1] = 0;
3205 /* rem media bit & Dev Type Modifier */
3206 inqdata[2] = 0;
3207 /* ISO, ECMA, & ANSI versions */
3208 inqdata[4] = 31;
3209 /* length of additional data */
3210 memcpy(&inqdata[8], "Areca ", 8);
3211 /* Vendor Identification */
3212 memcpy(&inqdata[16], "RAID controller ", 16);
3213 /* Product Identification */
3214 memcpy(&inqdata[32], "R001", 4); /* Product Revision */
3215
3216 sg = scsi_sglist(cmd);
3217 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
3218
3219 memcpy(buffer, inqdata, sizeof(inqdata));
3220 sg = scsi_sglist(cmd);
3221 kunmap_atomic(buffer - sg->offset);
3222
3223 cmd->scsi_done(cmd);
3224 }
3225 break;
3226 case WRITE_BUFFER:
3227 case READ_BUFFER: {
3228 if (arcmsr_iop_message_xfer(acb, cmd))
3229 cmd->result = (DID_ERROR << 16);
3230 cmd->scsi_done(cmd);
3231 }
3232 break;
3233 default:
3234 cmd->scsi_done(cmd);
3235 }
3236 }
3237
/*
 * queuecommand entry point (locked variant, wrapped by DEF_SCSI_QCMD).
 * Rejects commands to a removed adapter, routes target 16 to the virtual
 * message device, otherwise builds a CCB and posts it to the IOP.
 * Returns 0, or SCSI_MLQUEUE_HOST_BUSY when no free CCB is available.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	/* adapter hot-removed: fail fast */
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;	/* retry later */
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
3270
/* Generate arcmsr_queue_command(), the host-lock-managing wrapper around
 * arcmsr_queue_command_lck(), via the SCSI midlayer helper macro. */
static DEF_SCSI_QCMD(arcmsr_queue_command)

3273 static int arcmsr_slave_config(struct scsi_device *sdev)
3274 {
3275 unsigned int dev_timeout;
3276
3277 dev_timeout = sdev->request_queue->rq_timeout;
3278 if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
3279 blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
3280 return 0;
3281 }
3282
/*
 * Copy the adapter's identification and capability data out of the
 * controller's message rwbuffer (32-bit reads via readl): model string
 * at word 15, firmware version at word 17, device map at word 21, plus
 * the scalar fields at words 0-4 and 25. Logs model and firmware.
 */
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int i;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	/* 2 words of model, 4 of version, 4 of device map */
	for (i = 0; i < 2; i++)
		acb_firm_model[i] = readl(&firm_model[i]);
	for (i = 0; i < 4; i++)
		acb_firm_version[i] = readl(&firm_version[i]);
	for (i = 0; i < 4; i++)
		acb_device_map[i] = readl(&device_map[i]);
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}
3325
arcmsr_hbaA_get_config(struct AdapterControlBlock * acb)3326 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3327 {
3328 struct MessageUnit_A __iomem *reg = acb->pmuA;
3329
3330 arcmsr_wait_firmware_ready(acb);
3331 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3332 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3333 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3334 miscellaneous data' timeout \n", acb->host->host_no);
3335 return false;
3336 }
3337 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3338 return true;
3339 }
/*
 * Query a type-B adapter's configuration: wait for firmware, switch the
 * IOP into driver mode, post the GET_CONFIG doorbell, then parse the
 * rwbuffer. Returns true on success, false on any doorbell timeout.
 */
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	/* type B requires driver mode before messaging */
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
3359
arcmsr_hbaC_get_config(struct AdapterControlBlock * pACB)3360 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
3361 {
3362 uint32_t intmask_org;
3363 struct MessageUnit_C __iomem *reg = pACB->pmuC;
3364
3365 /* disable all outbound interrupt */
3366 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3367 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
3368 /* wait firmware ready */
3369 arcmsr_wait_firmware_ready(pACB);
3370 /* post "get config" instruction */
3371 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3372 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3373 /* wait message ready */
3374 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3375 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3376 miscellaneous data' timeout \n", pACB->host->host_no);
3377 return false;
3378 }
3379 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3380 return true;
3381 }
3382
arcmsr_hbaD_get_config(struct AdapterControlBlock * acb)3383 static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
3384 {
3385 struct MessageUnit_D *reg = acb->pmuD;
3386
3387 if (readl(acb->pmuD->outbound_doorbell) &
3388 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
3389 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
3390 acb->pmuD->outbound_doorbell);/*clear interrupt*/
3391 }
3392 arcmsr_wait_firmware_ready(acb);
3393 /* post "get config" instruction */
3394 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
3395 /* wait message ready */
3396 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3397 pr_notice("arcmsr%d: wait get adapter firmware "
3398 "miscellaneous data timeout\n", acb->host->host_no);
3399 return false;
3400 }
3401 arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
3402 return true;
3403 }
3404
arcmsr_hbaE_get_config(struct AdapterControlBlock * pACB)3405 static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
3406 {
3407 struct MessageUnit_E __iomem *reg = pACB->pmuE;
3408 uint32_t intmask_org;
3409
3410 /* disable all outbound interrupt */
3411 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3412 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask);
3413 /* wait firmware ready */
3414 arcmsr_wait_firmware_ready(pACB);
3415 mdelay(20);
3416 /* post "get config" instruction */
3417 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3418
3419 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3420 writel(pACB->out_doorbell, ®->iobound_doorbell);
3421 /* wait message ready */
3422 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3423 pr_notice("arcmsr%d: wait get adapter firmware "
3424 "miscellaneous data timeout\n", pACB->host->host_no);
3425 return false;
3426 }
3427 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3428 return true;
3429 }
3430
arcmsr_hbaF_get_config(struct AdapterControlBlock * pACB)3431 static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
3432 {
3433 struct MessageUnit_F __iomem *reg = pACB->pmuF;
3434 uint32_t intmask_org;
3435
3436 /* disable all outbound interrupt */
3437 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3438 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask);
3439 /* wait firmware ready */
3440 arcmsr_wait_firmware_ready(pACB);
3441 /* post "get config" instruction */
3442 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3443
3444 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3445 writel(pACB->out_doorbell, ®->iobound_doorbell);
3446 /* wait message ready */
3447 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3448 pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
3449 pACB->host->host_no);
3450 return false;
3451 }
3452 arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
3453 return true;
3454 }
3455
/*
 * Fetch the adapter's firmware configuration (model, version, queue
 * depth) via the type-specific get_config routine, then derive the host
 * queue depth and free-CCB pool size from firm_numbers_queue.
 * Returns the get_config result.
 */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaF_get_config(acb);
		break;
	default:
		break;
	}
	/* NOTE(review): the queue-depth math below also runs when
	 * get_config failed (rtn == false), using a possibly stale/zero
	 * firm_numbers_queue — confirm callers abort on a false return. */
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}
3492
arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3493 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3494 struct CommandControlBlock *poll_ccb)
3495 {
3496 struct MessageUnit_A __iomem *reg = acb->pmuA;
3497 struct CommandControlBlock *ccb;
3498 struct ARCMSR_CDB *arcmsr_cdb;
3499 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
3500 int rtn;
3501 bool error;
3502 unsigned long ccb_cdb_phy;
3503
3504 polling_hba_ccb_retry:
3505 poll_count++;
3506 outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable;
3507 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
3508 while (1) {
3509 if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) {
3510 if (poll_ccb_done){
3511 rtn = SUCCESS;
3512 break;
3513 }else {
3514 msleep(25);
3515 if (poll_count > 100){
3516 rtn = FAILED;
3517 break;
3518 }
3519 goto polling_hba_ccb_retry;
3520 }
3521 }
3522 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3523 if (acb->cdb_phyadd_hipart)
3524 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3525 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3526 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3527 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3528 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3529 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3530 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3531 " poll command abort successfully \n"
3532 , acb->host->host_no
3533 , ccb->pcmd->device->id
3534 , (u32)ccb->pcmd->device->lun
3535 , ccb);
3536 ccb->pcmd->result = DID_ABORT << 16;
3537 arcmsr_ccb_complete(ccb);
3538 continue;
3539 }
3540 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3541 " command done ccb = '0x%p'"
3542 "ccboutstandingcount = %d \n"
3543 , acb->host->host_no
3544 , ccb
3545 , atomic_read(&acb->ccboutstandingcount));
3546 continue;
3547 }
3548 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3549 arcmsr_report_ccb_state(acb, ccb, error);
3550 }
3551 return rtn;
3552 }
3553
arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3554 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3555 struct CommandControlBlock *poll_ccb)
3556 {
3557 struct MessageUnit_B *reg = acb->pmuB;
3558 struct ARCMSR_CDB *arcmsr_cdb;
3559 struct CommandControlBlock *ccb;
3560 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3561 int index, rtn;
3562 bool error;
3563 unsigned long ccb_cdb_phy;
3564
3565 polling_hbb_ccb_retry:
3566 poll_count++;
3567 /* clear doorbell interrupt */
3568 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3569 while(1){
3570 index = reg->doneq_index;
3571 flag_ccb = reg->done_qbuffer[index];
3572 if (flag_ccb == 0) {
3573 if (poll_ccb_done){
3574 rtn = SUCCESS;
3575 break;
3576 }else {
3577 msleep(25);
3578 if (poll_count > 100){
3579 rtn = FAILED;
3580 break;
3581 }
3582 goto polling_hbb_ccb_retry;
3583 }
3584 }
3585 reg->done_qbuffer[index] = 0;
3586 index++;
3587 /*if last index number set it to 0 */
3588 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3589 reg->doneq_index = index;
3590 /* check if command done with no error*/
3591 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3592 if (acb->cdb_phyadd_hipart)
3593 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3594 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3595 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3596 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3597 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3598 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3599 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3600 " poll command abort successfully \n"
3601 ,acb->host->host_no
3602 ,ccb->pcmd->device->id
3603 ,(u32)ccb->pcmd->device->lun
3604 ,ccb);
3605 ccb->pcmd->result = DID_ABORT << 16;
3606 arcmsr_ccb_complete(ccb);
3607 continue;
3608 }
3609 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3610 " command done ccb = '0x%p'"
3611 "ccboutstandingcount = %d \n"
3612 , acb->host->host_no
3613 , ccb
3614 , atomic_read(&acb->ccboutstandingcount));
3615 continue;
3616 }
3617 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3618 arcmsr_report_ccb_state(acb, ccb, error);
3619 }
3620 return rtn;
3621 }
3622
arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3623 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3624 struct CommandControlBlock *poll_ccb)
3625 {
3626 struct MessageUnit_C __iomem *reg = acb->pmuC;
3627 uint32_t flag_ccb;
3628 struct ARCMSR_CDB *arcmsr_cdb;
3629 bool error;
3630 struct CommandControlBlock *pCCB;
3631 uint32_t poll_ccb_done = 0, poll_count = 0;
3632 int rtn;
3633 unsigned long ccb_cdb_phy;
3634
3635 polling_hbc_ccb_retry:
3636 poll_count++;
3637 while (1) {
3638 if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3639 if (poll_ccb_done) {
3640 rtn = SUCCESS;
3641 break;
3642 } else {
3643 msleep(25);
3644 if (poll_count > 100) {
3645 rtn = FAILED;
3646 break;
3647 }
3648 goto polling_hbc_ccb_retry;
3649 }
3650 }
3651 flag_ccb = readl(®->outbound_queueport_low);
3652 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3653 if (acb->cdb_phyadd_hipart)
3654 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3655 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3656 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3657 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3658 /* check ifcommand done with no error*/
3659 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3660 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3661 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3662 " poll command abort successfully \n"
3663 , acb->host->host_no
3664 , pCCB->pcmd->device->id
3665 , (u32)pCCB->pcmd->device->lun
3666 , pCCB);
3667 pCCB->pcmd->result = DID_ABORT << 16;
3668 arcmsr_ccb_complete(pCCB);
3669 continue;
3670 }
3671 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3672 " command done ccb = '0x%p'"
3673 "ccboutstandingcount = %d \n"
3674 , acb->host->host_no
3675 , pCCB
3676 , atomic_read(&acb->ccboutstandingcount));
3677 continue;
3678 }
3679 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3680 arcmsr_report_ccb_state(acb, pCCB, error);
3681 }
3682 return rtn;
3683 }
3684
arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3685 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3686 struct CommandControlBlock *poll_ccb)
3687 {
3688 bool error;
3689 uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
3690 int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
3691 unsigned long flags, ccb_cdb_phy;
3692 struct ARCMSR_CDB *arcmsr_cdb;
3693 struct CommandControlBlock *pCCB;
3694 struct MessageUnit_D *pmu = acb->pmuD;
3695
3696 polling_hbaD_ccb_retry:
3697 poll_count++;
3698 while (1) {
3699 spin_lock_irqsave(&acb->doneq_lock, flags);
3700 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
3701 doneq_index = pmu->doneq_index;
3702 if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
3703 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3704 if (poll_ccb_done) {
3705 rtn = SUCCESS;
3706 break;
3707 } else {
3708 msleep(25);
3709 if (poll_count > 40) {
3710 rtn = FAILED;
3711 break;
3712 }
3713 goto polling_hbaD_ccb_retry;
3714 }
3715 }
3716 toggle = doneq_index & 0x4000;
3717 index_stripped = (doneq_index & 0xFFF) + 1;
3718 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3719 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
3720 ((toggle ^ 0x4000) + 1);
3721 doneq_index = pmu->doneq_index;
3722 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3723 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
3724 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3725 if (acb->cdb_phyadd_hipart)
3726 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3727 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3728 ccb_cdb_phy);
3729 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
3730 arcmsr_cdb);
3731 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3732 if ((pCCB->acb != acb) ||
3733 (pCCB->startdone != ARCMSR_CCB_START)) {
3734 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3735 pr_notice("arcmsr%d: scsi id = %d "
3736 "lun = %d ccb = '0x%p' poll command "
3737 "abort successfully\n"
3738 , acb->host->host_no
3739 , pCCB->pcmd->device->id
3740 , (u32)pCCB->pcmd->device->lun
3741 , pCCB);
3742 pCCB->pcmd->result = DID_ABORT << 16;
3743 arcmsr_ccb_complete(pCCB);
3744 continue;
3745 }
3746 pr_notice("arcmsr%d: polling an illegal "
3747 "ccb command done ccb = '0x%p' "
3748 "ccboutstandingcount = %d\n"
3749 , acb->host->host_no
3750 , pCCB
3751 , atomic_read(&acb->ccboutstandingcount));
3752 continue;
3753 }
3754 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
3755 ? true : false;
3756 arcmsr_report_ccb_state(acb, pCCB, error);
3757 }
3758 return rtn;
3759 }
3760
arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3761 static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3762 struct CommandControlBlock *poll_ccb)
3763 {
3764 bool error;
3765 uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
3766 uint16_t cmdSMID;
3767 unsigned long flags;
3768 int rtn;
3769 struct CommandControlBlock *pCCB;
3770 struct MessageUnit_E __iomem *reg = acb->pmuE;
3771
3772 polling_hbaC_ccb_retry:
3773 poll_count++;
3774 while (1) {
3775 spin_lock_irqsave(&acb->doneq_lock, flags);
3776 doneq_index = acb->doneq_index;
3777 if ((readl(®->reply_post_producer_index) & 0xFFFF) ==
3778 doneq_index) {
3779 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3780 if (poll_ccb_done) {
3781 rtn = SUCCESS;
3782 break;
3783 } else {
3784 msleep(25);
3785 if (poll_count > 40) {
3786 rtn = FAILED;
3787 break;
3788 }
3789 goto polling_hbaC_ccb_retry;
3790 }
3791 }
3792 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3793 doneq_index++;
3794 if (doneq_index >= acb->completionQ_entry)
3795 doneq_index = 0;
3796 acb->doneq_index = doneq_index;
3797 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3798 pCCB = acb->pccb_pool[cmdSMID];
3799 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3800 /* check if command done with no error*/
3801 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3802 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3803 pr_notice("arcmsr%d: scsi id = %d "
3804 "lun = %d ccb = '0x%p' poll command "
3805 "abort successfully\n"
3806 , acb->host->host_no
3807 , pCCB->pcmd->device->id
3808 , (u32)pCCB->pcmd->device->lun
3809 , pCCB);
3810 pCCB->pcmd->result = DID_ABORT << 16;
3811 arcmsr_ccb_complete(pCCB);
3812 continue;
3813 }
3814 pr_notice("arcmsr%d: polling an illegal "
3815 "ccb command done ccb = '0x%p' "
3816 "ccboutstandingcount = %d\n"
3817 , acb->host->host_no
3818 , pCCB
3819 , atomic_read(&acb->ccboutstandingcount));
3820 continue;
3821 }
3822 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3823 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3824 arcmsr_report_ccb_state(acb, pCCB, error);
3825 }
3826 writel(doneq_index, ®->reply_post_consumer_index);
3827 return rtn;
3828 }
3829
arcmsr_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3830 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3831 struct CommandControlBlock *poll_ccb)
3832 {
3833 int rtn = 0;
3834 switch (acb->adapter_type) {
3835
3836 case ACB_ADAPTER_TYPE_A:
3837 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3838 break;
3839 case ACB_ADAPTER_TYPE_B:
3840 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3841 break;
3842 case ACB_ADAPTER_TYPE_C:
3843 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3844 break;
3845 case ACB_ADAPTER_TYPE_D:
3846 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3847 break;
3848 case ACB_ADAPTER_TYPE_E:
3849 case ACB_ADAPTER_TYPE_F:
3850 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3851 break;
3852 }
3853 return rtn;
3854 }
3855
arcmsr_set_iop_datetime(struct timer_list * t)3856 static void arcmsr_set_iop_datetime(struct timer_list *t)
3857 {
3858 struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
3859 unsigned int next_time;
3860 struct tm tm;
3861
3862 union {
3863 struct {
3864 uint16_t signature;
3865 uint8_t year;
3866 uint8_t month;
3867 uint8_t date;
3868 uint8_t hour;
3869 uint8_t minute;
3870 uint8_t second;
3871 } a;
3872 struct {
3873 uint32_t msg_time[2];
3874 } b;
3875 } datetime;
3876
3877 time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
3878
3879 datetime.a.signature = 0x55AA;
3880 datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
3881 datetime.a.month = tm.tm_mon;
3882 datetime.a.date = tm.tm_mday;
3883 datetime.a.hour = tm.tm_hour;
3884 datetime.a.minute = tm.tm_min;
3885 datetime.a.second = tm.tm_sec;
3886
3887 switch (pacb->adapter_type) {
3888 case ACB_ADAPTER_TYPE_A: {
3889 struct MessageUnit_A __iomem *reg = pacb->pmuA;
3890 writel(datetime.b.msg_time[0], ®->message_rwbuffer[0]);
3891 writel(datetime.b.msg_time[1], ®->message_rwbuffer[1]);
3892 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3893 break;
3894 }
3895 case ACB_ADAPTER_TYPE_B: {
3896 uint32_t __iomem *rwbuffer;
3897 struct MessageUnit_B *reg = pacb->pmuB;
3898 rwbuffer = reg->message_rwbuffer;
3899 writel(datetime.b.msg_time[0], rwbuffer++);
3900 writel(datetime.b.msg_time[1], rwbuffer++);
3901 writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
3902 break;
3903 }
3904 case ACB_ADAPTER_TYPE_C: {
3905 struct MessageUnit_C __iomem *reg = pacb->pmuC;
3906 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3907 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3908 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3909 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3910 break;
3911 }
3912 case ACB_ADAPTER_TYPE_D: {
3913 uint32_t __iomem *rwbuffer;
3914 struct MessageUnit_D *reg = pacb->pmuD;
3915 rwbuffer = reg->msgcode_rwbuffer;
3916 writel(datetime.b.msg_time[0], rwbuffer++);
3917 writel(datetime.b.msg_time[1], rwbuffer++);
3918 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
3919 break;
3920 }
3921 case ACB_ADAPTER_TYPE_E: {
3922 struct MessageUnit_E __iomem *reg = pacb->pmuE;
3923 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3924 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3925 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3926 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3927 writel(pacb->out_doorbell, ®->iobound_doorbell);
3928 break;
3929 }
3930 case ACB_ADAPTER_TYPE_F: {
3931 struct MessageUnit_F __iomem *reg = pacb->pmuF;
3932
3933 pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
3934 pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
3935 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3936 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3937 writel(pacb->out_doorbell, ®->iobound_doorbell);
3938 break;
3939 }
3940 }
3941 if (sys_tz.tz_minuteswest)
3942 next_time = ARCMSR_HOURS;
3943 else
3944 next_time = ARCMSR_MINUTES;
3945 mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
3946 }
3947
arcmsr_iop_confirm(struct AdapterControlBlock * acb)3948 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3949 {
3950 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3951 dma_addr_t dma_coherent_handle;
3952
3953 /*
3954 ********************************************************************
3955 ** here we need to tell iop 331 our freeccb.HighPart
3956 ** if freeccb.HighPart is not zero
3957 ********************************************************************
3958 */
3959 switch (acb->adapter_type) {
3960 case ACB_ADAPTER_TYPE_B:
3961 case ACB_ADAPTER_TYPE_D:
3962 dma_coherent_handle = acb->dma_coherent_handle2;
3963 break;
3964 case ACB_ADAPTER_TYPE_E:
3965 case ACB_ADAPTER_TYPE_F:
3966 dma_coherent_handle = acb->dma_coherent_handle +
3967 offsetof(struct CommandControlBlock, arcmsr_cdb);
3968 break;
3969 default:
3970 dma_coherent_handle = acb->dma_coherent_handle;
3971 break;
3972 }
3973 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3974 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
3975 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3976 acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
3977 /*
3978 ***********************************************************************
3979 ** if adapter type B, set window of "post command Q"
3980 ***********************************************************************
3981 */
3982 switch (acb->adapter_type) {
3983
3984 case ACB_ADAPTER_TYPE_A: {
3985 if (cdb_phyaddr_hi32 != 0) {
3986 struct MessageUnit_A __iomem *reg = acb->pmuA;
3987 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
3988 ®->message_rwbuffer[0]);
3989 writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]);
3990 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
3991 ®->inbound_msgaddr0);
3992 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3993 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
3994 part physical address timeout\n",
3995 acb->host->host_no);
3996 return 1;
3997 }
3998 }
3999 }
4000 break;
4001
4002 case ACB_ADAPTER_TYPE_B: {
4003 uint32_t __iomem *rwbuffer;
4004
4005 struct MessageUnit_B *reg = acb->pmuB;
4006 reg->postq_index = 0;
4007 reg->doneq_index = 0;
4008 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
4009 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4010 printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
4011 acb->host->host_no);
4012 return 1;
4013 }
4014 rwbuffer = reg->message_rwbuffer;
4015 /* driver "set config" signature */
4016 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
4017 /* normal should be zero */
4018 writel(cdb_phyaddr_hi32, rwbuffer++);
4019 /* postQ size (256 + 8)*4 */
4020 writel(cdb_phyaddr, rwbuffer++);
4021 /* doneQ size (256 + 8)*4 */
4022 writel(cdb_phyaddr + 1056, rwbuffer++);
4023 /* ccb maxQ size must be --> [(256 + 8)*4]*/
4024 writel(1056, rwbuffer);
4025
4026 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
4027 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4028 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
4029 timeout \n",acb->host->host_no);
4030 return 1;
4031 }
4032 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
4033 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4034 pr_err("arcmsr%d: can't set driver mode.\n",
4035 acb->host->host_no);
4036 return 1;
4037 }
4038 }
4039 break;
4040 case ACB_ADAPTER_TYPE_C: {
4041 struct MessageUnit_C __iomem *reg = acb->pmuC;
4042
4043 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
4044 acb->adapter_index, cdb_phyaddr_hi32);
4045 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
4046 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]);
4047 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4048 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
4049 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
4050 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
4051 timeout \n", acb->host->host_no);
4052 return 1;
4053 }
4054 }
4055 break;
4056 case ACB_ADAPTER_TYPE_D: {
4057 uint32_t __iomem *rwbuffer;
4058 struct MessageUnit_D *reg = acb->pmuD;
4059 reg->postq_index = 0;
4060 reg->doneq_index = 0;
4061 rwbuffer = reg->msgcode_rwbuffer;
4062 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
4063 writel(cdb_phyaddr_hi32, rwbuffer++);
4064 writel(cdb_phyaddr, rwbuffer++);
4065 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
4066 sizeof(struct InBound_SRB)), rwbuffer++);
4067 writel(0x100, rwbuffer);
4068 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
4069 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
4070 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
4071 acb->host->host_no);
4072 return 1;
4073 }
4074 }
4075 break;
4076 case ACB_ADAPTER_TYPE_E: {
4077 struct MessageUnit_E __iomem *reg = acb->pmuE;
4078 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
4079 writel(ARCMSR_SIGNATURE_1884, ®->msgcode_rwbuffer[1]);
4080 writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]);
4081 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]);
4082 writel(acb->ccbsize, ®->msgcode_rwbuffer[4]);
4083 writel(lower_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[5]);
4084 writel(upper_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[6]);
4085 writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]);
4086 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4087 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4088 writel(acb->out_doorbell, ®->iobound_doorbell);
4089 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4090 pr_notice("arcmsr%d: 'set command Q window' timeout \n",
4091 acb->host->host_no);
4092 return 1;
4093 }
4094 }
4095 break;
4096 case ACB_ADAPTER_TYPE_F: {
4097 struct MessageUnit_F __iomem *reg = acb->pmuF;
4098
4099 acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
4100 acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
4101 acb->msgcode_rwbuffer[2] = cdb_phyaddr;
4102 acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
4103 acb->msgcode_rwbuffer[4] = acb->ccbsize;
4104 acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
4105 acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
4106 acb->msgcode_rwbuffer[7] = acb->completeQ_size;
4107 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4108 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4109 writel(acb->out_doorbell, ®->iobound_doorbell);
4110 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4111 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
4112 acb->host->host_no);
4113 return 1;
4114 }
4115 }
4116 break;
4117 }
4118 return 0;
4119 }
4120
arcmsr_wait_firmware_ready(struct AdapterControlBlock * acb)4121 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
4122 {
4123 uint32_t firmware_state = 0;
4124 switch (acb->adapter_type) {
4125
4126 case ACB_ADAPTER_TYPE_A: {
4127 struct MessageUnit_A __iomem *reg = acb->pmuA;
4128 do {
4129 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4130 msleep(20);
4131 firmware_state = readl(®->outbound_msgaddr1);
4132 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
4133 }
4134 break;
4135
4136 case ACB_ADAPTER_TYPE_B: {
4137 struct MessageUnit_B *reg = acb->pmuB;
4138 do {
4139 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4140 msleep(20);
4141 firmware_state = readl(reg->iop2drv_doorbell);
4142 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
4143 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
4144 }
4145 break;
4146 case ACB_ADAPTER_TYPE_C: {
4147 struct MessageUnit_C __iomem *reg = acb->pmuC;
4148 do {
4149 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4150 msleep(20);
4151 firmware_state = readl(®->outbound_msgaddr1);
4152 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
4153 }
4154 break;
4155 case ACB_ADAPTER_TYPE_D: {
4156 struct MessageUnit_D *reg = acb->pmuD;
4157 do {
4158 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4159 msleep(20);
4160 firmware_state = readl(reg->outbound_msgaddr1);
4161 } while ((firmware_state &
4162 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
4163 }
4164 break;
4165 case ACB_ADAPTER_TYPE_E:
4166 case ACB_ADAPTER_TYPE_F: {
4167 struct MessageUnit_E __iomem *reg = acb->pmuE;
4168 do {
4169 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4170 msleep(20);
4171 firmware_state = readl(®->outbound_msgaddr1);
4172 } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
4173 }
4174 break;
4175 }
4176 }
4177
arcmsr_request_device_map(struct timer_list * t)4178 static void arcmsr_request_device_map(struct timer_list *t)
4179 {
4180 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
4181 if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
4182 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4183 } else {
4184 acb->fw_flag = FW_NORMAL;
4185 switch (acb->adapter_type) {
4186 case ACB_ADAPTER_TYPE_A: {
4187 struct MessageUnit_A __iomem *reg = acb->pmuA;
4188 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4189 break;
4190 }
4191 case ACB_ADAPTER_TYPE_B: {
4192 struct MessageUnit_B *reg = acb->pmuB;
4193 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
4194 break;
4195 }
4196 case ACB_ADAPTER_TYPE_C: {
4197 struct MessageUnit_C __iomem *reg = acb->pmuC;
4198 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4199 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
4200 break;
4201 }
4202 case ACB_ADAPTER_TYPE_D: {
4203 struct MessageUnit_D *reg = acb->pmuD;
4204 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
4205 break;
4206 }
4207 case ACB_ADAPTER_TYPE_E: {
4208 struct MessageUnit_E __iomem *reg = acb->pmuE;
4209 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4210 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4211 writel(acb->out_doorbell, ®->iobound_doorbell);
4212 break;
4213 }
4214 case ACB_ADAPTER_TYPE_F: {
4215 struct MessageUnit_F __iomem *reg = acb->pmuF;
4216 uint32_t outMsg1 = readl(®->outbound_msgaddr1);
4217
4218 if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
4219 (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
4220 goto nxt6s;
4221 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4222 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4223 writel(acb->out_doorbell, ®->iobound_doorbell);
4224 break;
4225 }
4226 default:
4227 return;
4228 }
4229 acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
4230 nxt6s:
4231 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4232 }
4233 }
4234
arcmsr_hbaA_start_bgrb(struct AdapterControlBlock * acb)4235 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
4236 {
4237 struct MessageUnit_A __iomem *reg = acb->pmuA;
4238 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4239 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0);
4240 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4241 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4242 rebuild' timeout \n", acb->host->host_no);
4243 }
4244 }
4245
arcmsr_hbaB_start_bgrb(struct AdapterControlBlock * acb)4246 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
4247 {
4248 struct MessageUnit_B *reg = acb->pmuB;
4249 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4250 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
4251 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4252 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4253 rebuild' timeout \n",acb->host->host_no);
4254 }
4255 }
4256
arcmsr_hbaC_start_bgrb(struct AdapterControlBlock * pACB)4257 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
4258 {
4259 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
4260 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4261 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
4262 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
4263 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
4264 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4265 rebuild' timeout \n", pACB->host->host_no);
4266 }
4267 return;
4268 }
4269
arcmsr_hbaD_start_bgrb(struct AdapterControlBlock * pACB)4270 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
4271 {
4272 struct MessageUnit_D *pmu = pACB->pmuD;
4273
4274 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4275 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
4276 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
4277 pr_notice("arcmsr%d: wait 'start adapter "
4278 "background rebuild' timeout\n", pACB->host->host_no);
4279 }
4280 }
4281
arcmsr_hbaE_start_bgrb(struct AdapterControlBlock * pACB)4282 static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
4283 {
4284 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
4285
4286 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4287 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
4288 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4289 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
4290 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
4291 pr_notice("arcmsr%d: wait 'start adapter "
4292 "background rebuild' timeout \n", pACB->host->host_no);
4293 }
4294 }
4295
arcmsr_start_adapter_bgrb(struct AdapterControlBlock * acb)4296 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4297 {
4298 switch (acb->adapter_type) {
4299 case ACB_ADAPTER_TYPE_A:
4300 arcmsr_hbaA_start_bgrb(acb);
4301 break;
4302 case ACB_ADAPTER_TYPE_B:
4303 arcmsr_hbaB_start_bgrb(acb);
4304 break;
4305 case ACB_ADAPTER_TYPE_C:
4306 arcmsr_hbaC_start_bgrb(acb);
4307 break;
4308 case ACB_ADAPTER_TYPE_D:
4309 arcmsr_hbaD_start_bgrb(acb);
4310 break;
4311 case ACB_ADAPTER_TYPE_E:
4312 case ACB_ADAPTER_TYPE_F:
4313 arcmsr_hbaE_start_bgrb(acb);
4314 break;
4315 }
4316 }
4317
arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock * acb)4318 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
4319 {
4320 switch (acb->adapter_type) {
4321 case ACB_ADAPTER_TYPE_A: {
4322 struct MessageUnit_A __iomem *reg = acb->pmuA;
4323 uint32_t outbound_doorbell;
4324 /* empty doorbell Qbuffer if door bell ringed */
4325 outbound_doorbell = readl(®->outbound_doorbell);
4326 /*clear doorbell interrupt */
4327 writel(outbound_doorbell, ®->outbound_doorbell);
4328 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
4329 }
4330 break;
4331
4332 case ACB_ADAPTER_TYPE_B: {
4333 struct MessageUnit_B *reg = acb->pmuB;
4334 uint32_t outbound_doorbell, i;
4335 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4336 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4337 /* let IOP know data has been read */
4338 for(i=0; i < 200; i++) {
4339 msleep(20);
4340 outbound_doorbell = readl(reg->iop2drv_doorbell);
4341 if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
4342 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4343 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4344 } else
4345 break;
4346 }
4347 }
4348 break;
4349 case ACB_ADAPTER_TYPE_C: {
4350 struct MessageUnit_C __iomem *reg = acb->pmuC;
4351 uint32_t outbound_doorbell, i;
4352 /* empty doorbell Qbuffer if door bell ringed */
4353 outbound_doorbell = readl(®->outbound_doorbell);
4354 writel(outbound_doorbell, ®->outbound_doorbell_clear);
4355 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
4356 for (i = 0; i < 200; i++) {
4357 msleep(20);
4358 outbound_doorbell = readl(®->outbound_doorbell);
4359 if (outbound_doorbell &
4360 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
4361 writel(outbound_doorbell,
4362 ®->outbound_doorbell_clear);
4363 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
4364 ®->inbound_doorbell);
4365 } else
4366 break;
4367 }
4368 }
4369 break;
4370 case ACB_ADAPTER_TYPE_D: {
4371 struct MessageUnit_D *reg = acb->pmuD;
4372 uint32_t outbound_doorbell, i;
4373 /* empty doorbell Qbuffer if door bell ringed */
4374 outbound_doorbell = readl(reg->outbound_doorbell);
4375 writel(outbound_doorbell, reg->outbound_doorbell);
4376 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4377 reg->inbound_doorbell);
4378 for (i = 0; i < 200; i++) {
4379 msleep(20);
4380 outbound_doorbell = readl(reg->outbound_doorbell);
4381 if (outbound_doorbell &
4382 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
4383 writel(outbound_doorbell,
4384 reg->outbound_doorbell);
4385 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4386 reg->inbound_doorbell);
4387 } else
4388 break;
4389 }
4390 }
4391 break;
4392 case ACB_ADAPTER_TYPE_E:
4393 case ACB_ADAPTER_TYPE_F: {
4394 struct MessageUnit_E __iomem *reg = acb->pmuE;
4395 uint32_t i, tmp;
4396
4397 acb->in_doorbell = readl(®->iobound_doorbell);
4398 writel(0, ®->host_int_status); /*clear interrupt*/
4399 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4400 writel(acb->out_doorbell, ®->iobound_doorbell);
4401 for(i=0; i < 200; i++) {
4402 msleep(20);
4403 tmp = acb->in_doorbell;
4404 acb->in_doorbell = readl(®->iobound_doorbell);
4405 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
4406 writel(0, ®->host_int_status); /*clear interrupt*/
4407 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4408 writel(acb->out_doorbell, ®->iobound_doorbell);
4409 } else
4410 break;
4411 }
4412 }
4413 break;
4414 }
4415 }
4416
arcmsr_enable_eoi_mode(struct AdapterControlBlock * acb)4417 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4418 {
4419 switch (acb->adapter_type) {
4420 case ACB_ADAPTER_TYPE_A:
4421 return;
4422 case ACB_ADAPTER_TYPE_B:
4423 {
4424 struct MessageUnit_B *reg = acb->pmuB;
4425 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
4426 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4427 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
4428 return;
4429 }
4430 }
4431 break;
4432 case ACB_ADAPTER_TYPE_C:
4433 return;
4434 }
4435 return;
4436 }
4437
/*
 * arcmsr_hardware_reset - force a hardware reset of the RAID controller.
 *
 * Saves the first 64 bytes of PCI config space, triggers the reset through
 * a device-specific mechanism selected by acb->dev_id, sleeps 2 s for the
 * reset to take effect, then restores the saved config space and sleeps
 * another second before returning.
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		/* ARC-1680: reset is requested through a reserved MU register */
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		/*
		 * ARC-1880: repeat the magic write_sequence unlock pattern
		 * until the chip reports DiagWrite_ENABLE (max 5 attempts),
		 * then request the reset via host_diagnostic.
		 */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		/* ARC-1884: same unlock-then-reset sequence on the 3xxx registers */
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		/*
		 * Other models: write 0x20 to PCI config offset 0x84.
		 * NOTE(review): presumably a vendor-specific reset bit —
		 * meaning not derivable from this file; confirm with Areca docs.
		 */
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
4491
arcmsr_reset_in_progress(struct AdapterControlBlock * acb)4492 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4493 {
4494 bool rtn = true;
4495
4496 switch(acb->adapter_type) {
4497 case ACB_ADAPTER_TYPE_A:{
4498 struct MessageUnit_A __iomem *reg = acb->pmuA;
4499 rtn = ((readl(®->outbound_msgaddr1) &
4500 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4501 }
4502 break;
4503 case ACB_ADAPTER_TYPE_B:{
4504 struct MessageUnit_B *reg = acb->pmuB;
4505 rtn = ((readl(reg->iop2drv_doorbell) &
4506 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4507 }
4508 break;
4509 case ACB_ADAPTER_TYPE_C:{
4510 struct MessageUnit_C __iomem *reg = acb->pmuC;
4511 rtn = (readl(®->host_diagnostic) & 0x04) ? true : false;
4512 }
4513 break;
4514 case ACB_ADAPTER_TYPE_D:{
4515 struct MessageUnit_D *reg = acb->pmuD;
4516 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4517 true : false;
4518 }
4519 break;
4520 case ACB_ADAPTER_TYPE_E:
4521 case ACB_ADAPTER_TYPE_F:{
4522 struct MessageUnit_E __iomem *reg = acb->pmuE;
4523 rtn = (readl(®->host_diagnostic_3xxx) &
4524 ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4525 }
4526 break;
4527 }
4528 return rtn;
4529 }
4530
arcmsr_iop_init(struct AdapterControlBlock * acb)4531 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
4532 {
4533 uint32_t intmask_org;
4534 /* disable all outbound interrupt */
4535 intmask_org = arcmsr_disable_outbound_ints(acb);
4536 arcmsr_wait_firmware_ready(acb);
4537 arcmsr_iop_confirm(acb);
4538 /*start background rebuild*/
4539 arcmsr_start_adapter_bgrb(acb);
4540 /* empty doorbell Qbuffer if door bell ringed */
4541 arcmsr_clear_doorbell_queue_buffer(acb);
4542 arcmsr_enable_eoi_mode(acb);
4543 /* enable outbound Post Queue,outbound doorbell Interrupt */
4544 arcmsr_enable_outbound_ints(acb, intmask_org);
4545 acb->acb_flags |= ACB_F_IOP_INITED;
4546 }
4547
arcmsr_iop_reset(struct AdapterControlBlock * acb)4548 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4549 {
4550 struct CommandControlBlock *ccb;
4551 uint32_t intmask_org;
4552 uint8_t rtnval = 0x00;
4553 int i = 0;
4554 unsigned long flags;
4555
4556 if (atomic_read(&acb->ccboutstandingcount) != 0) {
4557 /* disable all outbound interrupt */
4558 intmask_org = arcmsr_disable_outbound_ints(acb);
4559 /* talk to iop 331 outstanding command aborted */
4560 rtnval = arcmsr_abort_allcmd(acb);
4561 /* clear all outbound posted Q */
4562 arcmsr_done4abort_postqueue(acb);
4563 for (i = 0; i < acb->maxFreeCCB; i++) {
4564 ccb = acb->pccb_pool[i];
4565 if (ccb->startdone == ARCMSR_CCB_START) {
4566 scsi_dma_unmap(ccb->pcmd);
4567 ccb->startdone = ARCMSR_CCB_DONE;
4568 ccb->ccb_flags = 0;
4569 spin_lock_irqsave(&acb->ccblist_lock, flags);
4570 list_add_tail(&ccb->list, &acb->ccb_free_list);
4571 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4572 }
4573 }
4574 atomic_set(&acb->ccboutstandingcount, 0);
4575 /* enable all outbound interrupt */
4576 arcmsr_enable_outbound_ints(acb, intmask_org);
4577 return rtnval;
4578 }
4579 return rtnval;
4580 }
4581
arcmsr_bus_reset(struct scsi_cmnd * cmd)4582 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
4583 {
4584 struct AdapterControlBlock *acb;
4585 int retry_count = 0;
4586 int rtn = FAILED;
4587 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
4588 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4589 return SUCCESS;
4590 pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
4591 " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
4592 acb->num_resets++;
4593
4594 if (acb->acb_flags & ACB_F_BUS_RESET) {
4595 long timeout;
4596 pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
4597 timeout = wait_event_timeout(wait_q, (acb->acb_flags
4598 & ACB_F_BUS_RESET) == 0, 220 * HZ);
4599 if (timeout)
4600 return SUCCESS;
4601 }
4602 acb->acb_flags |= ACB_F_BUS_RESET;
4603 if (!arcmsr_iop_reset(acb)) {
4604 arcmsr_hardware_reset(acb);
4605 acb->acb_flags &= ~ACB_F_IOP_INITED;
4606 wait_reset_done:
4607 ssleep(ARCMSR_SLEEPTIME);
4608 if (arcmsr_reset_in_progress(acb)) {
4609 if (retry_count > ARCMSR_RETRYCOUNT) {
4610 acb->fw_flag = FW_DEADLOCK;
4611 pr_notice("arcmsr%d: waiting for hw bus reset"
4612 " return, RETRY TERMINATED!!\n",
4613 acb->host->host_no);
4614 return FAILED;
4615 }
4616 retry_count++;
4617 goto wait_reset_done;
4618 }
4619 arcmsr_iop_init(acb);
4620 acb->fw_flag = FW_NORMAL;
4621 mod_timer(&acb->eternal_timer, jiffies +
4622 msecs_to_jiffies(6 * HZ));
4623 acb->acb_flags &= ~ACB_F_BUS_RESET;
4624 rtn = SUCCESS;
4625 pr_notice("arcmsr: scsi bus reset eh returns with success\n");
4626 } else {
4627 acb->acb_flags &= ~ACB_F_BUS_RESET;
4628 acb->fw_flag = FW_NORMAL;
4629 mod_timer(&acb->eternal_timer, jiffies +
4630 msecs_to_jiffies(6 * HZ));
4631 rtn = SUCCESS;
4632 }
4633 return rtn;
4634 }
4635
/* Poll the adapter until the given CCB is reported done; return the result. */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
4643
arcmsr_abort(struct scsi_cmnd * cmd)4644 static int arcmsr_abort(struct scsi_cmnd *cmd)
4645 {
4646 struct AdapterControlBlock *acb =
4647 (struct AdapterControlBlock *)cmd->device->host->hostdata;
4648 int i = 0;
4649 int rtn = FAILED;
4650 uint32_t intmask_org;
4651
4652 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4653 return SUCCESS;
4654 printk(KERN_NOTICE
4655 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
4656 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4657 acb->acb_flags |= ACB_F_ABORT;
4658 acb->num_aborts++;
4659 /*
4660 ************************************************
4661 ** the all interrupt service routine is locked
4662 ** we need to handle it as soon as possible and exit
4663 ************************************************
4664 */
4665 if (!atomic_read(&acb->ccboutstandingcount)) {
4666 acb->acb_flags &= ~ACB_F_ABORT;
4667 return rtn;
4668 }
4669
4670 intmask_org = arcmsr_disable_outbound_ints(acb);
4671 for (i = 0; i < acb->maxFreeCCB; i++) {
4672 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4673 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
4674 ccb->startdone = ARCMSR_CCB_ABORTED;
4675 rtn = arcmsr_abort_one_cmd(acb, ccb);
4676 break;
4677 }
4678 }
4679 acb->acb_flags &= ~ACB_F_ABORT;
4680 arcmsr_enable_outbound_ints(acb, intmask_org);
4681 return rtn;
4682 }
4683
arcmsr_info(struct Scsi_Host * host)4684 static const char *arcmsr_info(struct Scsi_Host *host)
4685 {
4686 struct AdapterControlBlock *acb =
4687 (struct AdapterControlBlock *) host->hostdata;
4688 static char buf[256];
4689 char *type;
4690 int raid6 = 1;
4691 switch (acb->pdev->device) {
4692 case PCI_DEVICE_ID_ARECA_1110:
4693 case PCI_DEVICE_ID_ARECA_1200:
4694 case PCI_DEVICE_ID_ARECA_1202:
4695 case PCI_DEVICE_ID_ARECA_1210:
4696 raid6 = 0;
4697 fallthrough;
4698 case PCI_DEVICE_ID_ARECA_1120:
4699 case PCI_DEVICE_ID_ARECA_1130:
4700 case PCI_DEVICE_ID_ARECA_1160:
4701 case PCI_DEVICE_ID_ARECA_1170:
4702 case PCI_DEVICE_ID_ARECA_1201:
4703 case PCI_DEVICE_ID_ARECA_1203:
4704 case PCI_DEVICE_ID_ARECA_1220:
4705 case PCI_DEVICE_ID_ARECA_1230:
4706 case PCI_DEVICE_ID_ARECA_1260:
4707 case PCI_DEVICE_ID_ARECA_1270:
4708 case PCI_DEVICE_ID_ARECA_1280:
4709 type = "SATA";
4710 break;
4711 case PCI_DEVICE_ID_ARECA_1214:
4712 case PCI_DEVICE_ID_ARECA_1380:
4713 case PCI_DEVICE_ID_ARECA_1381:
4714 case PCI_DEVICE_ID_ARECA_1680:
4715 case PCI_DEVICE_ID_ARECA_1681:
4716 case PCI_DEVICE_ID_ARECA_1880:
4717 case PCI_DEVICE_ID_ARECA_1883:
4718 case PCI_DEVICE_ID_ARECA_1884:
4719 type = "SAS/SATA";
4720 break;
4721 case PCI_DEVICE_ID_ARECA_1886_0:
4722 case PCI_DEVICE_ID_ARECA_1886:
4723 type = "NVMe/SAS/SATA";
4724 break;
4725 default:
4726 type = "unknown";
4727 raid6 = 0;
4728 break;
4729 }
4730 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4731 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4732 return buf;
4733 }
4734