1 /*
2 *******************************************************************************
3 ** O.S : Linux
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Nick Cheng, C.L. Huang
6 ** Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
9 **
10 ** Web site: www.areca.com.tw
11 ** E-mail: support@areca.com.tw
12 **
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
23 ** are met:
24 ** 1. Redistributions of source code must retain the above copyright
25 ** notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 ** notice, this list of conditions and the following disclaimer in the
28 ** documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 ** derived from this software without specific prior written permission.
31 **
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
45 *******************************************************************************
46 */
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/pci_ids.h>
51 #include <linux/interrupt.h>
52 #include <linux/moduleparam.h>
53 #include <linux/errno.h>
54 #include <linux/types.h>
55 #include <linux/delay.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/timer.h>
58 #include <linux/slab.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
61 #include <linux/circ_buf.h>
62 #include <asm/dma.h>
63 #include <asm/io.h>
64 #include <linux/uaccess.h>
65 #include <scsi/scsi_host.h>
66 #include <scsi/scsi.h>
67 #include <scsi/scsi_cmnd.h>
68 #include <scsi/scsi_tcq.h>
69 #include <scsi/scsi_device.h>
70 #include <scsi/scsi_transport.h>
71 #include <scsi/scsicam.h>
72 #include "arcmsr.h"
73 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78 static int msix_enable = 1;
79 module_param(msix_enable, int, S_IRUGO);
80 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
81
82 static int msi_enable = 1;
83 module_param(msi_enable, int, S_IRUGO);
84 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
85
86 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
87 module_param(host_can_queue, int, S_IRUGO);
88 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
89
90 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
91 module_param(cmd_per_lun, int, S_IRUGO);
92 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
93
94 static int dma_mask_64 = 0;
95 module_param(dma_mask_64, int, S_IRUGO);
96 MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
97
98 static int set_date_time = 0;
99 module_param(set_date_time, int, S_IRUGO);
100 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
101
102 #define ARCMSR_SLEEPTIME 10
103 #define ARCMSR_RETRYCOUNT 12
104
105 static wait_queue_head_t wait_q;
106 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
107 struct scsi_cmnd *cmd);
108 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
109 static int arcmsr_abort(struct scsi_cmnd *);
110 static int arcmsr_bus_reset(struct scsi_cmnd *);
111 static int arcmsr_bios_param(struct scsi_device *sdev,
112 struct block_device *bdev, sector_t capacity, int *info);
113 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
114 static int arcmsr_probe(struct pci_dev *pdev,
115 const struct pci_device_id *id);
116 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
117 static int arcmsr_resume(struct pci_dev *pdev);
118 static void arcmsr_remove(struct pci_dev *pdev);
119 static void arcmsr_shutdown(struct pci_dev *pdev);
120 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
121 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
122 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
123 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
124 u32 intmask_org);
125 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
126 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
127 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
128 static void arcmsr_request_device_map(struct timer_list *t);
129 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
130 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
131 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
132 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
133 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
134 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
135 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
136 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
137 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
138 static const char *arcmsr_info(struct Scsi_Host *);
139 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
140 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
141 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
142 static void arcmsr_set_iop_datetime(struct timer_list *);
arcmsr_adjust_disk_queue_depth(struct scsi_device * sdev,int queue_depth)143 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
144 {
145 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
146 queue_depth = ARCMSR_MAX_CMD_PERLUN;
147 return scsi_change_queue_depth(sdev, queue_depth);
148 }
149
/*
 * SCSI mid-layer host template.  can_queue, sg_tablesize and max_sectors
 * are defaults only; they are overridden per adapter once the firmware's
 * actual limits are known (see arcmsr_alloc_ccb_pool()).
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_attrs		= arcmsr_host_attrs,
	.no_write_same		= 1,
};
167
/*
 * Supported PCI IDs.  driver_data selects the adapter family
 * (ACB_ADAPTER_TYPE_A..F), which determines the register interface
 * used throughout the driver.
 */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
222
/* PCI driver registration: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.suspend		= arcmsr_suspend,
	.resume			= arcmsr_resume,
	.shutdown		= arcmsr_shutdown,
};
232 /*
233 ****************************************************************************
234 ****************************************************************************
235 */
236
arcmsr_free_io_queue(struct AdapterControlBlock * acb)237 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
238 {
239 switch (acb->adapter_type) {
240 case ACB_ADAPTER_TYPE_B:
241 case ACB_ADAPTER_TYPE_D:
242 case ACB_ADAPTER_TYPE_E:
243 case ACB_ADAPTER_TYPE_F:
244 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
245 acb->dma_coherent2, acb->dma_coherent_handle2);
246 break;
247 }
248 }
249
/*
 * Map the controller's register BAR(s) according to the adapter family.
 * Some families also perform an initial interrupt/doorbell handshake here.
 * Returns false on any mapping failure, with earlier mappings released.
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		/* Type A: all registers in BAR0. */
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/*
		 * Type B: doorbell registers live in BAR0, message buffers
		 * in BAR2 (see arcmsr_hbaB_assign_regAddr()).
		 */
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* Don't leak the first mapping on partial failure. */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		/* Type C: registers in BAR1. */
		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* Acknowledge any message interrupt left pending by firmware. */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		void __iomem *mem_base0;
		unsigned long addr, range;

		/* Type D: registers in BAR0. */
		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		/* Type E: registers in BAR1; reset the doorbell handshake state. */
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);	/* synchronize doorbell to 0 */
		/* Shadow copies of the toggle-style doorbell registers. */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		/* Type F: registers in BAR0; same doorbell handshake as type E. */
		acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuF) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	}
	return true;
}
336
arcmsr_unmap_pciregion(struct AdapterControlBlock * acb)337 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
338 {
339 switch (acb->adapter_type) {
340 case ACB_ADAPTER_TYPE_A:
341 iounmap(acb->pmuA);
342 break;
343 case ACB_ADAPTER_TYPE_B:
344 iounmap(acb->mem_base0);
345 iounmap(acb->mem_base1);
346 break;
347 case ACB_ADAPTER_TYPE_C:
348 iounmap(acb->pmuC);
349 break;
350 case ACB_ADAPTER_TYPE_D:
351 iounmap(acb->mem_base0);
352 break;
353 case ACB_ADAPTER_TYPE_E:
354 iounmap(acb->pmuE);
355 break;
356 case ACB_ADAPTER_TYPE_F:
357 iounmap(acb->pmuF);
358 break;
359 }
360 }
361
arcmsr_do_interrupt(int irq,void * dev_id)362 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
363 {
364 irqreturn_t handle_state;
365 struct AdapterControlBlock *acb = dev_id;
366
367 handle_state = arcmsr_interrupt(acb);
368 return handle_state;
369 }
370
/*
 * Report a BIOS-compatible CHS geometry for the device.  If the disk
 * already carries a partition table, its recorded geometry wins.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int heads, sectors, cylinders, total_capacity;

	/* Geometry derived from an existing partition table, if any. */
	if (scsi_partsize(bdev, capacity, geom))
		return 0;

	/*
	 * NOTE(review): capacity (sector_t, possibly 64-bit) is truncated
	 * to int here, so the computed geometry is wrong for devices with
	 * more than 2^31 sectors — confirm whether that matters for the
	 * legacy-BIOS use case this serves.
	 */
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	/* Large-disk translation: fall back to 255 heads / 63 sectors. */
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
393
arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock * acb)394 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
395 {
396 struct MessageUnit_A __iomem *reg = acb->pmuA;
397 int i;
398
399 for (i = 0; i < 2000; i++) {
400 if (readl(®->outbound_intstatus) &
401 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
402 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
403 ®->outbound_intstatus);
404 return true;
405 }
406 msleep(10);
407 } /* max 20 seconds */
408
409 return false;
410 }
411
arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock * acb)412 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
413 {
414 struct MessageUnit_B *reg = acb->pmuB;
415 int i;
416
417 for (i = 0; i < 2000; i++) {
418 if (readl(reg->iop2drv_doorbell)
419 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
420 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
421 reg->iop2drv_doorbell);
422 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
423 reg->drv2iop_doorbell);
424 return true;
425 }
426 msleep(10);
427 } /* max 20 seconds */
428
429 return false;
430 }
431
arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock * pACB)432 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
433 {
434 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
435 int i;
436
437 for (i = 0; i < 2000; i++) {
438 if (readl(&phbcmu->outbound_doorbell)
439 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
440 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
441 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
442 return true;
443 }
444 msleep(10);
445 } /* max 20 seconds */
446
447 return false;
448 }
449
arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock * pACB)450 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
451 {
452 struct MessageUnit_D *reg = pACB->pmuD;
453 int i;
454
455 for (i = 0; i < 2000; i++) {
456 if (readl(reg->outbound_doorbell)
457 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
458 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
459 reg->outbound_doorbell);
460 return true;
461 }
462 msleep(10);
463 } /* max 20 seconds */
464 return false;
465 }
466
arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock * pACB)467 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
468 {
469 int i;
470 uint32_t read_doorbell;
471 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
472
473 for (i = 0; i < 2000; i++) {
474 read_doorbell = readl(&phbcmu->iobound_doorbell);
475 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
476 writel(0, &phbcmu->host_int_status); /*clear interrupt*/
477 pACB->in_doorbell = read_doorbell;
478 return true;
479 }
480 msleep(10);
481 } /* max 20 seconds */
482 return false;
483 }
484
arcmsr_hbaA_flush_cache(struct AdapterControlBlock * acb)485 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
486 {
487 struct MessageUnit_A __iomem *reg = acb->pmuA;
488 int retry_count = 30;
489 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
490 do {
491 if (arcmsr_hbaA_wait_msgint_ready(acb))
492 break;
493 else {
494 retry_count--;
495 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
496 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
497 }
498 } while (retry_count != 0);
499 }
500
arcmsr_hbaB_flush_cache(struct AdapterControlBlock * acb)501 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
502 {
503 struct MessageUnit_B *reg = acb->pmuB;
504 int retry_count = 30;
505 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
506 do {
507 if (arcmsr_hbaB_wait_msgint_ready(acb))
508 break;
509 else {
510 retry_count--;
511 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
512 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
513 }
514 } while (retry_count != 0);
515 }
516
arcmsr_hbaC_flush_cache(struct AdapterControlBlock * pACB)517 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
518 {
519 struct MessageUnit_C __iomem *reg = pACB->pmuC;
520 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
521 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
522 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
523 do {
524 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
525 break;
526 } else {
527 retry_count--;
528 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
529 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
530 }
531 } while (retry_count != 0);
532 return;
533 }
534
arcmsr_hbaD_flush_cache(struct AdapterControlBlock * pACB)535 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
536 {
537 int retry_count = 15;
538 struct MessageUnit_D *reg = pACB->pmuD;
539
540 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
541 do {
542 if (arcmsr_hbaD_wait_msgint_ready(pACB))
543 break;
544
545 retry_count--;
546 pr_notice("arcmsr%d: wait 'flush adapter "
547 "cache' timeout, retry count down = %d\n",
548 pACB->host->host_no, retry_count);
549 } while (retry_count != 0);
550 }
551
arcmsr_hbaE_flush_cache(struct AdapterControlBlock * pACB)552 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
553 {
554 int retry_count = 30;
555 struct MessageUnit_E __iomem *reg = pACB->pmuE;
556
557 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
558 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
559 writel(pACB->out_doorbell, ®->iobound_doorbell);
560 do {
561 if (arcmsr_hbaE_wait_msgint_ready(pACB))
562 break;
563 retry_count--;
564 pr_notice("arcmsr%d: wait 'flush adapter "
565 "cache' timeout, retry count down = %d\n",
566 pACB->host->host_no, retry_count);
567 } while (retry_count != 0);
568 }
569
/* Dispatch the "flush adapter cache" command to the family-specific helper. */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		/* Type F shares the type-E message interface for this command. */
		arcmsr_hbaE_flush_cache(acb);
		break;
	}
}
592
/*
 * Fill in the MessageUnit_B register pointers.  Doorbell registers live in
 * BAR0 (MEM_BASE0), message buffers in BAR2 (MEM_BASE1).  The ARC-1203
 * uses a different doorbell register layout than the other type-B boards.
 */
static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
		reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
	} else {
		reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
	}
	reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
}
612
/*
 * Fill in the MessageUnit_D register pointers for the ARC-1214; all
 * registers are offsets into the single BAR0 mapping (MEM_BASE0).
 */
static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
	reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
	reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
	reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
	reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
	reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
	reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
	reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
	reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
	reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
	reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
	reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
	reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
	reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
	/* Inbound/outbound request list (post/done queue) registers. */
	reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
	reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
	reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
	reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
	reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
	reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
	reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
	reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
	reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
	/* Message exchange buffers. */
	reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
	reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
	reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
644
/*
 * Lay out the type F host-side message buffers inside the coherent region
 * allocated in arcmsr_alloc_io_queue(), and program their bus address into
 * the controller.  The write/read/rw buffers sit after the completion
 * queue at fixed 0x100/0x200 offsets.
 */
static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
{
	dma_addr_t host_buffer_dma;
	struct MessageUnit_F __iomem *pmuF;

	/* 0xff fill of the completion queue area — presumably marks entries
	 * as unused; confirm against the firmware spec. */
	memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
	/* Message buffers start 4-byte aligned, right after the completion queue. */
	acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
		acb->completeQ_size, 4);
	acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
	acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
	memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
	/* Corresponding bus address of the buffer area, same alignment. */
	host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
	pmuF = acb->pmuF;
	/* host buffer low address, bit0:1 all buffer active */
	writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
	/* host buffer high address */
	writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
	/* set host buffer physical address */
	writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
}
665
arcmsr_alloc_io_queue(struct AdapterControlBlock * acb)666 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
667 {
668 bool rtn = true;
669 void *dma_coherent;
670 dma_addr_t dma_coherent_handle;
671 struct pci_dev *pdev = acb->pdev;
672
673 switch (acb->adapter_type) {
674 case ACB_ADAPTER_TYPE_B: {
675 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
676 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
677 &dma_coherent_handle, GFP_KERNEL);
678 if (!dma_coherent) {
679 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
680 return false;
681 }
682 acb->dma_coherent_handle2 = dma_coherent_handle;
683 acb->dma_coherent2 = dma_coherent;
684 acb->pmuB = (struct MessageUnit_B *)dma_coherent;
685 arcmsr_hbaB_assign_regAddr(acb);
686 }
687 break;
688 case ACB_ADAPTER_TYPE_D: {
689 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
690 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
691 &dma_coherent_handle, GFP_KERNEL);
692 if (!dma_coherent) {
693 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
694 return false;
695 }
696 acb->dma_coherent_handle2 = dma_coherent_handle;
697 acb->dma_coherent2 = dma_coherent;
698 acb->pmuD = (struct MessageUnit_D *)dma_coherent;
699 arcmsr_hbaD_assign_regAddr(acb);
700 }
701 break;
702 case ACB_ADAPTER_TYPE_E: {
703 uint32_t completeQ_size;
704 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
705 acb->ioqueue_size = roundup(completeQ_size, 32);
706 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
707 &dma_coherent_handle, GFP_KERNEL);
708 if (!dma_coherent){
709 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
710 return false;
711 }
712 acb->dma_coherent_handle2 = dma_coherent_handle;
713 acb->dma_coherent2 = dma_coherent;
714 acb->pCompletionQ = dma_coherent;
715 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
716 acb->doneq_index = 0;
717 }
718 break;
719 case ACB_ADAPTER_TYPE_F: {
720 uint32_t QueueDepth;
721 uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
722
723 arcmsr_wait_firmware_ready(acb);
724 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
725 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
726 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
727 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
728 &dma_coherent_handle, GFP_KERNEL);
729 if (!dma_coherent) {
730 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
731 return false;
732 }
733 acb->dma_coherent_handle2 = dma_coherent_handle;
734 acb->dma_coherent2 = dma_coherent;
735 acb->pCompletionQ = dma_coherent;
736 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
737 acb->doneq_index = 0;
738 arcmsr_hbaF_assign_regAddr(acb);
739 }
740 break;
741 default:
742 break;
743 }
744 return rtn;
745 }
746
/*
 * Allocate the CCB (command control block) pool in one coherent DMA
 * allocation.  For non type-F adapters the message-unit/ioqueue region is
 * appended to the same allocation and assigned afterwards.  The pool is
 * truncated if it would cross a 4 GiB boundary, since CCB physical
 * addresses must share the same upper 32 bits.  Returns 0 or -ENOMEM.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	unsigned long cdb_phyaddr, next_ccb_phy;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version, curr_phy_upper32;

	/* Until the firmware reports otherwise, every target/LUN is absent. */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	/*
	 * Firmware config version >= 3 encodes the supported SG page count
	 * in bits 8..15, raising the transfer/SG limits beyond the defaults.
	 */
	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* Each CCB carries its SG list inline; keep 32-byte alignment. */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	/* Non-F adapters append the ioqueue region to the same allocation. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		acb->uncache_size += acb->ioqueue_size;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	/* Constant virtual-to-physical offset used for fast translation later. */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			/* A/B firmware takes the CDB address in 32-byte units. */
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
		case ACB_ADAPTER_TYPE_F:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/*
		 * All CCBs must share the same upper 32 physical-address
		 * bits; if the next CCB would cross a 4 GiB boundary, stop
		 * here and shrink the usable pool (and queue depth) to i.
		 */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	/* The remainder of the allocation becomes the ioqueue region. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	return 0;
}
839
arcmsr_message_isr_bh_fn(struct work_struct * work)840 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
841 {
842 struct AdapterControlBlock *acb = container_of(work,
843 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
844 char *acb_dev_map = (char *)acb->device_map;
845 uint32_t __iomem *signature = NULL;
846 char __iomem *devicemap = NULL;
847 int target, lun;
848 struct scsi_device *psdev;
849 char diff, temp;
850
851 switch (acb->adapter_type) {
852 case ACB_ADAPTER_TYPE_A: {
853 struct MessageUnit_A __iomem *reg = acb->pmuA;
854
855 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
856 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
857 break;
858 }
859 case ACB_ADAPTER_TYPE_B: {
860 struct MessageUnit_B *reg = acb->pmuB;
861
862 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
863 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
864 break;
865 }
866 case ACB_ADAPTER_TYPE_C: {
867 struct MessageUnit_C __iomem *reg = acb->pmuC;
868
869 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
870 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
871 break;
872 }
873 case ACB_ADAPTER_TYPE_D: {
874 struct MessageUnit_D *reg = acb->pmuD;
875
876 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
877 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
878 break;
879 }
880 case ACB_ADAPTER_TYPE_E: {
881 struct MessageUnit_E __iomem *reg = acb->pmuE;
882
883 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
884 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
885 break;
886 }
887 case ACB_ADAPTER_TYPE_F: {
888 signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
889 devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
890 break;
891 }
892 }
893 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
894 return;
895 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
896 target++) {
897 temp = readb(devicemap);
898 diff = (*acb_dev_map) ^ temp;
899 if (diff != 0) {
900 *acb_dev_map = temp;
901 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
902 lun++) {
903 if ((diff & 0x01) == 1 &&
904 (temp & 0x01) == 1) {
905 scsi_add_device(acb->host,
906 0, target, lun);
907 } else if ((diff & 0x01) == 1
908 && (temp & 0x01) == 0) {
909 psdev = scsi_device_lookup(acb->host,
910 0, target, lun);
911 if (psdev != NULL) {
912 scsi_remove_device(psdev);
913 scsi_device_put(psdev);
914 }
915 }
916 temp >>= 1;
917 diff >>= 1;
918 }
919 }
920 devicemap++;
921 acb_dev_map++;
922 }
923 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
924 }
925
926 static int
arcmsr_request_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)927 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
928 {
929 unsigned long flags;
930 int nvec, i;
931
932 if (msix_enable == 0)
933 goto msi_int0;
934 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
935 PCI_IRQ_MSIX);
936 if (nvec > 0) {
937 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
938 flags = 0;
939 } else {
940 msi_int0:
941 if (msi_enable == 1) {
942 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
943 if (nvec == 1) {
944 dev_info(&pdev->dev, "msi enabled\n");
945 goto msi_int1;
946 }
947 }
948 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
949 if (nvec < 1)
950 return FAILED;
951 msi_int1:
952 flags = IRQF_SHARED;
953 }
954
955 acb->vector_count = nvec;
956 for (i = 0; i < nvec; i++) {
957 if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
958 flags, "arcmsr", acb)) {
959 pr_warn("arcmsr%d: request_irq =%d failed!\n",
960 acb->host->host_no, pci_irq_vector(pdev, i));
961 goto out_free_irq;
962 }
963 }
964
965 return SUCCESS;
966 out_free_irq:
967 while (--i >= 0)
968 free_irq(pci_irq_vector(pdev, i), acb);
969 pci_free_irq_vectors(pdev);
970 return FAILED;
971 }
972
arcmsr_init_get_devmap_timer(struct AdapterControlBlock * pacb)973 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
974 {
975 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
976 pacb->fw_flag = FW_NORMAL;
977 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
978 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
979 add_timer(&pacb->eternal_timer);
980 }
981
arcmsr_init_set_datetime_timer(struct AdapterControlBlock * pacb)982 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
983 {
984 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
985 pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
986 add_timer(&pacb->refresh_timer);
987 }
988
arcmsr_set_dma_mask(struct AdapterControlBlock * acb)989 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
990 {
991 struct pci_dev *pcidev = acb->pdev;
992
993 if (IS_DMA64) {
994 if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
995 dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
996 goto dma32;
997 if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
998 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
999 printk("arcmsr: set DMA 64 mask failed\n");
1000 return -ENXIO;
1001 }
1002 } else {
1003 dma32:
1004 if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1005 dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1006 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1007 printk("arcmsr: set DMA 32-bit mask failed\n");
1008 return -ENXIO;
1009 }
1010 }
1011 return 0;
1012 }
1013
arcmsr_probe(struct pci_dev * pdev,const struct pci_device_id * id)1014 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1015 {
1016 struct Scsi_Host *host;
1017 struct AdapterControlBlock *acb;
1018 uint8_t bus,dev_fun;
1019 int error;
1020 error = pci_enable_device(pdev);
1021 if(error){
1022 return -ENODEV;
1023 }
1024 host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
1025 if(!host){
1026 goto pci_disable_dev;
1027 }
1028 init_waitqueue_head(&wait_q);
1029 bus = pdev->bus->number;
1030 dev_fun = pdev->devfn;
1031 acb = (struct AdapterControlBlock *) host->hostdata;
1032 memset(acb,0,sizeof(struct AdapterControlBlock));
1033 acb->pdev = pdev;
1034 acb->adapter_type = id->driver_data;
1035 if (arcmsr_set_dma_mask(acb))
1036 goto scsi_host_release;
1037 acb->host = host;
1038 host->max_lun = ARCMSR_MAX_TARGETLUN;
1039 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
1040 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
1041 if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
1042 host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
1043 host->can_queue = host_can_queue; /* max simultaneous cmds */
1044 if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
1045 cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
1046 host->cmd_per_lun = cmd_per_lun;
1047 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
1048 host->unique_id = (bus << 8) | dev_fun;
1049 pci_set_drvdata(pdev, host);
1050 pci_set_master(pdev);
1051 error = pci_request_regions(pdev, "arcmsr");
1052 if(error){
1053 goto scsi_host_release;
1054 }
1055 spin_lock_init(&acb->eh_lock);
1056 spin_lock_init(&acb->ccblist_lock);
1057 spin_lock_init(&acb->postq_lock);
1058 spin_lock_init(&acb->doneq_lock);
1059 spin_lock_init(&acb->rqbuffer_lock);
1060 spin_lock_init(&acb->wqbuffer_lock);
1061 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1062 ACB_F_MESSAGE_RQBUFFER_CLEARED |
1063 ACB_F_MESSAGE_WQBUFFER_READED);
1064 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
1065 INIT_LIST_HEAD(&acb->ccb_free_list);
1066 error = arcmsr_remap_pciregion(acb);
1067 if(!error){
1068 goto pci_release_regs;
1069 }
1070 error = arcmsr_alloc_io_queue(acb);
1071 if (!error)
1072 goto unmap_pci_region;
1073 error = arcmsr_get_firmware_spec(acb);
1074 if(!error){
1075 goto free_hbb_mu;
1076 }
1077 if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
1078 arcmsr_free_io_queue(acb);
1079 error = arcmsr_alloc_ccb_pool(acb);
1080 if(error){
1081 goto unmap_pci_region;
1082 }
1083 error = scsi_add_host(host, &pdev->dev);
1084 if(error){
1085 goto free_ccb_pool;
1086 }
1087 if (arcmsr_request_irq(pdev, acb) == FAILED)
1088 goto scsi_host_remove;
1089 arcmsr_iop_init(acb);
1090 arcmsr_init_get_devmap_timer(acb);
1091 if (set_date_time)
1092 arcmsr_init_set_datetime_timer(acb);
1093 if(arcmsr_alloc_sysfs_attr(acb))
1094 goto out_free_sysfs;
1095 scsi_scan_host(host);
1096 return 0;
1097 out_free_sysfs:
1098 if (set_date_time)
1099 del_timer_sync(&acb->refresh_timer);
1100 del_timer_sync(&acb->eternal_timer);
1101 flush_work(&acb->arcmsr_do_message_isr_bh);
1102 arcmsr_stop_adapter_bgrb(acb);
1103 arcmsr_flush_adapter_cache(acb);
1104 arcmsr_free_irq(pdev, acb);
1105 scsi_host_remove:
1106 scsi_remove_host(host);
1107 free_ccb_pool:
1108 arcmsr_free_ccb_pool(acb);
1109 goto unmap_pci_region;
1110 free_hbb_mu:
1111 arcmsr_free_io_queue(acb);
1112 unmap_pci_region:
1113 arcmsr_unmap_pciregion(acb);
1114 pci_release_regs:
1115 pci_release_regions(pdev);
1116 scsi_host_release:
1117 scsi_host_put(host);
1118 pci_disable_dev:
1119 pci_disable_device(pdev);
1120 return -ENODEV;
1121 }
1122
arcmsr_free_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)1123 static void arcmsr_free_irq(struct pci_dev *pdev,
1124 struct AdapterControlBlock *acb)
1125 {
1126 int i;
1127
1128 for (i = 0; i < acb->vector_count; i++)
1129 free_irq(pci_irq_vector(pdev, i), acb);
1130 pci_free_irq_vectors(pdev);
1131 }
1132
/*
 * arcmsr_suspend - legacy PCI power-management suspend hook.
 *
 * Quiesces the adapter (mask interrupts, free IRQs, stop timers and the
 * message bottom half, stop background rebuild, flush the cache), then
 * saves PCI state and drops the device into the requested power state.
 */
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	pci_set_drvdata(pdev, host);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
1153
arcmsr_resume(struct pci_dev * pdev)1154 static int arcmsr_resume(struct pci_dev *pdev)
1155 {
1156 struct Scsi_Host *host = pci_get_drvdata(pdev);
1157 struct AdapterControlBlock *acb =
1158 (struct AdapterControlBlock *)host->hostdata;
1159
1160 pci_set_power_state(pdev, PCI_D0);
1161 pci_enable_wake(pdev, PCI_D0, 0);
1162 pci_restore_state(pdev);
1163 if (pci_enable_device(pdev)) {
1164 pr_warn("%s: pci_enable_device error\n", __func__);
1165 return -ENODEV;
1166 }
1167 if (arcmsr_set_dma_mask(acb))
1168 goto controller_unregister;
1169 pci_set_master(pdev);
1170 if (arcmsr_request_irq(pdev, acb) == FAILED)
1171 goto controller_stop;
1172 switch (acb->adapter_type) {
1173 case ACB_ADAPTER_TYPE_B: {
1174 struct MessageUnit_B *reg = acb->pmuB;
1175 uint32_t i;
1176 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1177 reg->post_qbuffer[i] = 0;
1178 reg->done_qbuffer[i] = 0;
1179 }
1180 reg->postq_index = 0;
1181 reg->doneq_index = 0;
1182 break;
1183 }
1184 case ACB_ADAPTER_TYPE_E:
1185 writel(0, &acb->pmuE->host_int_status);
1186 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
1187 acb->in_doorbell = 0;
1188 acb->out_doorbell = 0;
1189 acb->doneq_index = 0;
1190 break;
1191 case ACB_ADAPTER_TYPE_F:
1192 writel(0, &acb->pmuF->host_int_status);
1193 writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
1194 acb->in_doorbell = 0;
1195 acb->out_doorbell = 0;
1196 acb->doneq_index = 0;
1197 arcmsr_hbaF_assign_regAddr(acb);
1198 break;
1199 }
1200 arcmsr_iop_init(acb);
1201 arcmsr_init_get_devmap_timer(acb);
1202 if (set_date_time)
1203 arcmsr_init_set_datetime_timer(acb);
1204 return 0;
1205 controller_stop:
1206 arcmsr_stop_adapter_bgrb(acb);
1207 arcmsr_flush_adapter_cache(acb);
1208 controller_unregister:
1209 scsi_remove_host(host);
1210 arcmsr_free_ccb_pool(acb);
1211 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1212 arcmsr_free_io_queue(acb);
1213 arcmsr_unmap_pciregion(acb);
1214 pci_release_regions(pdev);
1215 scsi_host_put(host);
1216 pci_disable_device(pdev);
1217 return -ENODEV;
1218 }
1219
arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock * acb)1220 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1221 {
1222 struct MessageUnit_A __iomem *reg = acb->pmuA;
1223 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1224 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1225 printk(KERN_NOTICE
1226 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1227 , acb->host->host_no);
1228 return false;
1229 }
1230 return true;
1231 }
1232
arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock * acb)1233 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1234 {
1235 struct MessageUnit_B *reg = acb->pmuB;
1236
1237 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1238 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1239 printk(KERN_NOTICE
1240 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1241 , acb->host->host_no);
1242 return false;
1243 }
1244 return true;
1245 }
arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock * pACB)1246 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1247 {
1248 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1249 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1250 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1251 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1252 printk(KERN_NOTICE
1253 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1254 , pACB->host->host_no);
1255 return false;
1256 }
1257 return true;
1258 }
1259
arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock * pACB)1260 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1261 {
1262 struct MessageUnit_D *reg = pACB->pmuD;
1263
1264 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1265 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1266 pr_notice("arcmsr%d: wait 'abort all outstanding "
1267 "command' timeout\n", pACB->host->host_no);
1268 return false;
1269 }
1270 return true;
1271 }
1272
arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock * pACB)1273 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1274 {
1275 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1276
1277 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1278 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1279 writel(pACB->out_doorbell, ®->iobound_doorbell);
1280 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1281 pr_notice("arcmsr%d: wait 'abort all outstanding "
1282 "command' timeout\n", pACB->host->host_no);
1283 return false;
1284 }
1285 return true;
1286 }
1287
arcmsr_abort_allcmd(struct AdapterControlBlock * acb)1288 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1289 {
1290 uint8_t rtnval = 0;
1291 switch (acb->adapter_type) {
1292 case ACB_ADAPTER_TYPE_A:
1293 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1294 break;
1295 case ACB_ADAPTER_TYPE_B:
1296 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1297 break;
1298 case ACB_ADAPTER_TYPE_C:
1299 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1300 break;
1301 case ACB_ADAPTER_TYPE_D:
1302 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1303 break;
1304 case ACB_ADAPTER_TYPE_E:
1305 case ACB_ADAPTER_TYPE_F:
1306 rtnval = arcmsr_hbaE_abort_allcmd(acb);
1307 break;
1308 }
1309 return rtnval;
1310 }
1311
arcmsr_pci_unmap_dma(struct CommandControlBlock * ccb)1312 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
1313 {
1314 struct scsi_cmnd *pcmd = ccb->pcmd;
1315
1316 scsi_dma_unmap(pcmd);
1317 }
1318
arcmsr_ccb_complete(struct CommandControlBlock * ccb)1319 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
1320 {
1321 struct AdapterControlBlock *acb = ccb->acb;
1322 struct scsi_cmnd *pcmd = ccb->pcmd;
1323 unsigned long flags;
1324 atomic_dec(&acb->ccboutstandingcount);
1325 arcmsr_pci_unmap_dma(ccb);
1326 ccb->startdone = ARCMSR_CCB_DONE;
1327 spin_lock_irqsave(&acb->ccblist_lock, flags);
1328 list_add_tail(&ccb->list, &acb->ccb_free_list);
1329 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
1330 pcmd->scsi_done(pcmd);
1331 }
1332
arcmsr_report_sense_info(struct CommandControlBlock * ccb)1333 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1334 {
1335
1336 struct scsi_cmnd *pcmd = ccb->pcmd;
1337 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1338 pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1339 if (sensebuffer) {
1340 int sense_data_length =
1341 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
1342 ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
1343 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
1344 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
1345 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1346 sensebuffer->Valid = 1;
1347 pcmd->result |= (DRIVER_SENSE << 24);
1348 }
1349 }
1350
arcmsr_disable_outbound_ints(struct AdapterControlBlock * acb)1351 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1352 {
1353 u32 orig_mask = 0;
1354 switch (acb->adapter_type) {
1355 case ACB_ADAPTER_TYPE_A : {
1356 struct MessageUnit_A __iomem *reg = acb->pmuA;
1357 orig_mask = readl(®->outbound_intmask);
1358 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1359 ®->outbound_intmask);
1360 }
1361 break;
1362 case ACB_ADAPTER_TYPE_B : {
1363 struct MessageUnit_B *reg = acb->pmuB;
1364 orig_mask = readl(reg->iop2drv_doorbell_mask);
1365 writel(0, reg->iop2drv_doorbell_mask);
1366 }
1367 break;
1368 case ACB_ADAPTER_TYPE_C:{
1369 struct MessageUnit_C __iomem *reg = acb->pmuC;
1370 /* disable all outbound interrupt */
1371 orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
1372 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
1373 }
1374 break;
1375 case ACB_ADAPTER_TYPE_D: {
1376 struct MessageUnit_D *reg = acb->pmuD;
1377 /* disable all outbound interrupt */
1378 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1379 }
1380 break;
1381 case ACB_ADAPTER_TYPE_E:
1382 case ACB_ADAPTER_TYPE_F: {
1383 struct MessageUnit_E __iomem *reg = acb->pmuE;
1384 orig_mask = readl(®->host_int_mask);
1385 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask);
1386 readl(®->host_int_mask); /* Dummy readl to force pci flush */
1387 }
1388 break;
1389 }
1390 return orig_mask;
1391 }
1392
arcmsr_report_ccb_state(struct AdapterControlBlock * acb,struct CommandControlBlock * ccb,bool error)1393 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1394 struct CommandControlBlock *ccb, bool error)
1395 {
1396 uint8_t id, lun;
1397 id = ccb->pcmd->device->id;
1398 lun = ccb->pcmd->device->lun;
1399 if (!error) {
1400 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1401 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1402 ccb->pcmd->result = DID_OK << 16;
1403 arcmsr_ccb_complete(ccb);
1404 }else{
1405 switch (ccb->arcmsr_cdb.DeviceStatus) {
1406 case ARCMSR_DEV_SELECT_TIMEOUT: {
1407 acb->devstate[id][lun] = ARECA_RAID_GONE;
1408 ccb->pcmd->result = DID_NO_CONNECT << 16;
1409 arcmsr_ccb_complete(ccb);
1410 }
1411 break;
1412
1413 case ARCMSR_DEV_ABORTED:
1414
1415 case ARCMSR_DEV_INIT_FAIL: {
1416 acb->devstate[id][lun] = ARECA_RAID_GONE;
1417 ccb->pcmd->result = DID_BAD_TARGET << 16;
1418 arcmsr_ccb_complete(ccb);
1419 }
1420 break;
1421
1422 case ARCMSR_DEV_CHECK_CONDITION: {
1423 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1424 arcmsr_report_sense_info(ccb);
1425 arcmsr_ccb_complete(ccb);
1426 }
1427 break;
1428
1429 default:
1430 printk(KERN_NOTICE
1431 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
1432 but got unknown DeviceStatus = 0x%x \n"
1433 , acb->host->host_no
1434 , id
1435 , lun
1436 , ccb->arcmsr_cdb.DeviceStatus);
1437 acb->devstate[id][lun] = ARECA_RAID_GONE;
1438 ccb->pcmd->result = DID_NO_CONNECT << 16;
1439 arcmsr_ccb_complete(ccb);
1440 break;
1441 }
1442 }
1443 }
1444
arcmsr_drain_donequeue(struct AdapterControlBlock * acb,struct CommandControlBlock * pCCB,bool error)1445 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1446 {
1447 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1448 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1449 struct scsi_cmnd *abortcmd = pCCB->pcmd;
1450 if (abortcmd) {
1451 abortcmd->result |= DID_ABORT << 16;
1452 arcmsr_ccb_complete(pCCB);
1453 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
1454 acb->host->host_no, pCCB);
1455 }
1456 return;
1457 }
1458 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
1459 done acb = '0x%p'"
1460 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1461 " ccboutstandingcount = %d \n"
1462 , acb->host->host_no
1463 , acb
1464 , pCCB
1465 , pCCB->acb
1466 , pCCB->startdone
1467 , atomic_read(&acb->ccboutstandingcount));
1468 return;
1469 }
1470 arcmsr_report_ccb_state(acb, pCCB, error);
1471 }
1472
arcmsr_done4abort_postqueue(struct AdapterControlBlock * acb)1473 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1474 {
1475 int i = 0;
1476 uint32_t flag_ccb;
1477 struct ARCMSR_CDB *pARCMSR_CDB;
1478 bool error;
1479 struct CommandControlBlock *pCCB;
1480 unsigned long ccb_cdb_phy;
1481
1482 switch (acb->adapter_type) {
1483
1484 case ACB_ADAPTER_TYPE_A: {
1485 struct MessageUnit_A __iomem *reg = acb->pmuA;
1486 uint32_t outbound_intstatus;
1487 outbound_intstatus = readl(®->outbound_intstatus) &
1488 acb->outbound_int_enable;
1489 /*clear and abort all outbound posted Q*/
1490 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
1491 while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
1492 && (i++ < acb->maxOutstanding)) {
1493 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1494 if (acb->cdb_phyadd_hipart)
1495 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1496 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1497 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1498 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1499 arcmsr_drain_donequeue(acb, pCCB, error);
1500 }
1501 }
1502 break;
1503
1504 case ACB_ADAPTER_TYPE_B: {
1505 struct MessageUnit_B *reg = acb->pmuB;
1506 /*clear all outbound posted Q*/
1507 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1508 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1509 flag_ccb = reg->done_qbuffer[i];
1510 if (flag_ccb != 0) {
1511 reg->done_qbuffer[i] = 0;
1512 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1513 if (acb->cdb_phyadd_hipart)
1514 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1515 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1516 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1517 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1518 arcmsr_drain_donequeue(acb, pCCB, error);
1519 }
1520 reg->post_qbuffer[i] = 0;
1521 }
1522 reg->doneq_index = 0;
1523 reg->postq_index = 0;
1524 }
1525 break;
1526 case ACB_ADAPTER_TYPE_C: {
1527 struct MessageUnit_C __iomem *reg = acb->pmuC;
1528 while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1529 /*need to do*/
1530 flag_ccb = readl(®->outbound_queueport_low);
1531 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1532 if (acb->cdb_phyadd_hipart)
1533 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1534 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1535 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1536 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1537 arcmsr_drain_donequeue(acb, pCCB, error);
1538 }
1539 }
1540 break;
1541 case ACB_ADAPTER_TYPE_D: {
1542 struct MessageUnit_D *pmu = acb->pmuD;
1543 uint32_t outbound_write_pointer;
1544 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1545 unsigned long flags;
1546
1547 residual = atomic_read(&acb->ccboutstandingcount);
1548 for (i = 0; i < residual; i++) {
1549 spin_lock_irqsave(&acb->doneq_lock, flags);
1550 outbound_write_pointer =
1551 pmu->done_qbuffer[0].addressLow + 1;
1552 doneq_index = pmu->doneq_index;
1553 if ((doneq_index & 0xFFF) !=
1554 (outbound_write_pointer & 0xFFF)) {
1555 toggle = doneq_index & 0x4000;
1556 index_stripped = (doneq_index & 0xFFF) + 1;
1557 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1558 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1559 ((toggle ^ 0x4000) + 1);
1560 doneq_index = pmu->doneq_index;
1561 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1562 addressLow = pmu->done_qbuffer[doneq_index &
1563 0xFFF].addressLow;
1564 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1565 if (acb->cdb_phyadd_hipart)
1566 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1567 pARCMSR_CDB = (struct ARCMSR_CDB *)
1568 (acb->vir2phy_offset + ccb_cdb_phy);
1569 pCCB = container_of(pARCMSR_CDB,
1570 struct CommandControlBlock, arcmsr_cdb);
1571 error = (addressLow &
1572 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1573 true : false;
1574 arcmsr_drain_donequeue(acb, pCCB, error);
1575 writel(doneq_index,
1576 pmu->outboundlist_read_pointer);
1577 } else {
1578 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1579 mdelay(10);
1580 }
1581 }
1582 pmu->postq_index = 0;
1583 pmu->doneq_index = 0x40FF;
1584 }
1585 break;
1586 case ACB_ADAPTER_TYPE_E:
1587 arcmsr_hbaE_postqueue_isr(acb);
1588 break;
1589 case ACB_ADAPTER_TYPE_F:
1590 arcmsr_hbaF_postqueue_isr(acb);
1591 break;
1592 }
1593 }
1594
arcmsr_remove_scsi_devices(struct AdapterControlBlock * acb)1595 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1596 {
1597 char *acb_dev_map = (char *)acb->device_map;
1598 int target, lun, i;
1599 struct scsi_device *psdev;
1600 struct CommandControlBlock *ccb;
1601 char temp;
1602
1603 for (i = 0; i < acb->maxFreeCCB; i++) {
1604 ccb = acb->pccb_pool[i];
1605 if (ccb->startdone == ARCMSR_CCB_START) {
1606 ccb->pcmd->result = DID_NO_CONNECT << 16;
1607 arcmsr_pci_unmap_dma(ccb);
1608 ccb->pcmd->scsi_done(ccb->pcmd);
1609 }
1610 }
1611 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1612 temp = *acb_dev_map;
1613 if (temp) {
1614 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1615 if (temp & 1) {
1616 psdev = scsi_device_lookup(acb->host,
1617 0, target, lun);
1618 if (psdev != NULL) {
1619 scsi_remove_device(psdev);
1620 scsi_device_put(psdev);
1621 }
1622 }
1623 temp >>= 1;
1624 }
1625 *acb_dev_map = 0;
1626 }
1627 acb_dev_map++;
1628 }
1629 }
1630
arcmsr_free_pcidev(struct AdapterControlBlock * acb)1631 static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
1632 {
1633 struct pci_dev *pdev;
1634 struct Scsi_Host *host;
1635
1636 host = acb->host;
1637 arcmsr_free_sysfs_attr(acb);
1638 scsi_remove_host(host);
1639 flush_work(&acb->arcmsr_do_message_isr_bh);
1640 del_timer_sync(&acb->eternal_timer);
1641 if (set_date_time)
1642 del_timer_sync(&acb->refresh_timer);
1643 pdev = acb->pdev;
1644 arcmsr_free_irq(pdev, acb);
1645 arcmsr_free_ccb_pool(acb);
1646 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1647 arcmsr_free_io_queue(acb);
1648 arcmsr_unmap_pciregion(acb);
1649 pci_release_regions(pdev);
1650 scsi_host_put(host);
1651 pci_disable_device(pdev);
1652 }
1653
arcmsr_remove(struct pci_dev * pdev)1654 static void arcmsr_remove(struct pci_dev *pdev)
1655 {
1656 struct Scsi_Host *host = pci_get_drvdata(pdev);
1657 struct AdapterControlBlock *acb =
1658 (struct AdapterControlBlock *) host->hostdata;
1659 int poll_count = 0;
1660 uint16_t dev_id;
1661
1662 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
1663 if (dev_id == 0xffff) {
1664 acb->acb_flags &= ~ACB_F_IOP_INITED;
1665 acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
1666 arcmsr_remove_scsi_devices(acb);
1667 arcmsr_free_pcidev(acb);
1668 return;
1669 }
1670 arcmsr_free_sysfs_attr(acb);
1671 scsi_remove_host(host);
1672 flush_work(&acb->arcmsr_do_message_isr_bh);
1673 del_timer_sync(&acb->eternal_timer);
1674 if (set_date_time)
1675 del_timer_sync(&acb->refresh_timer);
1676 arcmsr_disable_outbound_ints(acb);
1677 arcmsr_stop_adapter_bgrb(acb);
1678 arcmsr_flush_adapter_cache(acb);
1679 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1680 acb->acb_flags &= ~ACB_F_IOP_INITED;
1681
1682 for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
1683 if (!atomic_read(&acb->ccboutstandingcount))
1684 break;
1685 arcmsr_interrupt(acb);/* FIXME: need spinlock */
1686 msleep(25);
1687 }
1688
1689 if (atomic_read(&acb->ccboutstandingcount)) {
1690 int i;
1691
1692 arcmsr_abort_allcmd(acb);
1693 arcmsr_done4abort_postqueue(acb);
1694 for (i = 0; i < acb->maxFreeCCB; i++) {
1695 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1696 if (ccb->startdone == ARCMSR_CCB_START) {
1697 ccb->startdone = ARCMSR_CCB_ABORTED;
1698 ccb->pcmd->result = DID_ABORT << 16;
1699 arcmsr_ccb_complete(ccb);
1700 }
1701 }
1702 }
1703 arcmsr_free_irq(pdev, acb);
1704 arcmsr_free_ccb_pool(acb);
1705 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1706 arcmsr_free_io_queue(acb);
1707 arcmsr_unmap_pciregion(acb);
1708 pci_release_regions(pdev);
1709 scsi_host_put(host);
1710 pci_disable_device(pdev);
1711 }
1712
arcmsr_shutdown(struct pci_dev * pdev)1713 static void arcmsr_shutdown(struct pci_dev *pdev)
1714 {
1715 struct Scsi_Host *host = pci_get_drvdata(pdev);
1716 struct AdapterControlBlock *acb =
1717 (struct AdapterControlBlock *)host->hostdata;
1718 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
1719 return;
1720 del_timer_sync(&acb->eternal_timer);
1721 if (set_date_time)
1722 del_timer_sync(&acb->refresh_timer);
1723 arcmsr_disable_outbound_ints(acb);
1724 arcmsr_free_irq(pdev, acb);
1725 flush_work(&acb->arcmsr_do_message_isr_bh);
1726 arcmsr_stop_adapter_bgrb(acb);
1727 arcmsr_flush_adapter_cache(acb);
1728 }
1729
arcmsr_module_init(void)1730 static int arcmsr_module_init(void)
1731 {
1732 int error = 0;
1733 error = pci_register_driver(&arcmsr_pci_driver);
1734 return error;
1735 }
1736
/* Module exit point: unregister the PCI driver registered at init time. */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
1743
/*
 * Re-enable the outbound (IOP -> driver) interrupt sources for the
 * current adapter family, starting from @intmask_org, the mask value
 * saved earlier by arcmsr_disable_outbound_ints().
 *
 * Note the per-family polarity: on types A, C, E and F a *cleared* bit
 * in the mask register enables the interrupt, while on types B and D a
 * *set* bit enables it.
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* clear the postqueue/doorbell/message0 mask bits to enable them */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	}
	break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* set the doorbell bits to enable them on type B */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		/* types E and F share the type-E message unit layout */
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
	}
	}
}
1794
/*
 * Translate the SCSI command @pcmd into the controller's ARCMSR_CDB
 * layout inside @ccb, including the DMA scatter/gather list.
 * Returns SUCCESS, or FAILED when scsi_dma_map() fails or produces
 * more segments than the host supports (the caller is expected to
 * handle the unmapped state in that case).
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* 32-bit DMA address fits in a compact SG32 entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			/* 64-bit DMA address needs a wide entry, flagged IS_SG64_ADDR */
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* number of 256-byte pages occupied by the CDB, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
1849
/*
 * Hand a prepared CCB to the adapter firmware via the family-specific
 * inbound post queue.  Marks the CCB as started and bumps the
 * outstanding-command count before posting, so the completion path can
 * always account for it.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* low bit(s) of the posted address carry the big-SGL hint */
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* zero the next slot so the firmware sees a clean terminator */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* encode the (capped) CDB size into the low bits of the address */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		/* high dword must be written before the low dword triggers the post */
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		/* the type-D post queue is shared; serialize slot allocation */
		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* bit 14 is a wrap toggle preserved across index wrap-around */
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		/* type E posts by SMID rather than physical address */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *pmu = acb->pmuF;
		u32 ccb_post_stamp, arc_cdb_size;

		/* type F encodes the size differently for small vs large CDBs */
		if (ccb->arc_cdb_size <= 0x300)
			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
		else {
			arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
			if (arc_cdb_size > 0xF)
				arc_cdb_size = 0xF;
			arc_cdb_size = (arc_cdb_size << 1) | 1;
		}
		ccb_post_stamp = (ccb->smid | arc_cdb_size);
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
	}
	}
}
1948
/*
 * Type A: ask the firmware to stop background rebuild via message
 * register 0, then wait (polling) for the message-complete interrupt.
 * Logs a notice on timeout but cannot fail.
 */
static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}
1960
/*
 * Type B: stop background rebuild via the drv2iop doorbell, then wait
 * for the firmware acknowledgement.  Logs a notice on timeout.
 */
static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);

	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}
1973
arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock * pACB)1974 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1975 {
1976 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1977 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1978 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
1979 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1980 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1981 printk(KERN_NOTICE
1982 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1983 , pACB->host->host_no);
1984 }
1985 return;
1986 }
1987
/*
 * Type D: stop background rebuild via message register 0 and wait for
 * the firmware acknowledgement.  Logs a notice on timeout.
 */
static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
}
1998
/*
 * Type E/F: stop background rebuild via message register 0.  The
 * type-E doorbell protocol is edge-based: the driver toggles its
 * MESSAGE_CMD_DONE bit in the shadow copy (out_doorbell) and writes
 * the whole shadow to the register, then waits for the firmware ack.
 */
static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
	}
}
2012
arcmsr_stop_adapter_bgrb(struct AdapterControlBlock * acb)2013 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2014 {
2015 switch (acb->adapter_type) {
2016 case ACB_ADAPTER_TYPE_A:
2017 arcmsr_hbaA_stop_bgrb(acb);
2018 break;
2019 case ACB_ADAPTER_TYPE_B:
2020 arcmsr_hbaB_stop_bgrb(acb);
2021 break;
2022 case ACB_ADAPTER_TYPE_C:
2023 arcmsr_hbaC_stop_bgrb(acb);
2024 break;
2025 case ACB_ADAPTER_TYPE_D:
2026 arcmsr_hbaD_stop_bgrb(acb);
2027 break;
2028 case ACB_ADAPTER_TYPE_E:
2029 case ACB_ADAPTER_TYPE_F:
2030 arcmsr_hbaE_stop_bgrb(acb);
2031 break;
2032 }
2033 }
2034
/* Release the single coherent DMA allocation backing the CCB pool. */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
2039
/*
 * Tell the IOP that the driver has consumed the data it placed in the
 * rqbuffer, so the firmware may post the next chunk.  Each adapter
 * family uses a different doorbell register/bit; type E/F toggles the
 * bit in the out_doorbell shadow and writes the whole shadow.
 */
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
	}
	break;
	}
}
2074
/*
 * Tell the IOP that the driver has finished writing data into the
 * wqbuffer; the firmware will reply via interrupt when it is ready for
 * the next Qbuffer post.  Doorbell register/bit varies per family.
 */
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
	}
	break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
			reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		/* edge-based protocol: toggle the bit in the shadow and write it */
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
	}
	break;
	}
}
2121
arcmsr_get_iop_rqbuffer(struct AdapterControlBlock * acb)2122 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2123 {
2124 struct QBUFFER __iomem *qbuffer = NULL;
2125 switch (acb->adapter_type) {
2126
2127 case ACB_ADAPTER_TYPE_A: {
2128 struct MessageUnit_A __iomem *reg = acb->pmuA;
2129 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2130 }
2131 break;
2132 case ACB_ADAPTER_TYPE_B: {
2133 struct MessageUnit_B *reg = acb->pmuB;
2134 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2135 }
2136 break;
2137 case ACB_ADAPTER_TYPE_C: {
2138 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2139 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2140 }
2141 break;
2142 case ACB_ADAPTER_TYPE_D: {
2143 struct MessageUnit_D *reg = acb->pmuD;
2144 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2145 }
2146 break;
2147 case ACB_ADAPTER_TYPE_E: {
2148 struct MessageUnit_E __iomem *reg = acb->pmuE;
2149 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2150 }
2151 break;
2152 case ACB_ADAPTER_TYPE_F: {
2153 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2154 }
2155 break;
2156 }
2157 return qbuffer;
2158 }
2159
arcmsr_get_iop_wqbuffer(struct AdapterControlBlock * acb)2160 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2161 {
2162 struct QBUFFER __iomem *pqbuffer = NULL;
2163 switch (acb->adapter_type) {
2164
2165 case ACB_ADAPTER_TYPE_A: {
2166 struct MessageUnit_A __iomem *reg = acb->pmuA;
2167 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer;
2168 }
2169 break;
2170 case ACB_ADAPTER_TYPE_B: {
2171 struct MessageUnit_B *reg = acb->pmuB;
2172 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2173 }
2174 break;
2175 case ACB_ADAPTER_TYPE_C: {
2176 struct MessageUnit_C __iomem *reg = acb->pmuC;
2177 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2178 }
2179 break;
2180 case ACB_ADAPTER_TYPE_D: {
2181 struct MessageUnit_D *reg = acb->pmuD;
2182 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2183 }
2184 break;
2185 case ACB_ADAPTER_TYPE_E: {
2186 struct MessageUnit_E __iomem *reg = acb->pmuE;
2187 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2188 }
2189 break;
2190 case ACB_ADAPTER_TYPE_F:
2191 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2192 break;
2193 }
2194 return pqbuffer;
2195 }
2196
/*
 * Copy the IOP's rqbuffer payload into the driver's circular rqbuffer,
 * reading MMIO in 32-bit words (required by the newer message units)
 * via a temporary bounce buffer, then ack the IOP.
 *
 * Returns 1 on success, 0 if the bounce buffer cannot be allocated (in
 * which case nothing is consumed and the IOP is not acked).
 *
 * NOTE(review): the bounce buffer is a fixed 128 bytes; this assumes
 * the firmware never reports data_len > 128 — TODO confirm against the
 * QBUFFER definition.
 */
static uint32_t
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer)
{
	uint8_t *pQbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t iop_len, data_len, *buf2 = NULL;

	iop_data = (uint32_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	if (iop_len > 0) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return 0;
		data_len = iop_len;
		/* pull whole 32-bit words from MMIO into the bounce buffer */
		while (data_len >= 4) {
			*buf2++ = readl(iop_data);
			iop_data++;
			data_len -= 4;
		}
		/* a trailing partial word still needs one more 32-bit read */
		if (data_len)
			*buf2 = readl(iop_data);
		buf2 = (uint32_t *)buf1;	/* rewind for the kfree below */
	}
	/* byte-copy from the bounce buffer into the circular rqbuffer */
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = *buf1;
		acb->rqbuf_putIndex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		buf1++;
		iop_len--;
	}
	kfree(buf2);
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
	return 1;
}
2237
/*
 * Copy the IOP's rqbuffer payload into the driver's circular rqbuffer
 * and ack the IOP.  Adapter types newer than B require 32-bit MMIO
 * accesses and are delegated to the DWORD variant; A and B are read
 * byte-wise with readb().  Returns 1 on success, 0 on failure (only
 * possible in the DWORD path).
 */
uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer) {

	uint8_t *pQbuffer;
	uint8_t __iomem *iop_data;
	uint32_t iop_len;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_putIndex++;
		/* wrap the circular buffer index */
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		iop_data++;
		iop_len--;
	}
	arcmsr_iop_message_read(acb);
	return 1;
}
2261
/*
 * Handle the "IOP wrote data" doorbell: if the driver's circular
 * rqbuffer has room for the firmware's payload, drain it now;
 * otherwise (or if the drain fails) flag an overflow so the data is
 * fetched later when the reader frees space.  Runs under rqbuffer_lock.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;
	struct QBUFFER __iomem *prbuffer;
	int32_t buf_empty_len;

	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	/* free space in the circular buffer (one slot kept unused) */
	buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
		(ARCMSR_MAX_QBUFFER - 1);
	if (buf_empty_len >= readl(&prbuffer->data_len)) {
		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
}
2279
/*
 * Push up to 124 bytes of pending ioctl data from the driver's
 * circular wqbuffer to the IOP's wqbuffer, writing MMIO in 32-bit
 * words via a temporary bounce buffer (required by the newer message
 * units), then ring the "data written" doorbell.
 *
 * Only runs when the firmware has signalled it consumed the previous
 * chunk (ACB_F_MESSAGE_WQBUFFER_READED); the flag is cleared before
 * writing.  Silently returns if the bounce buffer cannot be allocated.
 */
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		/* gather pending bytes from the circular buffer (max 124) */
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			*buf1 = *pQbuffer;
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;	/* rewind for the kfree below */
		/* push whole 32-bit words out to MMIO */
		while (data_len >= 4) {
			data = *buf2++;
			writel(data, iop_data);
			iop_data++;
			data_len -= 4;
		}
		/* trailing partial word still takes one 32-bit write */
		if (data_len) {
			data = *buf2;
			writel(data, iop_data);
		}
		writel(allxfer_len, &pwbuffer->data_len);
		kfree(buf1);
		arcmsr_iop_message_wrote(acb);
	}
}
2323
/*
 * Push pending ioctl data from the driver's circular wqbuffer to the
 * IOP.  Adapter types newer than B require 32-bit MMIO accesses and
 * are delegated to the DWORD variant; A and B write byte-wise with
 * writeb().  Transfers at most 124 bytes per call and only when the
 * firmware has acked the previous chunk (ACB_F_MESSAGE_WQBUFFER_READED).
 */
void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
		return;
	}
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		arcmsr_iop_message_wrote(acb);
	}
}
2353
/*
 * Handle the "IOP read our data" doorbell: the firmware has consumed
 * the previous wqbuffer chunk, so mark it writable again and, if more
 * data is queued, send the next chunk.  The second `if` is deliberately
 * not an else — the write above advances wqbuf_getIndex, and the buffer
 * may have become empty, in which case it is flagged CLEARED.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
		arcmsr_write_ioctldata2iop(acb);
	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
2366
/*
 * Type A doorbell ISR: acknowledge the asserted doorbell bits by
 * writing them back, dispatch data-write/data-read events, and loop
 * until no further doorbell bits are pending (events can arrive while
 * earlier ones are being serviced).
 */
static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		/* write-1-to-clear the bits we are about to service */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}
/*
 * Type C doorbell ISR: clear the asserted bits via the dedicated
 * doorbell-clear register (read back to flush the posted write),
 * dispatch data-write/data-read/message-done events, and loop until
 * quiescent.
 */
static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		/* read back to flush the posted clear before handling events */
		readl(&reg->outbound_doorbell_clear);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaC_message_isr(pACB);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}
2408
/*
 * Type D doorbell ISR: acknowledge the asserted bits by writing them
 * back, dispatch message-done/data-write/data-read events, and loop
 * until no further doorbell bits are pending.
 */
static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_D *pmu = pACB->pmuD;

	outbound_doorbell = readl(pmu->outbound_doorbell);
	do {
		/* write-1-to-clear the bits we are about to service */
		writel(outbound_doorbell, pmu->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
2428
/*
 * Type E/F doorbell ISR.  The type-E protocol is edge-based: events
 * are detected by XOR-ing the current iobound_doorbell value against
 * the last value seen (pACB->in_doorbell); each changed bit is one
 * event.  On type F the register is re-read up to 5 times because it
 * can transiently read as 0 (NOTE(review): inferred from the retry
 * loop — confirm against Areca errata).
 */
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell, in_doorbell, tmp, i;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
		for (i = 0; i < 5; i++) {
			in_doorbell = readl(&reg->iobound_doorbell);
			if (in_doorbell != 0)
				break;
		}
	} else
		in_doorbell = readl(&reg->iobound_doorbell);
	/* changed bits relative to the last snapshot are the new events */
	outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
	do {
		writel(0, &reg->host_int_status); /* clear interrupt */
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
			arcmsr_iop2drv_data_wrote_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
			arcmsr_iop2drv_data_read_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			arcmsr_hbaE_message_isr(pACB);
		}
		tmp = in_doorbell;
		in_doorbell = readl(&reg->iobound_doorbell);
		outbound_doorbell = tmp ^ in_doorbell;
	} while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
	/* remember the last snapshot for the next edge comparison */
	pACB->in_doorbell = in_doorbell;
}
2462
/*
 * Type A post-queue ISR: drain completed CCBs.  Each outbound queue
 * entry packs the CDB physical address (>>5) plus status flags; the
 * queue reads 0xFFFFFFFF when empty.  The CCB is recovered by mapping
 * the physical address back to its virtual CDB and using container_of.
 */
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		/* reconstruct the low 32 bits of the CDB physical address */
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
/*
 * Type B post-queue ISR: drain completed CCBs from the in-memory done
 * queue.  A zero entry terminates the scan; each consumed slot is
 * zeroed and the done index advanced (wrapping at the queue size).
 */
static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	index = reg->doneq_index;
	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
		/* entry packs the CDB physical address (>>5) plus status flags */
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		reg->done_qbuffer[index] = 0;
		index++;
		/* wrap at the end of the circular done queue */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}
2507
/*
 * Type C post-queue ISR: drain completed CCBs from the outbound queue
 * register (reads 0xFFFFFFFF when empty).  Every
 * ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the driver rings the
 * throttling doorbell so the firmware can keep posting.
 */
static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, throttling = 0;
	unsigned long ccb_cdb_phy;
	int error;

	phbcmu = acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
			0xFFFFFFFF) {
		/* low 4 bits carry status; the rest is the CDB physical address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
			+ ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		throttling++;
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			throttling = 0;
		}
	}
}
2542
/*
 * Type D post-queue ISR: drain completed CCBs from the in-memory done
 * queue, under doneq_lock.  Slot 0's addressLow holds the firmware's
 * write pointer; the driver chases it with doneq_index, whose bit 14
 * is a wrap toggle preserved across index wrap-around.  Each consumed
 * entry is acknowledged via outboundlist_read_pointer, and the list
 * interrupt is cleared (with a read-back flush) before returning.
 */
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
	uint32_t addressLow;
	int error;
	struct MessageUnit_D *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags, ccb_cdb_phy;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	pmu = acb->pmuD;
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		do {
			/* advance doneq_index, carrying the bit-14 wrap toggle */
			toggle = doneq_index & 0x4000;
			index_stripped = (doneq_index & 0xFFF) + 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
			doneq_index = pmu->doneq_index;
			addressLow = pmu->done_qbuffer[doneq_index &
				0xFFF].addressLow;
			/* low 4 bits carry status; the rest is the CDB address */
			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
				+ ccb_cdb_phy);
			ccb = container_of(arcmsr_cdb,
				struct CommandControlBlock, arcmsr_cdb);
			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
				? true : false;
			arcmsr_drain_donequeue(acb, ccb, error);
			writel(doneq_index, pmu->outboundlist_read_pointer);
		} while ((doneq_index & 0xFFF) !=
			(outbound_write_pointer & 0xFFF));
	}
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	/* read back to flush the posted interrupt-clear write */
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2586
/*
 * Type E post-queue ISR: drain the completion queue, under doneq_lock.
 * The driver's consumer index chases the firmware's producer index;
 * each completion entry carries the SMID used to look up the CCB in
 * pccb_pool.  The consumer index is written back to hardware when done.
 */
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_E __iomem *pmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	pmu = acb->pmuE;
	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag
			& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		doneq_index++;
		/* wrap at the end of the completion queue */
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	writel(doneq_index, &pmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2613
/*
 * Drain the Type F adapter's completion queue.
 *
 * Unlike Type E there is no producer index register to poll here: the
 * IOP writes entries directly into host memory and an SMID of 0xffff
 * marks a not-yet-filled slot.  Consumed slots are reset to 0xffff so
 * they read as empty on the next pass.
 */
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_F __iomem *phbcmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	phbcmu = acb->pmuF;
	while (1) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		/* 0xffff = sentinel: this slot has not been filled by the IOP. */
		if (cmdSMID == 0xffff)
			break;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		/* Re-arm the slot so it reads as empty next time around. */
		acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	/* Publish the consumer index back to the adapter. */
	writel(doneq_index, &phbcmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2643
2644 /*
2645 **********************************************************************************
2646 ** Handle a message interrupt
2647 **
2648 ** The only message interrupt we expect is in response to a query for the current adapter config.
2649 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2650 **********************************************************************************
2651 */
arcmsr_hbaA_message_isr(struct AdapterControlBlock * acb)2652 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2653 {
2654 struct MessageUnit_A __iomem *reg = acb->pmuA;
2655 /*clear interrupt and message state*/
2656 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
2657 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2658 schedule_work(&acb->arcmsr_do_message_isr_bh);
2659 }
arcmsr_hbaB_message_isr(struct AdapterControlBlock * acb)2660 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2661 {
2662 struct MessageUnit_B *reg = acb->pmuB;
2663
2664 /*clear interrupt and message state*/
2665 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2666 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2667 schedule_work(&acb->arcmsr_do_message_isr_bh);
2668 }
2669 /*
2670 **********************************************************************************
2671 ** Handle a message interrupt
2672 **
2673 ** The only message interrupt we expect is in response to a query for the
2674 ** current adapter config.
2675 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2676 **********************************************************************************
2677 */
arcmsr_hbaC_message_isr(struct AdapterControlBlock * acb)2678 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2679 {
2680 struct MessageUnit_C __iomem *reg = acb->pmuC;
2681 /*clear interrupt and message state*/
2682 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
2683 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2684 schedule_work(&acb->arcmsr_do_message_isr_bh);
2685 }
2686
arcmsr_hbaD_message_isr(struct AdapterControlBlock * acb)2687 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2688 {
2689 struct MessageUnit_D *reg = acb->pmuD;
2690
2691 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2692 readl(reg->outbound_doorbell);
2693 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2694 schedule_work(&acb->arcmsr_do_message_isr_bh);
2695 }
2696
arcmsr_hbaE_message_isr(struct AdapterControlBlock * acb)2697 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2698 {
2699 struct MessageUnit_E __iomem *reg = acb->pmuE;
2700
2701 writel(0, ®->host_int_status);
2702 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2703 schedule_work(&acb->arcmsr_do_message_isr_bh);
2704 }
2705
arcmsr_hbaA_handle_isr(struct AdapterControlBlock * acb)2706 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2707 {
2708 uint32_t outbound_intstatus;
2709 struct MessageUnit_A __iomem *reg = acb->pmuA;
2710 outbound_intstatus = readl(®->outbound_intstatus) &
2711 acb->outbound_int_enable;
2712 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2713 return IRQ_NONE;
2714 do {
2715 writel(outbound_intstatus, ®->outbound_intstatus);
2716 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2717 arcmsr_hbaA_doorbell_isr(acb);
2718 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2719 arcmsr_hbaA_postqueue_isr(acb);
2720 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2721 arcmsr_hbaA_message_isr(acb);
2722 outbound_intstatus = readl(®->outbound_intstatus) &
2723 acb->outbound_int_enable;
2724 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2725 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2726 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2727 return IRQ_HANDLED;
2728 }
2729
/*
 * Top-level interrupt handler for Type B adapters.
 *
 * Type B signals everything through the iop2drv doorbell.  Loop while any
 * enabled doorbell bit is set: acknowledge, signal end-of-interrupt back
 * to the IOP, then dispatch to the data-write, data-read, CDB-done and
 * message sub-handlers.
 */
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;
	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		/* Clear only the bits we saw, then tell the IOP we are done. */
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		/* Re-sample for events raised while we were servicing. */
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}
2757
/*
 * Top-level interrupt handler for Type C adapters.
 *
 * Checks the host interrupt status for doorbell and post-queue events and
 * loops dispatching to the respective sub-handlers until both sources are
 * quiet.  Note the message interrupt is delivered via the doorbell path
 * on this chip (see arcmsr_hbaC_doorbell_isr).
 */
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		/* Re-read status: servicing may have raised further events. */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2783
/*
 * Top-level interrupt handler for Type D (ARC1214) adapters.
 *
 * Same shape as the other types: sample host_int_status masked to the
 * post-queue and doorbell sources, bail with IRQ_NONE if neither fired,
 * otherwise loop dispatching until both are quiet.
 */
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		/* Re-sample before deciding whether to loop again. */
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2808
/*
 * Top-level interrupt handler for Type E adapters.
 *
 * Samples host_int_status masked to the doorbell and post-queue sources
 * and loops dispatching to arcmsr_hbaE_doorbell_isr() /
 * arcmsr_hbaE_postqueue_isr() until both sources are quiet.
 */
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		/* Re-sample: more work may have arrived while servicing. */
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2833
/*
 * Top-level interrupt handler for Type F adapters.
 *
 * Type F reuses the Type E interrupt bit definitions (ARCMSR_HBEMU_*)
 * and the Type E doorbell handler; only the post-queue draining differs
 * (arcmsr_hbaF_postqueue_isr).
 */
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		/* Re-sample before deciding whether to loop again. */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2858
arcmsr_interrupt(struct AdapterControlBlock * acb)2859 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2860 {
2861 switch (acb->adapter_type) {
2862 case ACB_ADAPTER_TYPE_A:
2863 return arcmsr_hbaA_handle_isr(acb);
2864 case ACB_ADAPTER_TYPE_B:
2865 return arcmsr_hbaB_handle_isr(acb);
2866 case ACB_ADAPTER_TYPE_C:
2867 return arcmsr_hbaC_handle_isr(acb);
2868 case ACB_ADAPTER_TYPE_D:
2869 return arcmsr_hbaD_handle_isr(acb);
2870 case ACB_ADAPTER_TYPE_E:
2871 return arcmsr_hbaE_handle_isr(acb);
2872 case ACB_ADAPTER_TYPE_F:
2873 return arcmsr_hbaF_handle_isr(acb);
2874 default:
2875 return IRQ_NONE;
2876 }
2877 }
2878
arcmsr_iop_parking(struct AdapterControlBlock * acb)2879 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2880 {
2881 if (acb) {
2882 /* stop adapter background rebuild */
2883 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2884 uint32_t intmask_org;
2885 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2886 intmask_org = arcmsr_disable_outbound_ints(acb);
2887 arcmsr_stop_adapter_bgrb(acb);
2888 arcmsr_flush_adapter_cache(acb);
2889 arcmsr_enable_outbound_ints(acb, intmask_org);
2890 }
2891 }
2892 }
2893
2894
/*
 * Discard any pending IOP->driver message data.
 *
 * While the overflow flag is set, keep resetting the ring indices and
 * re-reading from the IOP (which may immediately refill and re-set the
 * flag), for at most 15 iterations with a 30 ms pause between attempts.
 * Once the overflow condition clears, drain whatever partial data is
 * left and stop.
 */
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				/* Overflowed: reset ring and pull again from the IOP. */
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				acb->rqbuf_putIndex) {
				/* Residual data, no overflow: just drop it. */
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;
		}
	}
}
2917
/*
 * Service an Areca management-API request tunnelled through a SCSI
 * WRITE_BUFFER/READ_BUFFER command to the virtual device.
 *
 * The control code is decoded from CDB bytes 5..8; the single-entry
 * scatterlist holds a struct CMD_MESSAGE_FIELD that is both the request
 * and the reply.  Returns 0 on success or ARCMSR_MESSAGE_FAIL.
 *
 * NOTE(review): the page is kmapped before the use_sg > 1 check; the
 * error path still unmaps it via message_out, so this is safe but the
 * ordering looks accidental.  The buffer is accessed via kmap_atomic(),
 * so everything up to kunmap_atomic() runs in atomic context (hence
 * GFP_ATOMIC allocations below).
 */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	/* Control code is big-endian packed into CDB bytes 5..8. */
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		/* Only single-segment transfers are supported. */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* Copy pending IOP->driver data out to the user buffer. */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* Circular buffer: may need a two-part copy at the wrap. */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* Ring had overflowed: try to pull more from the IOP now. */
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* Queue user data for delivery to the IOP. */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			/* Previous write still pending: push it and report busy
			 * via ILLEGAL_REQUEST sense data. */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			/* Two-part copy if the write wraps the circular buffer. */
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* Reset the IOP->driver ring to empty. */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* Reset the driver->IOP ring to empty. */
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* Reset both rings; note the two locks are taken one at a time. */
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		/* Identification ping from the management tool. */
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
	message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
3171
arcmsr_get_freeccb(struct AdapterControlBlock * acb)3172 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3173 {
3174 struct list_head *head = &acb->ccb_free_list;
3175 struct CommandControlBlock *ccb = NULL;
3176 unsigned long flags;
3177 spin_lock_irqsave(&acb->ccblist_lock, flags);
3178 if (!list_empty(head)) {
3179 ccb = list_entry(head->next, struct CommandControlBlock, list);
3180 list_del_init(&ccb->list);
3181 }else{
3182 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3183 return NULL;
3184 }
3185 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3186 return ccb;
3187 }
3188
/*
 * Service a command addressed to the virtual device (target 16) that the
 * driver exposes for management I/O.
 *
 * INQUIRY is answered locally with synthetic "Areca RAID controller"
 * data; WRITE_BUFFER/READ_BUFFER tunnel management-API requests through
 * arcmsr_iop_message_xfer(); everything else completes immediately with
 * the default (good) status.
 */
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		/* Only LUN 0 exists on the virtual target. */
		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */

		/* Copy the synthetic INQUIRY data into the caller's buffer. */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		/* Management-API tunnel; report DID_ERROR on failure. */
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}
3238
/*
 * queuecommand entry point (locked variant, wrapped by DEF_SCSI_QCMD).
 *
 * Rejects commands when the adapter has been removed, routes target 16 to
 * the virtual management device, otherwise grabs a free CCB, builds the
 * controller CDB and posts it.  Returns SCSI_MLQUEUE_HOST_BUSY when the
 * CCB pool is empty so the midlayer retries later.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	/* Adapter gone: fail fast with DID_NO_CONNECT. */
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
3271
/* Generate arcmsr_queue_command(), the host-lock-managing wrapper around
 * arcmsr_queue_command_lck(). */
static DEF_SCSI_QCMD(arcmsr_queue_command)
3273
3274 static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
3275 {
3276 int count;
3277 uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
3278 uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
3279 uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
3280 uint32_t *firm_model = &rwbuffer[15];
3281 uint32_t *firm_version = &rwbuffer[17];
3282 uint32_t *device_map = &rwbuffer[21];
3283
3284 count = 2;
3285 while (count) {
3286 *acb_firm_model = readl(firm_model);
3287 acb_firm_model++;
3288 firm_model++;
3289 count--;
3290 }
3291 count = 4;
3292 while (count) {
3293 *acb_firm_version = readl(firm_version);
3294 acb_firm_version++;
3295 firm_version++;
3296 count--;
3297 }
3298 count = 4;
3299 while (count) {
3300 *acb_device_map = readl(device_map);
3301 acb_device_map++;
3302 device_map++;
3303 count--;
3304 }
3305 pACB->signature = readl(&rwbuffer[0]);
3306 pACB->firm_request_len = readl(&rwbuffer[1]);
3307 pACB->firm_numbers_queue = readl(&rwbuffer[2]);
3308 pACB->firm_sdram_size = readl(&rwbuffer[3]);
3309 pACB->firm_hd_channels = readl(&rwbuffer[4]);
3310 pACB->firm_cfg_version = readl(&rwbuffer[25]);
3311 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
3312 pACB->host->host_no,
3313 pACB->firm_model,
3314 pACB->firm_version);
3315 }
3316
arcmsr_hbaA_get_config(struct AdapterControlBlock * acb)3317 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3318 {
3319 struct MessageUnit_A __iomem *reg = acb->pmuA;
3320
3321 arcmsr_wait_firmware_ready(acb);
3322 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3323 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3324 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3325 miscellaneous data' timeout \n", acb->host->host_no);
3326 return false;
3327 }
3328 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3329 return true;
3330 }
/*
 * Fetch the Type B adapter configuration: set driver mode, post the
 * GET_CONFIG doorbell and parse the reply from message_rwbuffer.
 * Returns false if either message exchange times out.
 */
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	/* Type B needs driver mode selected before any other message. */
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
3350
arcmsr_hbaC_get_config(struct AdapterControlBlock * pACB)3351 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
3352 {
3353 uint32_t intmask_org;
3354 struct MessageUnit_C __iomem *reg = pACB->pmuC;
3355
3356 /* disable all outbound interrupt */
3357 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3358 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
3359 /* wait firmware ready */
3360 arcmsr_wait_firmware_ready(pACB);
3361 /* post "get config" instruction */
3362 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3363 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3364 /* wait message ready */
3365 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3366 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3367 miscellaneous data' timeout \n", pACB->host->host_no);
3368 return false;
3369 }
3370 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3371 return true;
3372 }
3373
/*
 * Fetch the Type D (ARC1214) adapter configuration: clear any stale
 * message-done doorbell, wait for firmware ready, post GET_CONFIG and
 * parse the reply from msgcode_rwbuffer.  Returns false on timeout.
 */
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	/* Clear a stale message-done indication before starting. */
	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}
3395
arcmsr_hbaE_get_config(struct AdapterControlBlock * pACB)3396 static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
3397 {
3398 struct MessageUnit_E __iomem *reg = pACB->pmuE;
3399 uint32_t intmask_org;
3400
3401 /* disable all outbound interrupt */
3402 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3403 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask);
3404 /* wait firmware ready */
3405 arcmsr_wait_firmware_ready(pACB);
3406 mdelay(20);
3407 /* post "get config" instruction */
3408 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3409
3410 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3411 writel(pACB->out_doorbell, ®->iobound_doorbell);
3412 /* wait message ready */
3413 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3414 pr_notice("arcmsr%d: wait get adapter firmware "
3415 "miscellaneous data timeout\n", pACB->host->host_no);
3416 return false;
3417 }
3418 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3419 return true;
3420 }
3421
arcmsr_hbaF_get_config(struct AdapterControlBlock * pACB)3422 static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
3423 {
3424 struct MessageUnit_F __iomem *reg = pACB->pmuF;
3425 uint32_t intmask_org;
3426
3427 /* disable all outbound interrupt */
3428 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3429 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask);
3430 /* wait firmware ready */
3431 arcmsr_wait_firmware_ready(pACB);
3432 /* post "get config" instruction */
3433 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3434
3435 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3436 writel(pACB->out_doorbell, ®->iobound_doorbell);
3437 /* wait message ready */
3438 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3439 pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
3440 pACB->host->host_no);
3441 return false;
3442 }
3443 arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
3444 return true;
3445 }
3446
/*
 * Dispatch the per-type get-config routine, then derive the outstanding
 * command limits from the firmware-reported queue depth.
 *
 * maxOutstanding is clamped to the host's can_queue; maxFreeCCB is then
 * padded by 64 when below ARCMSR_MAX_FREECCB_NUM.  Note the queue math
 * runs even if get_config failed (rtn == false) — the caller is expected
 * to check the return value.
 */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaF_get_config(acb);
		break;
	default:
		break;
	}
	/* Leave one slot of headroom below the firmware queue depth. */
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}
3483
arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3484 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3485 struct CommandControlBlock *poll_ccb)
3486 {
3487 struct MessageUnit_A __iomem *reg = acb->pmuA;
3488 struct CommandControlBlock *ccb;
3489 struct ARCMSR_CDB *arcmsr_cdb;
3490 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
3491 int rtn;
3492 bool error;
3493 unsigned long ccb_cdb_phy;
3494
3495 polling_hba_ccb_retry:
3496 poll_count++;
3497 outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable;
3498 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
3499 while (1) {
3500 if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) {
3501 if (poll_ccb_done){
3502 rtn = SUCCESS;
3503 break;
3504 }else {
3505 msleep(25);
3506 if (poll_count > 100){
3507 rtn = FAILED;
3508 break;
3509 }
3510 goto polling_hba_ccb_retry;
3511 }
3512 }
3513 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3514 if (acb->cdb_phyadd_hipart)
3515 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3516 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3517 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3518 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3519 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3520 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3521 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3522 " poll command abort successfully \n"
3523 , acb->host->host_no
3524 , ccb->pcmd->device->id
3525 , (u32)ccb->pcmd->device->lun
3526 , ccb);
3527 ccb->pcmd->result = DID_ABORT << 16;
3528 arcmsr_ccb_complete(ccb);
3529 continue;
3530 }
3531 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3532 " command done ccb = '0x%p'"
3533 "ccboutstandingcount = %d \n"
3534 , acb->host->host_no
3535 , ccb
3536 , atomic_read(&acb->ccboutstandingcount));
3537 continue;
3538 }
3539 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3540 arcmsr_report_ccb_state(acb, ccb, error);
3541 }
3542 return rtn;
3543 }
3544
arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3545 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3546 struct CommandControlBlock *poll_ccb)
3547 {
3548 struct MessageUnit_B *reg = acb->pmuB;
3549 struct ARCMSR_CDB *arcmsr_cdb;
3550 struct CommandControlBlock *ccb;
3551 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3552 int index, rtn;
3553 bool error;
3554 unsigned long ccb_cdb_phy;
3555
3556 polling_hbb_ccb_retry:
3557 poll_count++;
3558 /* clear doorbell interrupt */
3559 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3560 while(1){
3561 index = reg->doneq_index;
3562 flag_ccb = reg->done_qbuffer[index];
3563 if (flag_ccb == 0) {
3564 if (poll_ccb_done){
3565 rtn = SUCCESS;
3566 break;
3567 }else {
3568 msleep(25);
3569 if (poll_count > 100){
3570 rtn = FAILED;
3571 break;
3572 }
3573 goto polling_hbb_ccb_retry;
3574 }
3575 }
3576 reg->done_qbuffer[index] = 0;
3577 index++;
3578 /*if last index number set it to 0 */
3579 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3580 reg->doneq_index = index;
3581 /* check if command done with no error*/
3582 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
3583 if (acb->cdb_phyadd_hipart)
3584 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3585 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3586 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3587 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3588 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3589 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3590 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3591 " poll command abort successfully \n"
3592 ,acb->host->host_no
3593 ,ccb->pcmd->device->id
3594 ,(u32)ccb->pcmd->device->lun
3595 ,ccb);
3596 ccb->pcmd->result = DID_ABORT << 16;
3597 arcmsr_ccb_complete(ccb);
3598 continue;
3599 }
3600 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3601 " command done ccb = '0x%p'"
3602 "ccboutstandingcount = %d \n"
3603 , acb->host->host_no
3604 , ccb
3605 , atomic_read(&acb->ccboutstandingcount));
3606 continue;
3607 }
3608 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3609 arcmsr_report_ccb_state(acb, ccb, error);
3610 }
3611 return rtn;
3612 }
3613
arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3614 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3615 struct CommandControlBlock *poll_ccb)
3616 {
3617 struct MessageUnit_C __iomem *reg = acb->pmuC;
3618 uint32_t flag_ccb;
3619 struct ARCMSR_CDB *arcmsr_cdb;
3620 bool error;
3621 struct CommandControlBlock *pCCB;
3622 uint32_t poll_ccb_done = 0, poll_count = 0;
3623 int rtn;
3624 unsigned long ccb_cdb_phy;
3625
3626 polling_hbc_ccb_retry:
3627 poll_count++;
3628 while (1) {
3629 if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3630 if (poll_ccb_done) {
3631 rtn = SUCCESS;
3632 break;
3633 } else {
3634 msleep(25);
3635 if (poll_count > 100) {
3636 rtn = FAILED;
3637 break;
3638 }
3639 goto polling_hbc_ccb_retry;
3640 }
3641 }
3642 flag_ccb = readl(®->outbound_queueport_low);
3643 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3644 if (acb->cdb_phyadd_hipart)
3645 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3646 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3647 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3648 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3649 /* check ifcommand done with no error*/
3650 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3651 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3652 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3653 " poll command abort successfully \n"
3654 , acb->host->host_no
3655 , pCCB->pcmd->device->id
3656 , (u32)pCCB->pcmd->device->lun
3657 , pCCB);
3658 pCCB->pcmd->result = DID_ABORT << 16;
3659 arcmsr_ccb_complete(pCCB);
3660 continue;
3661 }
3662 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3663 " command done ccb = '0x%p'"
3664 "ccboutstandingcount = %d \n"
3665 , acb->host->host_no
3666 , pCCB
3667 , atomic_read(&acb->ccboutstandingcount));
3668 continue;
3669 }
3670 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3671 arcmsr_report_ccb_state(acb, pCCB, error);
3672 }
3673 return rtn;
3674 }
3675
arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3676 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3677 struct CommandControlBlock *poll_ccb)
3678 {
3679 bool error;
3680 uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
3681 int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
3682 unsigned long flags, ccb_cdb_phy;
3683 struct ARCMSR_CDB *arcmsr_cdb;
3684 struct CommandControlBlock *pCCB;
3685 struct MessageUnit_D *pmu = acb->pmuD;
3686
3687 polling_hbaD_ccb_retry:
3688 poll_count++;
3689 while (1) {
3690 spin_lock_irqsave(&acb->doneq_lock, flags);
3691 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
3692 doneq_index = pmu->doneq_index;
3693 if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
3694 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3695 if (poll_ccb_done) {
3696 rtn = SUCCESS;
3697 break;
3698 } else {
3699 msleep(25);
3700 if (poll_count > 40) {
3701 rtn = FAILED;
3702 break;
3703 }
3704 goto polling_hbaD_ccb_retry;
3705 }
3706 }
3707 toggle = doneq_index & 0x4000;
3708 index_stripped = (doneq_index & 0xFFF) + 1;
3709 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3710 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
3711 ((toggle ^ 0x4000) + 1);
3712 doneq_index = pmu->doneq_index;
3713 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3714 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
3715 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3716 if (acb->cdb_phyadd_hipart)
3717 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3718 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3719 ccb_cdb_phy);
3720 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
3721 arcmsr_cdb);
3722 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3723 if ((pCCB->acb != acb) ||
3724 (pCCB->startdone != ARCMSR_CCB_START)) {
3725 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3726 pr_notice("arcmsr%d: scsi id = %d "
3727 "lun = %d ccb = '0x%p' poll command "
3728 "abort successfully\n"
3729 , acb->host->host_no
3730 , pCCB->pcmd->device->id
3731 , (u32)pCCB->pcmd->device->lun
3732 , pCCB);
3733 pCCB->pcmd->result = DID_ABORT << 16;
3734 arcmsr_ccb_complete(pCCB);
3735 continue;
3736 }
3737 pr_notice("arcmsr%d: polling an illegal "
3738 "ccb command done ccb = '0x%p' "
3739 "ccboutstandingcount = %d\n"
3740 , acb->host->host_no
3741 , pCCB
3742 , atomic_read(&acb->ccboutstandingcount));
3743 continue;
3744 }
3745 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
3746 ? true : false;
3747 arcmsr_report_ccb_state(acb, pCCB, error);
3748 }
3749 return rtn;
3750 }
3751
arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3752 static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3753 struct CommandControlBlock *poll_ccb)
3754 {
3755 bool error;
3756 uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
3757 uint16_t cmdSMID;
3758 unsigned long flags;
3759 int rtn;
3760 struct CommandControlBlock *pCCB;
3761 struct MessageUnit_E __iomem *reg = acb->pmuE;
3762
3763 polling_hbaC_ccb_retry:
3764 poll_count++;
3765 while (1) {
3766 spin_lock_irqsave(&acb->doneq_lock, flags);
3767 doneq_index = acb->doneq_index;
3768 if ((readl(®->reply_post_producer_index) & 0xFFFF) ==
3769 doneq_index) {
3770 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3771 if (poll_ccb_done) {
3772 rtn = SUCCESS;
3773 break;
3774 } else {
3775 msleep(25);
3776 if (poll_count > 40) {
3777 rtn = FAILED;
3778 break;
3779 }
3780 goto polling_hbaC_ccb_retry;
3781 }
3782 }
3783 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3784 doneq_index++;
3785 if (doneq_index >= acb->completionQ_entry)
3786 doneq_index = 0;
3787 acb->doneq_index = doneq_index;
3788 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3789 pCCB = acb->pccb_pool[cmdSMID];
3790 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3791 /* check if command done with no error*/
3792 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3793 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3794 pr_notice("arcmsr%d: scsi id = %d "
3795 "lun = %d ccb = '0x%p' poll command "
3796 "abort successfully\n"
3797 , acb->host->host_no
3798 , pCCB->pcmd->device->id
3799 , (u32)pCCB->pcmd->device->lun
3800 , pCCB);
3801 pCCB->pcmd->result = DID_ABORT << 16;
3802 arcmsr_ccb_complete(pCCB);
3803 continue;
3804 }
3805 pr_notice("arcmsr%d: polling an illegal "
3806 "ccb command done ccb = '0x%p' "
3807 "ccboutstandingcount = %d\n"
3808 , acb->host->host_no
3809 , pCCB
3810 , atomic_read(&acb->ccboutstandingcount));
3811 continue;
3812 }
3813 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3814 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3815 arcmsr_report_ccb_state(acb, pCCB, error);
3816 }
3817 writel(doneq_index, ®->reply_post_consumer_index);
3818 return rtn;
3819 }
3820
arcmsr_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3821 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3822 struct CommandControlBlock *poll_ccb)
3823 {
3824 int rtn = 0;
3825 switch (acb->adapter_type) {
3826
3827 case ACB_ADAPTER_TYPE_A:
3828 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3829 break;
3830 case ACB_ADAPTER_TYPE_B:
3831 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3832 break;
3833 case ACB_ADAPTER_TYPE_C:
3834 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3835 break;
3836 case ACB_ADAPTER_TYPE_D:
3837 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3838 break;
3839 case ACB_ADAPTER_TYPE_E:
3840 case ACB_ADAPTER_TYPE_F:
3841 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3842 break;
3843 }
3844 return rtn;
3845 }
3846
arcmsr_set_iop_datetime(struct timer_list * t)3847 static void arcmsr_set_iop_datetime(struct timer_list *t)
3848 {
3849 struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
3850 unsigned int next_time;
3851 struct tm tm;
3852
3853 union {
3854 struct {
3855 uint16_t signature;
3856 uint8_t year;
3857 uint8_t month;
3858 uint8_t date;
3859 uint8_t hour;
3860 uint8_t minute;
3861 uint8_t second;
3862 } a;
3863 struct {
3864 uint32_t msg_time[2];
3865 } b;
3866 } datetime;
3867
3868 time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
3869
3870 datetime.a.signature = 0x55AA;
3871 datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
3872 datetime.a.month = tm.tm_mon;
3873 datetime.a.date = tm.tm_mday;
3874 datetime.a.hour = tm.tm_hour;
3875 datetime.a.minute = tm.tm_min;
3876 datetime.a.second = tm.tm_sec;
3877
3878 switch (pacb->adapter_type) {
3879 case ACB_ADAPTER_TYPE_A: {
3880 struct MessageUnit_A __iomem *reg = pacb->pmuA;
3881 writel(datetime.b.msg_time[0], ®->message_rwbuffer[0]);
3882 writel(datetime.b.msg_time[1], ®->message_rwbuffer[1]);
3883 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3884 break;
3885 }
3886 case ACB_ADAPTER_TYPE_B: {
3887 uint32_t __iomem *rwbuffer;
3888 struct MessageUnit_B *reg = pacb->pmuB;
3889 rwbuffer = reg->message_rwbuffer;
3890 writel(datetime.b.msg_time[0], rwbuffer++);
3891 writel(datetime.b.msg_time[1], rwbuffer++);
3892 writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
3893 break;
3894 }
3895 case ACB_ADAPTER_TYPE_C: {
3896 struct MessageUnit_C __iomem *reg = pacb->pmuC;
3897 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3898 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3899 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3900 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3901 break;
3902 }
3903 case ACB_ADAPTER_TYPE_D: {
3904 uint32_t __iomem *rwbuffer;
3905 struct MessageUnit_D *reg = pacb->pmuD;
3906 rwbuffer = reg->msgcode_rwbuffer;
3907 writel(datetime.b.msg_time[0], rwbuffer++);
3908 writel(datetime.b.msg_time[1], rwbuffer++);
3909 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
3910 break;
3911 }
3912 case ACB_ADAPTER_TYPE_E: {
3913 struct MessageUnit_E __iomem *reg = pacb->pmuE;
3914 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]);
3915 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]);
3916 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3917 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3918 writel(pacb->out_doorbell, ®->iobound_doorbell);
3919 break;
3920 }
3921 case ACB_ADAPTER_TYPE_F: {
3922 struct MessageUnit_F __iomem *reg = pacb->pmuF;
3923
3924 pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
3925 pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
3926 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0);
3927 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3928 writel(pacb->out_doorbell, ®->iobound_doorbell);
3929 break;
3930 }
3931 }
3932 if (sys_tz.tz_minuteswest)
3933 next_time = ARCMSR_HOURS;
3934 else
3935 next_time = ARCMSR_MINUTES;
3936 mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
3937 }
3938
arcmsr_iop_confirm(struct AdapterControlBlock * acb)3939 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3940 {
3941 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3942 dma_addr_t dma_coherent_handle;
3943
3944 /*
3945 ********************************************************************
3946 ** here we need to tell iop 331 our freeccb.HighPart
3947 ** if freeccb.HighPart is not zero
3948 ********************************************************************
3949 */
3950 switch (acb->adapter_type) {
3951 case ACB_ADAPTER_TYPE_B:
3952 case ACB_ADAPTER_TYPE_D:
3953 dma_coherent_handle = acb->dma_coherent_handle2;
3954 break;
3955 case ACB_ADAPTER_TYPE_E:
3956 case ACB_ADAPTER_TYPE_F:
3957 dma_coherent_handle = acb->dma_coherent_handle +
3958 offsetof(struct CommandControlBlock, arcmsr_cdb);
3959 break;
3960 default:
3961 dma_coherent_handle = acb->dma_coherent_handle;
3962 break;
3963 }
3964 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3965 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
3966 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3967 acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
3968 /*
3969 ***********************************************************************
3970 ** if adapter type B, set window of "post command Q"
3971 ***********************************************************************
3972 */
3973 switch (acb->adapter_type) {
3974
3975 case ACB_ADAPTER_TYPE_A: {
3976 if (cdb_phyaddr_hi32 != 0) {
3977 struct MessageUnit_A __iomem *reg = acb->pmuA;
3978 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
3979 ®->message_rwbuffer[0]);
3980 writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]);
3981 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
3982 ®->inbound_msgaddr0);
3983 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3984 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
3985 part physical address timeout\n",
3986 acb->host->host_no);
3987 return 1;
3988 }
3989 }
3990 }
3991 break;
3992
3993 case ACB_ADAPTER_TYPE_B: {
3994 uint32_t __iomem *rwbuffer;
3995
3996 struct MessageUnit_B *reg = acb->pmuB;
3997 reg->postq_index = 0;
3998 reg->doneq_index = 0;
3999 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
4000 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4001 printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
4002 acb->host->host_no);
4003 return 1;
4004 }
4005 rwbuffer = reg->message_rwbuffer;
4006 /* driver "set config" signature */
4007 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
4008 /* normal should be zero */
4009 writel(cdb_phyaddr_hi32, rwbuffer++);
4010 /* postQ size (256 + 8)*4 */
4011 writel(cdb_phyaddr, rwbuffer++);
4012 /* doneQ size (256 + 8)*4 */
4013 writel(cdb_phyaddr + 1056, rwbuffer++);
4014 /* ccb maxQ size must be --> [(256 + 8)*4]*/
4015 writel(1056, rwbuffer);
4016
4017 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
4018 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4019 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
4020 timeout \n",acb->host->host_no);
4021 return 1;
4022 }
4023 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
4024 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4025 pr_err("arcmsr%d: can't set driver mode.\n",
4026 acb->host->host_no);
4027 return 1;
4028 }
4029 }
4030 break;
4031 case ACB_ADAPTER_TYPE_C: {
4032 struct MessageUnit_C __iomem *reg = acb->pmuC;
4033
4034 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
4035 acb->adapter_index, cdb_phyaddr_hi32);
4036 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
4037 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]);
4038 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4039 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
4040 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
4041 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
4042 timeout \n", acb->host->host_no);
4043 return 1;
4044 }
4045 }
4046 break;
4047 case ACB_ADAPTER_TYPE_D: {
4048 uint32_t __iomem *rwbuffer;
4049 struct MessageUnit_D *reg = acb->pmuD;
4050 reg->postq_index = 0;
4051 reg->doneq_index = 0;
4052 rwbuffer = reg->msgcode_rwbuffer;
4053 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
4054 writel(cdb_phyaddr_hi32, rwbuffer++);
4055 writel(cdb_phyaddr, rwbuffer++);
4056 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
4057 sizeof(struct InBound_SRB)), rwbuffer++);
4058 writel(0x100, rwbuffer);
4059 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
4060 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
4061 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
4062 acb->host->host_no);
4063 return 1;
4064 }
4065 }
4066 break;
4067 case ACB_ADAPTER_TYPE_E: {
4068 struct MessageUnit_E __iomem *reg = acb->pmuE;
4069 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
4070 writel(ARCMSR_SIGNATURE_1884, ®->msgcode_rwbuffer[1]);
4071 writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]);
4072 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]);
4073 writel(acb->ccbsize, ®->msgcode_rwbuffer[4]);
4074 writel(lower_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[5]);
4075 writel(upper_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[6]);
4076 writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]);
4077 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4078 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4079 writel(acb->out_doorbell, ®->iobound_doorbell);
4080 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4081 pr_notice("arcmsr%d: 'set command Q window' timeout \n",
4082 acb->host->host_no);
4083 return 1;
4084 }
4085 }
4086 break;
4087 case ACB_ADAPTER_TYPE_F: {
4088 struct MessageUnit_F __iomem *reg = acb->pmuF;
4089
4090 acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
4091 acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
4092 acb->msgcode_rwbuffer[2] = cdb_phyaddr;
4093 acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
4094 acb->msgcode_rwbuffer[4] = acb->ccbsize;
4095 acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
4096 acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
4097 acb->msgcode_rwbuffer[7] = acb->completeQ_size;
4098 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
4099 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4100 writel(acb->out_doorbell, ®->iobound_doorbell);
4101 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4102 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
4103 acb->host->host_no);
4104 return 1;
4105 }
4106 }
4107 break;
4108 }
4109 return 0;
4110 }
4111
arcmsr_wait_firmware_ready(struct AdapterControlBlock * acb)4112 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
4113 {
4114 uint32_t firmware_state = 0;
4115 switch (acb->adapter_type) {
4116
4117 case ACB_ADAPTER_TYPE_A: {
4118 struct MessageUnit_A __iomem *reg = acb->pmuA;
4119 do {
4120 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4121 msleep(20);
4122 firmware_state = readl(®->outbound_msgaddr1);
4123 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
4124 }
4125 break;
4126
4127 case ACB_ADAPTER_TYPE_B: {
4128 struct MessageUnit_B *reg = acb->pmuB;
4129 do {
4130 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4131 msleep(20);
4132 firmware_state = readl(reg->iop2drv_doorbell);
4133 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
4134 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
4135 }
4136 break;
4137 case ACB_ADAPTER_TYPE_C: {
4138 struct MessageUnit_C __iomem *reg = acb->pmuC;
4139 do {
4140 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4141 msleep(20);
4142 firmware_state = readl(®->outbound_msgaddr1);
4143 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
4144 }
4145 break;
4146 case ACB_ADAPTER_TYPE_D: {
4147 struct MessageUnit_D *reg = acb->pmuD;
4148 do {
4149 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4150 msleep(20);
4151 firmware_state = readl(reg->outbound_msgaddr1);
4152 } while ((firmware_state &
4153 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
4154 }
4155 break;
4156 case ACB_ADAPTER_TYPE_E:
4157 case ACB_ADAPTER_TYPE_F: {
4158 struct MessageUnit_E __iomem *reg = acb->pmuE;
4159 do {
4160 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4161 msleep(20);
4162 firmware_state = readl(®->outbound_msgaddr1);
4163 } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
4164 }
4165 break;
4166 }
4167 }
4168
arcmsr_request_device_map(struct timer_list * t)4169 static void arcmsr_request_device_map(struct timer_list *t)
4170 {
4171 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
4172 if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
4173 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4174 } else {
4175 acb->fw_flag = FW_NORMAL;
4176 switch (acb->adapter_type) {
4177 case ACB_ADAPTER_TYPE_A: {
4178 struct MessageUnit_A __iomem *reg = acb->pmuA;
4179 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4180 break;
4181 }
4182 case ACB_ADAPTER_TYPE_B: {
4183 struct MessageUnit_B *reg = acb->pmuB;
4184 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
4185 break;
4186 }
4187 case ACB_ADAPTER_TYPE_C: {
4188 struct MessageUnit_C __iomem *reg = acb->pmuC;
4189 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4190 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
4191 break;
4192 }
4193 case ACB_ADAPTER_TYPE_D: {
4194 struct MessageUnit_D *reg = acb->pmuD;
4195 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
4196 break;
4197 }
4198 case ACB_ADAPTER_TYPE_E: {
4199 struct MessageUnit_E __iomem *reg = acb->pmuE;
4200 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4201 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4202 writel(acb->out_doorbell, ®->iobound_doorbell);
4203 break;
4204 }
4205 case ACB_ADAPTER_TYPE_F: {
4206 struct MessageUnit_F __iomem *reg = acb->pmuF;
4207 uint32_t outMsg1 = readl(®->outbound_msgaddr1);
4208
4209 if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
4210 (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
4211 goto nxt6s;
4212 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
4213 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4214 writel(acb->out_doorbell, ®->iobound_doorbell);
4215 break;
4216 }
4217 default:
4218 return;
4219 }
4220 acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
4221 nxt6s:
4222 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4223 }
4224 }
4225
arcmsr_hbaA_start_bgrb(struct AdapterControlBlock * acb)4226 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
4227 {
4228 struct MessageUnit_A __iomem *reg = acb->pmuA;
4229 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4230 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0);
4231 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4232 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4233 rebuild' timeout \n", acb->host->host_no);
4234 }
4235 }
4236
arcmsr_hbaB_start_bgrb(struct AdapterControlBlock * acb)4237 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
4238 {
4239 struct MessageUnit_B *reg = acb->pmuB;
4240 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4241 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
4242 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4243 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4244 rebuild' timeout \n",acb->host->host_no);
4245 }
4246 }
4247
arcmsr_hbaC_start_bgrb(struct AdapterControlBlock * pACB)4248 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
4249 {
4250 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
4251 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4252 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
4253 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
4254 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
4255 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4256 rebuild' timeout \n", pACB->host->host_no);
4257 }
4258 return;
4259 }
4260
arcmsr_hbaD_start_bgrb(struct AdapterControlBlock * pACB)4261 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
4262 {
4263 struct MessageUnit_D *pmu = pACB->pmuD;
4264
4265 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4266 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
4267 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
4268 pr_notice("arcmsr%d: wait 'start adapter "
4269 "background rebuild' timeout\n", pACB->host->host_no);
4270 }
4271 }
4272
arcmsr_hbaE_start_bgrb(struct AdapterControlBlock * pACB)4273 static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
4274 {
4275 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
4276
4277 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4278 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
4279 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4280 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
4281 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
4282 pr_notice("arcmsr%d: wait 'start adapter "
4283 "background rebuild' timeout \n", pACB->host->host_no);
4284 }
4285 }
4286
arcmsr_start_adapter_bgrb(struct AdapterControlBlock * acb)4287 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4288 {
4289 switch (acb->adapter_type) {
4290 case ACB_ADAPTER_TYPE_A:
4291 arcmsr_hbaA_start_bgrb(acb);
4292 break;
4293 case ACB_ADAPTER_TYPE_B:
4294 arcmsr_hbaB_start_bgrb(acb);
4295 break;
4296 case ACB_ADAPTER_TYPE_C:
4297 arcmsr_hbaC_start_bgrb(acb);
4298 break;
4299 case ACB_ADAPTER_TYPE_D:
4300 arcmsr_hbaD_start_bgrb(acb);
4301 break;
4302 case ACB_ADAPTER_TYPE_E:
4303 case ACB_ADAPTER_TYPE_F:
4304 arcmsr_hbaE_start_bgrb(acb);
4305 break;
4306 }
4307 }
4308
arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock * acb)4309 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
4310 {
4311 switch (acb->adapter_type) {
4312 case ACB_ADAPTER_TYPE_A: {
4313 struct MessageUnit_A __iomem *reg = acb->pmuA;
4314 uint32_t outbound_doorbell;
4315 /* empty doorbell Qbuffer if door bell ringed */
4316 outbound_doorbell = readl(®->outbound_doorbell);
4317 /*clear doorbell interrupt */
4318 writel(outbound_doorbell, ®->outbound_doorbell);
4319 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
4320 }
4321 break;
4322
4323 case ACB_ADAPTER_TYPE_B: {
4324 struct MessageUnit_B *reg = acb->pmuB;
4325 uint32_t outbound_doorbell, i;
4326 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4327 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4328 /* let IOP know data has been read */
4329 for(i=0; i < 200; i++) {
4330 msleep(20);
4331 outbound_doorbell = readl(reg->iop2drv_doorbell);
4332 if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
4333 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4334 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4335 } else
4336 break;
4337 }
4338 }
4339 break;
4340 case ACB_ADAPTER_TYPE_C: {
4341 struct MessageUnit_C __iomem *reg = acb->pmuC;
4342 uint32_t outbound_doorbell, i;
4343 /* empty doorbell Qbuffer if door bell ringed */
4344 outbound_doorbell = readl(®->outbound_doorbell);
4345 writel(outbound_doorbell, ®->outbound_doorbell_clear);
4346 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
4347 for (i = 0; i < 200; i++) {
4348 msleep(20);
4349 outbound_doorbell = readl(®->outbound_doorbell);
4350 if (outbound_doorbell &
4351 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
4352 writel(outbound_doorbell,
4353 ®->outbound_doorbell_clear);
4354 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
4355 ®->inbound_doorbell);
4356 } else
4357 break;
4358 }
4359 }
4360 break;
4361 case ACB_ADAPTER_TYPE_D: {
4362 struct MessageUnit_D *reg = acb->pmuD;
4363 uint32_t outbound_doorbell, i;
4364 /* empty doorbell Qbuffer if door bell ringed */
4365 outbound_doorbell = readl(reg->outbound_doorbell);
4366 writel(outbound_doorbell, reg->outbound_doorbell);
4367 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4368 reg->inbound_doorbell);
4369 for (i = 0; i < 200; i++) {
4370 msleep(20);
4371 outbound_doorbell = readl(reg->outbound_doorbell);
4372 if (outbound_doorbell &
4373 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
4374 writel(outbound_doorbell,
4375 reg->outbound_doorbell);
4376 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4377 reg->inbound_doorbell);
4378 } else
4379 break;
4380 }
4381 }
4382 break;
4383 case ACB_ADAPTER_TYPE_E:
4384 case ACB_ADAPTER_TYPE_F: {
4385 struct MessageUnit_E __iomem *reg = acb->pmuE;
4386 uint32_t i, tmp;
4387
4388 acb->in_doorbell = readl(®->iobound_doorbell);
4389 writel(0, ®->host_int_status); /*clear interrupt*/
4390 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4391 writel(acb->out_doorbell, ®->iobound_doorbell);
4392 for(i=0; i < 200; i++) {
4393 msleep(20);
4394 tmp = acb->in_doorbell;
4395 acb->in_doorbell = readl(®->iobound_doorbell);
4396 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
4397 writel(0, ®->host_int_status); /*clear interrupt*/
4398 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4399 writel(acb->out_doorbell, ®->iobound_doorbell);
4400 } else
4401 break;
4402 }
4403 }
4404 break;
4405 }
4406 }
4407
arcmsr_enable_eoi_mode(struct AdapterControlBlock * acb)4408 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4409 {
4410 switch (acb->adapter_type) {
4411 case ACB_ADAPTER_TYPE_A:
4412 return;
4413 case ACB_ADAPTER_TYPE_B:
4414 {
4415 struct MessageUnit_B *reg = acb->pmuB;
4416 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
4417 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4418 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
4419 return;
4420 }
4421 }
4422 break;
4423 case ACB_ADAPTER_TYPE_C:
4424 return;
4425 }
4426 return;
4427 }
4428
/*
** Force a hardware bus reset of the adapter.
**
** The first 64 bytes of PCI config space are saved before the reset and
** written back afterwards, because the reset clobbers the device's PCI
** configuration. The reset trigger itself is device-id specific:
**   0x1680 - write a magic value into a reserved HBA register
**   0x1880 - unlock the diagnostic register via a magic write sequence,
**            then write the reset bit (HBC)
**   0x1884 - same unlock-then-reset dance on the 3xxx register set (HBE)
**   0x1214 - write to the dedicated reset-request register (HBD)
**   others - poke the reset bit directly in PCI config space (offset 0x84)
** The write sequences below are order-sensitive; do not reorder.
*/
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		/* magic key sequence unlocks DiagWrite; retry up to 5 times */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		/* same unlock pattern as 0x1880 but on the 3xxx registers,
		 * with a short settle delay between attempts */
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	/* give the controller time to come out of reset */
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
4482
arcmsr_reset_in_progress(struct AdapterControlBlock * acb)4483 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4484 {
4485 bool rtn = true;
4486
4487 switch(acb->adapter_type) {
4488 case ACB_ADAPTER_TYPE_A:{
4489 struct MessageUnit_A __iomem *reg = acb->pmuA;
4490 rtn = ((readl(®->outbound_msgaddr1) &
4491 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4492 }
4493 break;
4494 case ACB_ADAPTER_TYPE_B:{
4495 struct MessageUnit_B *reg = acb->pmuB;
4496 rtn = ((readl(reg->iop2drv_doorbell) &
4497 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4498 }
4499 break;
4500 case ACB_ADAPTER_TYPE_C:{
4501 struct MessageUnit_C __iomem *reg = acb->pmuC;
4502 rtn = (readl(®->host_diagnostic) & 0x04) ? true : false;
4503 }
4504 break;
4505 case ACB_ADAPTER_TYPE_D:{
4506 struct MessageUnit_D *reg = acb->pmuD;
4507 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4508 true : false;
4509 }
4510 break;
4511 case ACB_ADAPTER_TYPE_E:
4512 case ACB_ADAPTER_TYPE_F:{
4513 struct MessageUnit_E __iomem *reg = acb->pmuE;
4514 rtn = (readl(®->host_diagnostic_3xxx) &
4515 ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4516 }
4517 break;
4518 }
4519 return rtn;
4520 }
4521
arcmsr_iop_init(struct AdapterControlBlock * acb)4522 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
4523 {
4524 uint32_t intmask_org;
4525 /* disable all outbound interrupt */
4526 intmask_org = arcmsr_disable_outbound_ints(acb);
4527 arcmsr_wait_firmware_ready(acb);
4528 arcmsr_iop_confirm(acb);
4529 /*start background rebuild*/
4530 arcmsr_start_adapter_bgrb(acb);
4531 /* empty doorbell Qbuffer if door bell ringed */
4532 arcmsr_clear_doorbell_queue_buffer(acb);
4533 arcmsr_enable_eoi_mode(acb);
4534 /* enable outbound Post Queue,outbound doorbell Interrupt */
4535 arcmsr_enable_outbound_ints(acb, intmask_org);
4536 acb->acb_flags |= ACB_F_IOP_INITED;
4537 }
4538
arcmsr_iop_reset(struct AdapterControlBlock * acb)4539 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4540 {
4541 struct CommandControlBlock *ccb;
4542 uint32_t intmask_org;
4543 uint8_t rtnval = 0x00;
4544 int i = 0;
4545 unsigned long flags;
4546
4547 if (atomic_read(&acb->ccboutstandingcount) != 0) {
4548 /* disable all outbound interrupt */
4549 intmask_org = arcmsr_disable_outbound_ints(acb);
4550 /* talk to iop 331 outstanding command aborted */
4551 rtnval = arcmsr_abort_allcmd(acb);
4552 /* clear all outbound posted Q */
4553 arcmsr_done4abort_postqueue(acb);
4554 for (i = 0; i < acb->maxFreeCCB; i++) {
4555 ccb = acb->pccb_pool[i];
4556 if (ccb->startdone == ARCMSR_CCB_START) {
4557 scsi_dma_unmap(ccb->pcmd);
4558 ccb->startdone = ARCMSR_CCB_DONE;
4559 ccb->ccb_flags = 0;
4560 spin_lock_irqsave(&acb->ccblist_lock, flags);
4561 list_add_tail(&ccb->list, &acb->ccb_free_list);
4562 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4563 }
4564 }
4565 atomic_set(&acb->ccboutstandingcount, 0);
4566 /* enable all outbound interrupt */
4567 arcmsr_enable_outbound_ints(acb, intmask_org);
4568 return rtnval;
4569 }
4570 return rtnval;
4571 }
4572
arcmsr_bus_reset(struct scsi_cmnd * cmd)4573 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
4574 {
4575 struct AdapterControlBlock *acb;
4576 int retry_count = 0;
4577 int rtn = FAILED;
4578 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
4579 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4580 return SUCCESS;
4581 pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
4582 " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
4583 acb->num_resets++;
4584
4585 if (acb->acb_flags & ACB_F_BUS_RESET) {
4586 long timeout;
4587 pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
4588 timeout = wait_event_timeout(wait_q, (acb->acb_flags
4589 & ACB_F_BUS_RESET) == 0, 220 * HZ);
4590 if (timeout)
4591 return SUCCESS;
4592 }
4593 acb->acb_flags |= ACB_F_BUS_RESET;
4594 if (!arcmsr_iop_reset(acb)) {
4595 arcmsr_hardware_reset(acb);
4596 acb->acb_flags &= ~ACB_F_IOP_INITED;
4597 wait_reset_done:
4598 ssleep(ARCMSR_SLEEPTIME);
4599 if (arcmsr_reset_in_progress(acb)) {
4600 if (retry_count > ARCMSR_RETRYCOUNT) {
4601 acb->fw_flag = FW_DEADLOCK;
4602 pr_notice("arcmsr%d: waiting for hw bus reset"
4603 " return, RETRY TERMINATED!!\n",
4604 acb->host->host_no);
4605 return FAILED;
4606 }
4607 retry_count++;
4608 goto wait_reset_done;
4609 }
4610 arcmsr_iop_init(acb);
4611 acb->fw_flag = FW_NORMAL;
4612 mod_timer(&acb->eternal_timer, jiffies +
4613 msecs_to_jiffies(6 * HZ));
4614 acb->acb_flags &= ~ACB_F_BUS_RESET;
4615 rtn = SUCCESS;
4616 pr_notice("arcmsr: scsi bus reset eh returns with success\n");
4617 } else {
4618 acb->acb_flags &= ~ACB_F_BUS_RESET;
4619 acb->fw_flag = FW_NORMAL;
4620 mod_timer(&acb->eternal_timer, jiffies +
4621 msecs_to_jiffies(6 * HZ));
4622 rtn = SUCCESS;
4623 }
4624 return rtn;
4625 }
4626
/*
** Abort a single in-flight CCB by polling the controller until the
** command is reported done (or the poll times out).
*/
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
4634
arcmsr_abort(struct scsi_cmnd * cmd)4635 static int arcmsr_abort(struct scsi_cmnd *cmd)
4636 {
4637 struct AdapterControlBlock *acb =
4638 (struct AdapterControlBlock *)cmd->device->host->hostdata;
4639 int i = 0;
4640 int rtn = FAILED;
4641 uint32_t intmask_org;
4642
4643 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4644 return SUCCESS;
4645 printk(KERN_NOTICE
4646 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
4647 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4648 acb->acb_flags |= ACB_F_ABORT;
4649 acb->num_aborts++;
4650 /*
4651 ************************************************
4652 ** the all interrupt service routine is locked
4653 ** we need to handle it as soon as possible and exit
4654 ************************************************
4655 */
4656 if (!atomic_read(&acb->ccboutstandingcount)) {
4657 acb->acb_flags &= ~ACB_F_ABORT;
4658 return rtn;
4659 }
4660
4661 intmask_org = arcmsr_disable_outbound_ints(acb);
4662 for (i = 0; i < acb->maxFreeCCB; i++) {
4663 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4664 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
4665 ccb->startdone = ARCMSR_CCB_ABORTED;
4666 rtn = arcmsr_abort_one_cmd(acb, ccb);
4667 break;
4668 }
4669 }
4670 acb->acb_flags &= ~ACB_F_ABORT;
4671 arcmsr_enable_outbound_ints(acb, intmask_org);
4672 return rtn;
4673 }
4674
arcmsr_info(struct Scsi_Host * host)4675 static const char *arcmsr_info(struct Scsi_Host *host)
4676 {
4677 struct AdapterControlBlock *acb =
4678 (struct AdapterControlBlock *) host->hostdata;
4679 static char buf[256];
4680 char *type;
4681 int raid6 = 1;
4682 switch (acb->pdev->device) {
4683 case PCI_DEVICE_ID_ARECA_1110:
4684 case PCI_DEVICE_ID_ARECA_1200:
4685 case PCI_DEVICE_ID_ARECA_1202:
4686 case PCI_DEVICE_ID_ARECA_1210:
4687 raid6 = 0;
4688 fallthrough;
4689 case PCI_DEVICE_ID_ARECA_1120:
4690 case PCI_DEVICE_ID_ARECA_1130:
4691 case PCI_DEVICE_ID_ARECA_1160:
4692 case PCI_DEVICE_ID_ARECA_1170:
4693 case PCI_DEVICE_ID_ARECA_1201:
4694 case PCI_DEVICE_ID_ARECA_1203:
4695 case PCI_DEVICE_ID_ARECA_1220:
4696 case PCI_DEVICE_ID_ARECA_1230:
4697 case PCI_DEVICE_ID_ARECA_1260:
4698 case PCI_DEVICE_ID_ARECA_1270:
4699 case PCI_DEVICE_ID_ARECA_1280:
4700 type = "SATA";
4701 break;
4702 case PCI_DEVICE_ID_ARECA_1214:
4703 case PCI_DEVICE_ID_ARECA_1380:
4704 case PCI_DEVICE_ID_ARECA_1381:
4705 case PCI_DEVICE_ID_ARECA_1680:
4706 case PCI_DEVICE_ID_ARECA_1681:
4707 case PCI_DEVICE_ID_ARECA_1880:
4708 case PCI_DEVICE_ID_ARECA_1883:
4709 case PCI_DEVICE_ID_ARECA_1884:
4710 type = "SAS/SATA";
4711 break;
4712 case PCI_DEVICE_ID_ARECA_1886_0:
4713 case PCI_DEVICE_ID_ARECA_1886:
4714 type = "NVMe/SAS/SATA";
4715 break;
4716 default:
4717 type = "unknown";
4718 raid6 = 0;
4719 break;
4720 }
4721 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4722 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4723 return buf;
4724 }
4725