// SPDX-License-Identifier: GPL-2.0-only
/* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
 *
 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
 *
 * A lot of this driver was directly stolen from Erik H. Moe's PCI
 * Qlogic ISP driver. Mucho kudos to him for this code.
 *
 * An even bigger kudos to John Grana at Performance Technologies
 * for providing me with the hardware to write this driver, you rule
 * John you really do.
 *
 * May, 2, 1997: Added support for QLGC,isp --jj
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/firmware.h>

#include <asm/byteorder.h>

#include "qlogicpti.h"

#include <asm/dma.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#define MAX_TARGETS 16
#define MAX_LUNS 8	/* 32 for 1.31 F/W */

#define DEFAULT_LOOP_COUNT 10000

static struct qlogicpti *qptichain = NULL;
static DEFINE_SPINLOCK(qptichain_lock);

#define PACKB(a, b) (((a)<<4)|(b))

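/* Each mbox_param[] entry below packs, for one mailbox opcode, the number
 * of mailbox registers that must be written before issuing the command
 * (high nibble) and the number of result registers to read back afterwards
 * (low nibble).  A PACKB(0, 0) entry marks an opcode this driver never
 * issues; qlogicpti_mbox_command() refuses such opcodes outright.
 */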
static const u_char mbox_param[] = {
	PACKB(1, 1),	/* MBOX_NO_OP */
	PACKB(5, 5),	/* MBOX_LOAD_RAM */
	PACKB(2, 0),	/* MBOX_EXEC_FIRMWARE */
	PACKB(5, 5),	/* MBOX_DUMP_RAM */
	PACKB(3, 3),	/* MBOX_WRITE_RAM_WORD */
	PACKB(2, 3),	/* MBOX_READ_RAM_WORD */
	PACKB(6, 6),	/* MBOX_MAILBOX_REG_TEST */
	PACKB(2, 3),	/* MBOX_VERIFY_CHECKSUM */
	PACKB(1, 3),	/* MBOX_ABOUT_FIRMWARE */
	PACKB(0, 0),	/* 0x0009 */
	PACKB(0, 0),	/* 0x000a */
	PACKB(0, 0),	/* 0x000b */
	PACKB(0, 0),	/* 0x000c */
	PACKB(0, 0),	/* 0x000d */
	PACKB(1, 2),	/* MBOX_CHECK_FIRMWARE */
	PACKB(0, 0),	/* 0x000f */
	PACKB(5, 5),	/* MBOX_INIT_REQ_QUEUE */
	PACKB(6, 6),	/* MBOX_INIT_RES_QUEUE */
	PACKB(4, 4),	/* MBOX_EXECUTE_IOCB */
	PACKB(2, 2),	/* MBOX_WAKE_UP */
	PACKB(1, 6),	/* MBOX_STOP_FIRMWARE */
	PACKB(4, 4),	/* MBOX_ABORT */
	PACKB(2, 2),	/* MBOX_ABORT_DEVICE */
	PACKB(3, 3),	/* MBOX_ABORT_TARGET */
	PACKB(2, 2),	/* MBOX_BUS_RESET */
	PACKB(2, 3),	/* MBOX_STOP_QUEUE */
	PACKB(2, 3),	/* MBOX_START_QUEUE */
	PACKB(2, 3),	/* MBOX_SINGLE_STEP_QUEUE */
	PACKB(2, 3),	/* MBOX_ABORT_QUEUE */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_STATUS */
	PACKB(0, 0),	/* 0x001e */
	PACKB(1, 3),	/* MBOX_GET_FIRMWARE_STATUS */
	PACKB(1, 2),	/* MBOX_GET_INIT_SCSI_ID */
	PACKB(1, 2),	/* MBOX_GET_SELECT_TIMEOUT */
	PACKB(1, 3),	/* MBOX_GET_RETRY_COUNT */
	PACKB(1, 2),	/* MBOX_GET_TAG_AGE_LIMIT */
	PACKB(1, 2),	/* MBOX_GET_CLOCK_RATE */
	PACKB(1, 2),	/* MBOX_GET_ACT_NEG_STATE */
	PACKB(1, 2),	/* MBOX_GET_ASYNC_DATA_SETUP_TIME */
	PACKB(1, 3),	/* MBOX_GET_SBUS_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_TARGET_PARAMS */
	PACKB(2, 4),	/* MBOX_GET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x002a */
	PACKB(0, 0),	/* 0x002b */
	PACKB(0, 0),	/* 0x002c */
	PACKB(0, 0),	/* 0x002d */
	PACKB(0, 0),	/* 0x002e */
	PACKB(0, 0),	/* 0x002f */
	PACKB(2, 2),	/* MBOX_SET_INIT_SCSI_ID */
	PACKB(2, 2),	/* MBOX_SET_SELECT_TIMEOUT */
	PACKB(3, 3),	/* MBOX_SET_RETRY_COUNT */
	PACKB(2, 2),	/* MBOX_SET_TAG_AGE_LIMIT */
	PACKB(2, 2),	/* MBOX_SET_CLOCK_RATE */
	PACKB(2, 2),	/* MBOX_SET_ACTIVE_NEG_STATE */
	PACKB(2, 2),	/* MBOX_SET_ASYNC_DATA_SETUP_TIME */
	PACKB(3, 3),	/* MBOX_SET_SBUS_CONTROL_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_TARGET_PARAMS */
	PACKB(4, 4),	/* MBOX_SET_DEV_QUEUE_PARAMS */
	PACKB(0, 0),	/* 0x003a */
	PACKB(0, 0),	/* 0x003b */
	PACKB(0, 0),	/* 0x003c */
	PACKB(0, 0),	/* 0x003d */
	PACKB(0, 0),	/* 0x003e */
	PACKB(0, 0),	/* 0x003f */
	PACKB(0, 0),	/* 0x0040 */
	PACKB(0, 0),	/* 0x0041 */
	PACKB(0, 0)	/* 0x0042 */
};

#define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param)

/* queue lengths _must_ be a power of two: */
#define QUEUE_DEPTH(in, out, ql)  ((in - out) & (ql))
#define REQ_QUEUE_DEPTH(in, out)  QUEUE_DEPTH(in, out, \
					      QLOGICPTI_REQ_QUEUE_LEN)
#define RES_QUEUE_DEPTH(in, out)  QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
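/* The *_QUEUE_LEN constants are used directly as index masks above, i.e.
 * the power-of-two ring size minus one.  For example, with a mask of 255,
 * in == 3 and out == 250 give a depth of (3 - 250) & 255 == 9 outstanding
 * entries, so the masking handles wrap-around of the ring indices for free.
 */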

static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB,
		    qpti->qregs + SBUS_CTRL);
}

static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti)
{
	sbus_writew(0, qpti->qregs + SBUS_CTRL);
}

static inline void set_sbus_cfg1(struct qlogicpti *qpti)
{
	u16 val;
	u8 bursts = qpti->bursts;

#if 0	/* It appears that at least PTI cards do not support
	 * 64-byte bursts and that setting the B64 bit actually
	 * is a nop and the chip ends up using the smallest burst
	 * size. -DaveM
	 */
	if (sbus_can_burst64() && (bursts & DMA_BURST64)) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64);
	} else
#endif
	if (bursts & DMA_BURST32) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32);
	} else if (bursts & DMA_BURST16) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16);
	} else if (bursts & DMA_BURST8) {
		val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8);
	} else {
		val = 0; /* No sbus bursts for you... */
	}
	sbus_writew(val, qpti->qregs + SBUS_CFG1);
}

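/* Issue one mailbox command and collect its results.  The handshake below
 * mirrors the code: lock the SBUS semaphore, wait for any previous host
 * interrupt to drain, write the inbound MBOX registers, drop the semaphore
 * and ring the RISC doorbell (HCCTRL_SHIRQ), then poll until the firmware
 * raises the semaphore again and MBOX0 leaves the "busy" (0x04) state
 * before reading back the outbound registers.  Every poll loop is bounded
 * by DEFAULT_LOOP_COUNT, so a wedged chip only costs us a noisy printk.
 */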
static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force)
{
	int loop_count;
	u16 tmp;

	if (mbox_param[param[0]] == 0)
		return 1;

	/* Set SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp |= SBUS_SEMAPHORE_LCK;
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* Wait for host IRQ bit to clear. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) {
		barrier();
		cpu_relax();
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
		       qpti->qpti_id);

	/* Write mailbox command registers. */
	switch (mbox_param[param[0]] >> 4) {
	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
		/* Fall through */
	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
		/* Fall through */
	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
		/* Fall through */
	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
		/* Fall through */
	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
		/* Fall through */
	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Clear SBUS semaphore. */
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	/* Set HOST interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_SHIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Wait for HOST interrupt clears. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
		       qpti->qpti_id, param[0]);

	/* Wait for SBUS semaphore to get set. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count &&
	       !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) {
		udelay(20);

		/* Workaround for some buggy chips. */
		if (sbus_readw(qpti->qregs + MBOX0) & 0x4000)
			break;
	}
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
		       qpti->qpti_id, param[0]);

	/* Wait for MBOX busy condition to go away. */
	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
		       qpti->qpti_id, param[0]);

	/* Read back output parameters. */
	switch (mbox_param[param[0]] & 0xf) {
	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
		/* Fall through */
	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
		/* Fall through */
	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
		/* Fall through */
	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
		/* Fall through */
	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
		/* Fall through */
	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
	}

	/* Clear RISC interrupt. */
	tmp = sbus_readw(qpti->qregs + HCCTRL);
	tmp |= HCCTRL_CRIRQ;
	sbus_writew(tmp, qpti->qregs + HCCTRL);

	/* Release SBUS semaphore. */
	tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE);
	tmp &= ~(SBUS_SEMAPHORE_LCK);
	sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE);

	/* We're done. */
	return 0;
}

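/* Seed the firmware's host-wide and per-target parameters with sane
 * defaults.  Nothing is sent to the chip here; qlogicpti_reset_hardware()
 * and qlogicpti_slave_configure() push these values out via mailbox
 * commands later on.
 */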
static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti)
{
	int i;

	qpti->host_param.initiator_scsi_id = qpti->scsi_id;
	qpti->host_param.bus_reset_delay = 3;
	qpti->host_param.retry_count = 0;
	qpti->host_param.retry_delay = 5;
	qpti->host_param.async_data_setup_time = 3;
	qpti->host_param.req_ack_active_negation = 1;
	qpti->host_param.data_line_active_negation = 1;
	qpti->host_param.data_dma_burst_enable = 1;
	qpti->host_param.command_dma_burst_enable = 1;
	qpti->host_param.tag_aging = 8;
	qpti->host_param.selection_timeout = 250;
	qpti->host_param.max_queue_depth = 256;

	for(i = 0; i < MAX_TARGETS; i++) {
		/*
		 * disconnect, parity, arq, reneg on reset, and, oddly enough
		 * tags...the midlayer's notion of tagged support has to match
		 * our device settings, and since we base whether we enable a
		 * tag on a per-cmnd basis upon what the midlayer sez, we
		 * actually enable the capability here.
		 */
		qpti->dev_param[i].device_flags = 0xcd;
		qpti->dev_param[i].execution_throttle = 16;
		if (qpti->ultra) {
			qpti->dev_param[i].synchronous_period = 12;
			qpti->dev_param[i].synchronous_offset = 8;
		} else {
			qpti->dev_param[i].synchronous_period = 25;
			qpti->dev_param[i].synchronous_offset = 12;
		}
		qpti->dev_param[i].device_enable = 1;
	}
}

static int qlogicpti_reset_hardware(struct Scsi_Host *host)
{
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	u_short param[6];
	unsigned short risc_code_addr;
	int loop_count, i;
	unsigned long flags;

	risc_code_addr = 0x1000;	/* all load addresses are at 0x1000 */

	spin_lock_irqsave(host->host_lock, flags);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);

	/* Only reset the scsi bus if it is not free. */
	if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) {
		sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE);
		sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD);
		udelay(400);
	}

	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);

	loop_count = DEFAULT_LOOP_COUNT;
	while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
		udelay(20);
	if (!loop_count)
		printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
		       qpti->qpti_id);

	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	set_sbus_cfg1(qpti);
	qlogicpti_enable_irqs(qpti);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	/* reset adapter and per-device default values. */
	/* do it after finding out whether we're ultra mode capable */
	qlogicpti_set_hostdev_defaults(qpti);

	/* Release the RISC processor. */
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Get RISC to start executing the firmware code. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Set initiator scsi ID. */
	param[0] = MBOX_SET_INIT_SCSI_ID;
	param[1] = qpti->host_param.initiator_scsi_id;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	/* Initialize state of the queues, both hw and sw. */
	qpti->req_in_ptr = qpti->res_out_ptr = 0;

	param[0] = MBOX_INIT_RES_QUEUE;
	param[1] = RES_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->res_dvma >> 16);
	param[3] = (u_short) (qpti->res_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_INIT_REQ_QUEUE;
	param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1;
	param[2] = (u_short) (qpti->req_dvma >> 16);
	param[3] = (u_short) (qpti->req_dvma & 0xffff);
	param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n",
		       qpti->qpti_id);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}

	param[0] = MBOX_SET_RETRY_COUNT;
	param[1] = qpti->host_param.retry_count;
	param[2] = qpti->host_param.retry_delay;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_TAG_AGE_LIMIT;
	param[1] = qpti->host_param.tag_aging;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_GET_DEV_QUEUE_PARAMS;
		param[1] = (i << 8);
		qlogicpti_mbox_command(qpti, param, 0);
	}

	param[0] = MBOX_GET_FIRMWARE_STATUS;
	qlogicpti_mbox_command(qpti, param, 0);

	param[0] = MBOX_SET_SELECT_TIMEOUT;
	param[1] = qpti->host_param.selection_timeout;
	qlogicpti_mbox_command(qpti, param, 0);

	for (i = 0; i < MAX_TARGETS; i++) {
		param[0] = MBOX_SET_TARGET_PARAMS;
		param[1] = (i << 8);
		param[2] = (qpti->dev_param[i].device_flags << 8);
		/*
		 * Since we're now loading 1.31 f/w, force narrow/async.
		 */
		param[2] |= 0xc0;
		param[3] = 0;	/* no offset, we do not have sync mode yet */
		qlogicpti_mbox_command(qpti, param, 0);
	}

	/*
	 * Always (sigh) do an initial bus reset (kicks f/w).
	 */
	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	qlogicpti_mbox_command(qpti, param, 0);
	qpti->send_marker = 1;

	spin_unlock_irqrestore(host->host_lock, flags);
	return 0;
}

#define PTI_RESET_LIMIT 400

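/* Pull "qlogic/isp1000.bin" out of the firmware loader and feed it to the
 * RISC one word at a time: reset the ISP, stop the resident ROM firmware,
 * write the image via MBOX_WRITE_RAM_WORD, have the chip verify the
 * checksum, then start execution and record the reported firmware
 * revision.  The host lock is held for the duration of the download.
 */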
static int qlogicpti_load_firmware(struct qlogicpti *qpti)
{
	const struct firmware *fw;
	const char fwname[] = "qlogic/isp1000.bin";
	const __le16 *fw_data;
	struct Scsi_Host *host = qpti->qhost;
	unsigned short csum = 0;
	unsigned short param[6];
	unsigned short risc_code_addr, risc_code_length;
	int err;
	unsigned long flags;
	int i, timeout;

	err = request_firmware(&fw, fwname, &qpti->op->dev);
	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		return err;
	}
	if (fw->size % 2) {
		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
		       fw->size, fwname);
		err = -EINVAL;
		goto outfirm;
	}
	fw_data = (const __le16 *)&fw->data[0];
	risc_code_addr = 0x1000;	/* all f/w modules load at 0x1000 */
	risc_code_length = fw->size / 2;

	spin_lock_irqsave(host->host_lock, flags);

	/* Verify the checksum twice, one before loading it, and once
	 * afterwards via the mailbox commands.
	 */
	for (i = 0; i < risc_code_length; i++)
		csum += __le16_to_cpu(fw_data[i]);
	if (csum) {
		printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!",
		       qpti->qpti_id);
		err = 1;
		goto out;
	}
	sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL);
	sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL);
	timeout = PTI_RESET_LIMIT;
	while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET))
		udelay(20);
	if (!timeout) {
		printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id);
		err = 1;
		goto out;
	}

	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
	mdelay(1);

	sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL);
	set_sbus_cfg1(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);

	if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) {
		qpti->ultra = 1;
		sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA),
			    qpti->qregs + RISC_MTREG);
	} else {
		qpti->ultra = 0;
		sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT),
			    qpti->qregs + RISC_MTREG);
	}

	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Pin lines are only stable while RISC is paused. */
	sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE)
		qpti->differential = 1;
	else
		qpti->differential = 0;
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* This shouldn't be necessary; we've reset things, so we should be
	 * running from the ROM now.
	 */

	param[0] = MBOX_STOP_FIRMWARE;
	param[1] = param[2] = param[3] = param[4] = param[5] = 0;
	if (qlogicpti_mbox_command(qpti, param, 1)) {
		printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n",
		       qpti->qpti_id);
		err = 1;
		goto out;
	}

	/* Load it up.. */
	for (i = 0; i < risc_code_length; i++) {
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = risc_code_addr + i;
		param[2] = __le16_to_cpu(fw_data[i]);
		if (qlogicpti_mbox_command(qpti, param, 1) ||
		    param[0] != MBOX_COMMAND_COMPLETE) {
			printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n",
			       qpti->qpti_id);
			err = 1;
			goto out;
		}
	}

	/* Reset the ISP again. */
	sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL);
	mdelay(1);

	qlogicpti_enable_irqs(qpti);
	sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL);

	/* Ask ISP to verify the checksum of the new code. */
	param[0] = MBOX_VERIFY_CHECKSUM;
	param[1] = risc_code_addr;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n",
		       qpti->qpti_id);
		err = 1;
		goto out;
	}

	/* Start using newly downloaded firmware. */
	param[0] = MBOX_EXEC_FIRMWARE;
	param[1] = risc_code_addr;
	qlogicpti_mbox_command(qpti, param, 1);

	param[0] = MBOX_ABOUT_FIRMWARE;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n",
		       qpti->qpti_id);
		err = 1;
		goto out;
	}

	/* Snag the major and minor revisions from the result. */
	qpti->fware_majrev = param[1];
	qpti->fware_minrev = param[2];
	qpti->fware_micrev = param[3];

	/* Set the clock rate */
	param[0] = MBOX_SET_CLOCK_RATE;
	param[1] = qpti->clock;
	if (qlogicpti_mbox_command(qpti, param, 1) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n",
		       qpti->qpti_id);
		err = 1;
		goto out;
	}

	if (qpti->is_pti != 0) {
		/* Load scsi initiator ID and interrupt level into sbus static ram. */
		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = 0xff80;
		param[2] = (unsigned short) qpti->scsi_id;
		qlogicpti_mbox_command(qpti, param, 1);

		param[0] = MBOX_WRITE_RAM_WORD;
		param[1] = 0xff00;
		param[2] = (unsigned short) 3;
		qlogicpti_mbox_command(qpti, param, 1);
	}

out:
	spin_unlock_irqrestore(host->host_lock, flags);
outfirm:
	release_firmware(fw);
	return err;
}

static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
{
	int curstat = sbus_readb(qpti->sreg);

	curstat &= 0xf0;
	if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE))
		printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id);
	if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER))
		printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id);
	if (curstat != qpti->swsreg) {
		int error = 0;
		if (curstat & SREG_FUSE) {
			error++;
			printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id);
		}
		if (curstat & SREG_TPOWER) {
			error++;
			printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id);
		}
		if (qpti->differential &&
		    (curstat & SREG_DSENSE) != SREG_DSENSE) {
			error++;
			printk("qlogicpti%d: You have a single ended device on a "
			       "differential bus! Please fix!\n", qpti->qpti_id);
		}
		qpti->swsreg = curstat;
		return error;
	}
	return 0;
}

static irqreturn_t qpti_intr(int irq, void *dev_id);

static void qpti_chain_add(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain != NULL) {
		struct qlogicpti *qlink = qptichain;

		while(qlink->next)
			qlink = qlink->next;
		qlink->next = qpti;
	} else {
		qptichain = qpti;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static void qpti_chain_del(struct qlogicpti *qpti)
{
	spin_lock_irq(&qptichain_lock);
	if (qptichain == qpti) {
		qptichain = qpti->next;
	} else {
		struct qlogicpti *qlink = qptichain;
		while(qlink->next != qpti)
			qlink = qlink->next;
		qlink->next = qpti->next;
	}
	qpti->next = NULL;
	spin_unlock_irq(&qptichain_lock);
}

static int qpti_map_regs(struct qlogicpti *qpti)
{
	struct platform_device *op = qpti->op;

	qpti->qregs = of_ioremap(&op->resource[0], 0,
				 resource_size(&op->resource[0]),
				 "PTI Qlogic/ISP");
	if (!qpti->qregs) {
		printk("PTI: Qlogic/ISP registers are unmappable\n");
		return -ENODEV;
	}
	if (qpti->is_pti) {
		qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
					sizeof(unsigned char),
					"PTI Qlogic/ISP statreg");
		if (!qpti->sreg) {
			printk("PTI: Qlogic/ISP status register is unmappable\n");
			return -ENODEV;
		}
	}
	return 0;
}

static int qpti_register_irq(struct qlogicpti *qpti)
{
	struct platform_device *op = qpti->op;

	qpti->qhost->irq = qpti->irq = op->archdata.irqs[0];

	/* We used to try various overly-clever things to
	 * reduce the interrupt processing overhead on
	 * sun4c/sun4m when multiple PTI's shared the
	 * same IRQ. It was too complex and messy to
	 * sanely maintain.
	 */
	if (request_irq(qpti->irq, qpti_intr,
			IRQF_SHARED, "QlogicPTI", qpti))
		goto fail;

	printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);

	return 0;

fail:
	printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
	return -1;
}

static void qpti_get_scsi_id(struct qlogicpti *qpti)
{
	struct platform_device *op = qpti->op;
	struct device_node *dp;

	dp = op->dev.of_node;

	qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id",
						      -1);
	if (qpti->scsi_id == -1)
		qpti->scsi_id =
			of_getintprop_default(dp->parent,
					      "scsi-initiator-id", 7);
	qpti->qhost->this_id = qpti->scsi_id;
	qpti->qhost->max_sectors = 64;

	printk("SCSI ID %d ", qpti->scsi_id);
}

static void qpti_get_bursts(struct qlogicpti *qpti)
{
	struct platform_device *op = qpti->op;
	u8 bursts, bmask;

	bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
	bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
	if (bmask != 0xff)
		bursts &= bmask;
	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	qpti->bursts = bursts;
}

static void qpti_get_clock(struct qlogicpti *qpti)
{
	unsigned int cfreq;

	/* Check for what the clock input to this card is.
	 * Default to 40Mhz.
	 */
	cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000);
	qpti->clock = (cfreq + 500000)/1000000;
	if (qpti->clock == 0) /* bullshit */
		qpti->clock = 40;
}

/* The request and response queues must each be aligned
 * on a page boundary.
 */
static int qpti_map_queues(struct qlogicpti *qpti)
{
	struct platform_device *op = qpti->op;

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	qpti->res_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(RES_QUEUE_LEN),
					   &qpti->res_dvma, GFP_ATOMIC);
	if (qpti->res_cpu == NULL ||
	    qpti->res_dvma == 0) {
		printk("QPTI: Cannot map response queue.\n");
		return -1;
	}

	qpti->req_cpu = dma_alloc_coherent(&op->dev,
					   QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
					   &qpti->req_dvma, GFP_ATOMIC);
	if (qpti->req_cpu == NULL ||
	    qpti->req_dvma == 0) {
		dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN),
				  qpti->res_cpu, qpti->res_dvma);
		printk("QPTI: Cannot map request queue.\n");
		return -1;
	}
	memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN));
	memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN));
	return 0;
}

const char *qlogicpti_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;

	sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p",
		qpti->qhost->irq, qpti->qregs);
	return buf;
}

/* I am a certified frobtronicist. */
static inline void marker_frob(struct Command_Entry *cmd)
{
	struct Marker_Entry *marker = (struct Marker_Entry *) cmd;

	memset(marker, 0, sizeof(struct Marker_Entry));
	marker->hdr.entry_cnt = 1;
	marker->hdr.entry_type = ENTRY_MARKER;
	marker->modifier = SYNC_ALL;
	marker->rsvd = 0;
}

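/* Translate a struct scsi_cmnd into the firmware's Command_Entry layout.
 * Tagged devices normally get simple tags; if a target has gone roughly
 * five seconds without an ordered tag, one is issued instead so that older
 * commands are not starved indefinitely by a stream of simple-tagged I/O.
 */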
static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
			    struct qlogicpti *qpti)
{
	memset(cmd, 0, sizeof(struct Command_Entry));
	cmd->hdr.entry_cnt = 1;
	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->target_id = Cmnd->device->id;
	cmd->target_lun = Cmnd->device->lun;
	cmd->cdb_length = Cmnd->cmd_len;
	cmd->control_flags = 0;
	if (Cmnd->device->tagged_supported) {
		if (qpti->cmd_count[Cmnd->device->id] == 0)
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) {
			cmd->control_flags = CFLAG_ORDERED_TAG;
			qpti->tag_ages[Cmnd->device->id] = jiffies;
		} else
			cmd->control_flags = CFLAG_SIMPLE_TAG;
	}
	if ((Cmnd->cmnd[0] == WRITE_6) ||
	    (Cmnd->cmnd[0] == WRITE_10) ||
	    (Cmnd->cmnd[0] == WRITE_12))
		cmd->control_flags |= CFLAG_WRITE;
	else
		cmd->control_flags |= CFLAG_READ;
	cmd->time_out = Cmnd->request->timeout/HZ;
	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
}

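/* Map the command's scatter-gather list and attach it to the request
 * entry.  The Command_Entry itself has room for four data segments; any
 * remainder spills into Continuation_Entry records (seven segments each)
 * taken from the same ring.  Returns the new request-in pointer, or -1 if
 * the ring fills up before the command is fully described.
 */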
/* Do it to it baby. */
static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
{
	struct dataseg *ds;
	struct scatterlist *sg, *s;
	int i, n;

	if (scsi_bufflen(Cmnd)) {
		int sg_count;

		sg = scsi_sglist(Cmnd);
		sg_count = dma_map_sg(&qpti->op->dev, sg,
				      scsi_sg_count(Cmnd),
				      Cmnd->sc_data_direction);

		ds = cmd->dataseg;
		cmd->segment_cnt = sg_count;

		/* Fill in first four sg entries: */
		n = sg_count;
		if (n > 4)
			n = 4;
		for_each_sg(sg, s, n, i) {
			ds[i].d_base = sg_dma_address(s);
			ds[i].d_count = sg_dma_len(s);
		}
		sg_count -= 4;
		sg = s;
		while (sg_count > 0) {
			struct Continuation_Entry *cont;

			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr];
			in_ptr = NEXT_REQ_PTR(in_ptr);
			if (in_ptr == out_ptr)
				return -1;

			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;
			cont->hdr.flags = 0;
			cont->reserved = 0;
			ds = cont->dataseg;
			n = sg_count;
			if (n > 7)
				n = 7;
			for_each_sg(sg, s, n, i) {
				ds[i].d_base = sg_dma_address(s);
				ds[i].d_count = sg_dma_len(s);
			}
			sg_count -= n;
			sg = s;
		}
	} else {
		cmd->dataseg[0].d_base = 0;
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = 1;	/* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmd so we can find it later. */
	cmd->handle = in_ptr;
	qpti->cmd_slots[in_ptr] = Cmnd;

	qpti->cmd_count[Cmnd->device->id]++;
	sbus_writew(in_ptr, qpti->qregs + MBOX4);
	qpti->req_in_ptr = in_ptr;

	return in_ptr;
}

static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr)
{
	/* Temporary workaround until bug is found and fixed (one bug has been found
	   already, but fixing it makes things even worse) -jj */
	int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
	host->can_queue = scsi_host_busy(host) + num_free;
	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}

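/* Per-device setup called by the midlayer once scanning finds a device:
 * turn on the synchronous and/or wide negotiation bits when the device
 * advertises them, then push the updated flags to the firmware with
 * MBOX_SET_TARGET_PARAMS.
 */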
static int qlogicpti_slave_configure(struct scsi_device *sdev)
{
	struct qlogicpti *qpti = shost_priv(sdev->host);
	int tgt = sdev->id;
	u_short param[6];

	/* tags handled in midlayer */
	/* enable sync mode? */
	if (sdev->sdtr) {
		qpti->dev_param[tgt].device_flags |= 0x10;
	} else {
		qpti->dev_param[tgt].synchronous_offset = 0;
		qpti->dev_param[tgt].synchronous_period = 0;
	}
	/* are we wide capable? */
	if (sdev->wdtr)
		qpti->dev_param[tgt].device_flags |= 0x20;

	param[0] = MBOX_SET_TARGET_PARAMS;
	param[1] = (tgt << 8);
	param[2] = (qpti->dev_param[tgt].device_flags << 8);
	if (qpti->dev_param[tgt].device_flags & 0x10) {
		param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
			qpti->dev_param[tgt].synchronous_period;
	} else {
		param[3] = 0;
	}
	qlogicpti_mbox_command(qpti, param, 0);
	return 0;
}

/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 *
 * "This code must fly." -davem
 */
static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	struct Command_Entry *cmd;
	u_int out_ptr;
	int in_ptr;

	Cmnd->scsi_done = done;

	in_ptr = qpti->req_in_ptr;
	cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
	out_ptr = sbus_readw(qpti->qregs + MBOX4);
	in_ptr = NEXT_REQ_PTR(in_ptr);
	if (in_ptr == out_ptr)
		goto toss_command;

	if (qpti->send_marker) {
		marker_frob(cmd);
		qpti->send_marker = 0;
		if (NEXT_REQ_PTR(in_ptr) == out_ptr) {
			sbus_writew(in_ptr, qpti->qregs + MBOX4);
			qpti->req_in_ptr = in_ptr;
			goto toss_command;
		}
		cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr];
		in_ptr = NEXT_REQ_PTR(in_ptr);
	}
	cmd_frob(cmd, Cmnd, qpti);
	if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1)
		goto toss_command;

	update_can_queue(host, in_ptr, out_ptr);

	return 0;

toss_command:
	printk(KERN_EMERG "qlogicpti%d: request queue overflow\n",
	       qpti->qpti_id);

	/* Unfortunately, unless you use the new EH code, which
	 * we don't, the midlayer will ignore the return value,
	 * which is insane. We pick up the pieces like this.
	 */
	Cmnd->result = (DID_BUS_BUSY << 16);
	done(Cmnd);
	return 1;
}

static DEF_SCSI_QCMD(qlogicpti_queuecommand)

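/* Fold the ISP's Status_Entry into the midlayer's result word: the SCSI
 * status byte stays in the low bits and the translated host byte goes in
 * bits 16..23.
 */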
static int qlogicpti_return_status(struct Status_Entry *sts, int id)
{
	int host_status = DID_ERROR;

	switch (sts->completion_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;
	case CS_INCOMPLETE:
		if (!(sts->state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(sts->state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(sts->state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(sts->state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;
	case CS_DMA_ERROR:
	case CS_TRANSPORT_ERROR:
		host_status = DID_ERROR;
		break;
	case CS_RESET_OCCURRED:
	case CS_BUS_RESET:
		host_status = DID_RESET;
		break;
	case CS_ABORTED:
		host_status = DID_ABORT;
		break;
	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;
	case CS_DATA_OVERRUN:
	case CS_COMMAND_OVERRUN:
	case CS_STATUS_OVERRUN:
	case CS_BAD_MESSAGE:
	case CS_NO_MESSAGE_OUT:
	case CS_EXT_ID_FAILED:
	case CS_IDE_MSG_FAILED:
	case CS_ABORT_MSG_FAILED:
	case CS_NOP_MSG_FAILED:
	case CS_PARITY_ERROR_MSG_FAILED:
	case CS_DEVICE_RESET_MSG_FAILED:
	case CS_ID_MSG_FAILED:
	case CS_UNEXP_BUS_FREE:
		host_status = DID_ERROR;
		break;
	case CS_DATA_UNDERRUN:
		host_status = DID_OK;
		break;
	default:
		printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
		       id, sts->completion_status);
		host_status = DID_ERROR;
		break;
	}

	return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
}

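/* Drain the response ring.  Completed commands are unlinked from
 * cmd_slots[], unmapped, and chained together through host_scribble so
 * that qpti_intr() can run the scsi_done() callbacks once the queue
 * bookkeeping is finished.
 */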
static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
{
	struct scsi_cmnd *Cmnd, *done_queue = NULL;
	struct Status_Entry *sts;
	u_int in_ptr, out_ptr;

	if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT))
		return NULL;

	in_ptr = sbus_readw(qpti->qregs + MBOX5);
	sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL);
	if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) {
		switch (sbus_readw(qpti->qregs + MBOX0)) {
		case ASYNC_SCSI_BUS_RESET:
		case EXECUTION_TIMEOUT_RESET:
			qpti->send_marker = 1;
			break;
		case INVALID_COMMAND:
		case HOST_INTERFACE_ERROR:
		case COMMAND_ERROR:
		case COMMAND_PARAM_ERROR:
			break;
		}
		sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE);
	}

	/* This looks like a network driver! */
	out_ptr = qpti->res_out_ptr;
	while (out_ptr != in_ptr) {
		u_int cmd_slot;

		sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr];
		out_ptr = NEXT_RES_PTR(out_ptr);

		/* We store an index in the handle, not the pointer in
		 * some form. This avoids problems due to the fact
		 * that the handle provided is only 32-bits. -DaveM
		 */
		cmd_slot = sts->handle;
		Cmnd = qpti->cmd_slots[cmd_slot];
		qpti->cmd_slots[cmd_slot] = NULL;

		if (sts->completion_status == CS_RESET_OCCURRED ||
		    sts->completion_status == CS_ABORTED ||
		    (sts->status_flags & STF_BUS_RESET))
			qpti->send_marker = 1;

		if (sts->state_flags & SF_GOT_SENSE)
			memcpy(Cmnd->sense_buffer, sts->req_sense_data,
			       SCSI_SENSE_BUFFERSIZE);

		if (sts->hdr.entry_type == ENTRY_STATUS)
			Cmnd->result =
				qlogicpti_return_status(sts, qpti->qpti_id);
		else
			Cmnd->result = DID_ERROR << 16;

		if (scsi_bufflen(Cmnd))
			dma_unmap_sg(&qpti->op->dev,
				     scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
				     Cmnd->sc_data_direction);

		qpti->cmd_count[Cmnd->device->id]--;
		sbus_writew(out_ptr, qpti->qregs + MBOX5);
		Cmnd->host_scribble = (unsigned char *) done_queue;
		done_queue = Cmnd;
	}
	qpti->res_out_ptr = out_ptr;

	return done_queue;
}

static irqreturn_t qpti_intr(int irq, void *dev_id)
{
	struct qlogicpti *qpti = dev_id;
	unsigned long flags;
	struct scsi_cmnd *dq;

	spin_lock_irqsave(qpti->qhost->host_lock, flags);
	dq = qlogicpti_intr_handler(qpti);

	if (dq != NULL) {
		do {
			struct scsi_cmnd *next;

			next = (struct scsi_cmnd *) dq->host_scribble;
			dq->scsi_done(dq);
			dq = next;
		} while (dq != NULL);
	}
	spin_unlock_irqrestore(qpti->qhost->host_lock, flags);

	return IRQ_HANDLED;
}

static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;
	u32 cmd_cookie;
	int i;

	printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
	       qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);

	qlogicpti_disable_irqs(qpti);

	/* Find the 32-bit cookie we gave to the firmware for
	 * this command.
	 */
	for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++)
		if (qpti->cmd_slots[i] == Cmnd)
			break;
	cmd_cookie = i;

	param[0] = MBOX_ABORT;
	param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun;
	param[2] = cmd_cookie >> 16;
	param[3] = cmd_cookie & 0xffff;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}

static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
{
	u_short param[6];
	struct Scsi_Host *host = Cmnd->device->host;
	struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
	int return_status = SUCCESS;

	printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
	       qpti->qpti_id);

	qlogicpti_disable_irqs(qpti);

	param[0] = MBOX_BUS_RESET;
	param[1] = qpti->host_param.bus_reset_delay;
	if (qlogicpti_mbox_command(qpti, param, 0) ||
	    (param[0] != MBOX_COMMAND_COMPLETE)) {
		printk(KERN_EMERG "qlogicpti%d: scsi bus reset failure: %x\n",
		       qpti->qpti_id, param[0]);
		return_status = FAILED;
	}

	qlogicpti_enable_irqs(qpti);

	return return_status;
}

static struct scsi_host_template qpti_template = {
	.module			= THIS_MODULE,
	.name			= "qlogicpti",
	.info			= qlogicpti_info,
	.queuecommand		= qlogicpti_queuecommand,
	.slave_configure	= qlogicpti_slave_configure,
	.eh_abort_handler	= qlogicpti_abort,
	.eh_host_reset_handler	= qlogicpti_reset,
	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
	.this_id		= 7,
	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
};

static const struct of_device_id qpti_match[];
static int qpti_sbus_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct Scsi_Host *host;
	struct qlogicpti *qpti;
	static int nqptis;
	const char *fcode;

	/* Sometimes Antares cards come up not completely
	 * setup, and we get a report of a zero IRQ.
	 */
	if (op->archdata.irqs[0] == 0)
		return -ENODEV;

	host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
	if (!host)
		return -ENOMEM;

	qpti = shost_priv(host);

	host->max_id = MAX_TARGETS;
	qpti->qhost = host;
	qpti->op = op;
	qpti->qpti_id = nqptis;
	qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp");

	if (qpti_map_regs(qpti) < 0)
		goto fail_unlink;

	if (qpti_register_irq(qpti) < 0)
		goto fail_unmap_regs;

	qpti_get_scsi_id(qpti);
	qpti_get_bursts(qpti);
	qpti_get_clock(qpti);

	/* Clear out scsi_cmnd array. */
	memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots));

	if (qpti_map_queues(qpti) < 0)
		goto fail_free_irq;

	/* Load the firmware. */
	if (qlogicpti_load_firmware(qpti))
		goto fail_unmap_queues;
	if (qpti->is_pti) {
		/* Check the PTI status reg. */
		if (qlogicpti_verify_tmon(qpti))
			goto fail_unmap_queues;
	}

	/* Reset the ISP and init res/req queues. */
	if (qlogicpti_reset_hardware(host))
		goto fail_unmap_queues;

	printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
	       qpti->fware_minrev, qpti->fware_micrev);

	fcode = of_get_property(dp, "isp-fcode", NULL);
	if (fcode && fcode[0])
		printk("(FCode %s)", fcode);
	if (of_find_property(dp, "differential", NULL) != NULL)
		qpti->differential = 1;

	printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
	       qpti->qpti_id,
	       (qpti->ultra ? "Ultra" : "Fast"),
	       (qpti->differential ? "differential" : "single ended"));

	if (scsi_add_host(host, &op->dev)) {
		printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
		goto fail_unmap_queues;
	}

	dev_set_drvdata(&op->dev, qpti);

	qpti_chain_add(qpti);

	scsi_scan_host(host);
	nqptis++;

	return 0;

fail_unmap_queues:
#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

fail_free_irq:
	free_irq(qpti->irq, qpti);

fail_unmap_regs:
	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg,
			   sizeof(unsigned char));

fail_unlink:
	scsi_host_put(host);

	return -ENODEV;
}

static int qpti_sbus_remove(struct platform_device *op)
{
	struct qlogicpti *qpti = dev_get_drvdata(&op->dev);

	qpti_chain_del(qpti);

	scsi_remove_host(qpti->qhost);

	/* Shut up the card. */
	sbus_writew(0, qpti->qregs + SBUS_CTRL);

	/* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */
	free_irq(qpti->irq, qpti);

#define QSIZE(entries)	(((entries) + 1) * QUEUE_ENTRY_LEN)
	dma_free_coherent(&op->dev,
			  QSIZE(RES_QUEUE_LEN),
			  qpti->res_cpu, qpti->res_dvma);
	dma_free_coherent(&op->dev,
			  QSIZE(QLOGICPTI_REQ_QUEUE_LEN),
			  qpti->req_cpu, qpti->req_dvma);
#undef QSIZE

	of_iounmap(&op->resource[0], qpti->qregs,
		   resource_size(&op->resource[0]));
	if (qpti->is_pti)
		of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));

	scsi_host_put(qpti->qhost);

	return 0;
}

static const struct of_device_id qpti_match[] = {
	{
		.name = "ptisp",
	},
	{
		.name = "PTI,ptisp",
	},
	{
		.name = "QLGC,isp",
	},
	{
		.name = "SUNW,isp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, qpti_match);

static struct platform_driver qpti_sbus_driver = {
	.driver = {
		.name = "qpti",
		.of_match_table = qpti_match,
	},
	.probe = qpti_sbus_probe,
	.remove = qpti_sbus_remove,
};

static int __init qpti_init(void)
{
	return platform_driver_register(&qpti_sbus_driver);
}

static void __exit qpti_exit(void)
{
	platform_driver_unregister(&qpti_sbus_driver);
}

MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");

module_init(qpti_init);
module_exit(qpti_exit);