// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */

/*
 * Table showing the current message id in use for each level.
 * Update this table when adding log/debug messages.
 * ----------------------------------------------------------------------
 * | Level                        | Last Value Used | Holes           |
 * ----------------------------------------------------------------------
 * | Module Init and Probe        | 0x0199          |                 |
 * | Mailbox commands             | 0x1206          | 0x11a5-0x11ff   |
 * | Device Discovery             | 0x2134          | 0x210e-0x2115   |
 * |                              |                 | 0x211c-0x2128   |
 * |                              |                 | 0x212c-0x2134   |
 * | Queue Command and IO tracing | 0x3074          | 0x300b          |
 * |                              |                 | 0x3027-0x3028   |
 * |                              |                 | 0x303d-0x3041   |
 * |                              |                 | 0x302e,0x3033   |
 * |                              |                 | 0x3036,0x3038   |
 * |                              |                 | 0x303a          |
 * | DPC Thread                   | 0x4023          | 0x4002,0x4013   |
 * | Async Events                 | 0x509c          |                 |
 * | Timer Routines               | 0x6012          |                 |
 * | User Space Interactions      | 0x70e3          | 0x7018,0x702e   |
 * |                              |                 | 0x7020,0x7024   |
 * |                              |                 | 0x7039,0x7045   |
 * |                              |                 | 0x7073-0x7075   |
 * |                              |                 | 0x70a5-0x70a6   |
 * |                              |                 | 0x70a8,0x70ab   |
 * |                              |                 | 0x70ad-0x70ae   |
 * |                              |                 | 0x70d0-0x70d6   |
 * |                              |                 | 0x70d7-0x70db   |
 * | Task Management              | 0x8042          | 0x8000          |
 * |                              |                 | 0x8019          |
 * |                              |                 | 0x8025,0x8026   |
 * |                              |                 | 0x8031,0x8032   |
 * |                              |                 | 0x8039,0x803c   |
 * | AER/EEH                      | 0x9011          |                 |
 * | Virtual Port                 | 0xa007          |                 |
 * | ISP82XX Specific             | 0xb157          | 0xb002,0xb024   |
 * |                              |                 | 0xb09e,0xb0ae   |
 * |                              |                 | 0xb0c3,0xb0c6   |
 * |                              |                 | 0xb0e0-0xb0ef   |
 * |                              |                 | 0xb085,0xb0dc   |
 * |                              |                 | 0xb107,0xb108   |
 * |                              |                 | 0xb111,0xb11e   |
 * |                              |                 | 0xb12c,0xb12d   |
 * |                              |                 | 0xb13a,0xb142   |
 * |                              |                 | 0xb13c-0xb140   |
 * |                              |                 | 0xb149          |
 * | MultiQ                       | 0xc010          |                 |
 * | Misc                         | 0xd303          | 0xd031-0xd0ff   |
 * |                              |                 | 0xd101-0xd1fe   |
 * |                              |                 | 0xd214-0xd2fe   |
 * | Target Mode                  | 0xe081          |                 |
 * | Target Mode Management       | 0xf09b          | 0xf002          |
 * |                              |                 | 0xf046-0xf049   |
 * | Target Mode Task Management  | 0x1000d         |                 |
 * ----------------------------------------------------------------------
 */
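
/*
 * Illustrative example only (0xd304 is not an existing message id): a new
 * "Misc"-level message would take the next free id after the table's last
 * value for that level, e.g.
 *
 *	ql_log(ql_log_warn, vha, 0xd304, "Example misc-level message.\n");
 *
 * and the "Last Value Used" column above would then be updated to match.
 */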

#include "qla_def.h"

#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include <trace/events/qla.h>

static uint32_t ql_dbg_offset = 0x800;

73 static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
75 {
76 fw_dump->fw_major_version = htonl(ha->fw_major_version);
77 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
78 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
79 fw_dump->fw_attributes = htonl(ha->fw_attributes);
80
81 fw_dump->vendor = htonl(ha->pdev->vendor);
82 fw_dump->device = htonl(ha->pdev->device);
83 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
84 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
85 }
86
87 static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
89 {
90 struct req_que *req = ha->req_q_map[0];
91 struct rsp_que *rsp = ha->rsp_q_map[0];
92 /* Request queue. */
93 memcpy(ptr, req->ring, req->length *
94 sizeof(request_t));
95
96 /* Response queue. */
97 ptr += req->length * sizeof(request_t);
98 memcpy(ptr, rsp->ring, rsp->length *
99 sizeof(response_t));
100
101 return ptr + (rsp->length * sizeof(response_t));
102 }
103
104 int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
	uint32_t ram_dwords, void **nxt)
107 {
108 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
109 dma_addr_t dump_dma = ha->gid_list_dma;
110 uint32_t *chunk = (uint32_t *)ha->gid_list;
111 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
112 uint32_t stat;
113 ulong i, j, timer = 6000000;
114 int rval = QLA_FUNCTION_FAILED;
115 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
116
117 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
118
119 if (qla_pci_disconnected(vha, reg))
120 return rval;
121
122 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
123 if (i + dwords > ram_dwords)
124 dwords = ram_dwords - i;
125
		wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));

		wrt_reg_word(&reg->mailbox9, 0);
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
140
141 ha->flags.mbox_int = 0;
142 while (timer--) {
143 udelay(5);
144
145 if (qla_pci_disconnected(vha, reg))
146 return rval;
147
			stat = rd_reg_dword(&reg->host_status);
149 /* Check for pending interrupts. */
150 if (!(stat & HSRX_RISC_INT))
151 continue;
152
153 stat &= 0xff;
154 if (stat != 0x1 && stat != 0x2 &&
155 stat != 0x10 && stat != 0x11) {
156
157 /* Clear this intr; it wasn't a mailbox intr */
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
167 break;
168 }
169 ha->flags.mbox_int = 1;
170 *nxt = ram + i;
171
172 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
173 /* no interrupt, timed out*/
174 return rval;
175 }
176 if (rval) {
177 /* error completion status */
178 return rval;
179 }
180 for (j = 0; j < dwords; j++) {
181 ram[i + j] =
182 (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
183 chunk[j] : swab32(chunk[j]);
184 }
185 }
186
187 *nxt = ram + i;
188 return QLA_SUCCESS;
189 }
190
191 int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
	uint32_t ram_dwords, void **nxt)
194 {
195 int rval = QLA_FUNCTION_FAILED;
196 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
197 dma_addr_t dump_dma = ha->gid_list_dma;
198 uint32_t *chunk = (uint32_t *)ha->gid_list;
199 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
200 uint32_t stat;
201 ulong i, j, timer = 6000000;
202 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
203
204 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
205
206 if (qla_pci_disconnected(vha, reg))
207 return rval;
208
209 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
210 if (i + dwords > ram_dwords)
211 dwords = ram_dwords - i;
212
		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
		wrt_reg_word(&reg->mailbox1, LSW(addr));
		wrt_reg_word(&reg->mailbox8, MSW(addr));
		wrt_reg_word(&reg->mailbox10, 0);

		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));

		wrt_reg_word(&reg->mailbox4, MSW(dwords));
		wrt_reg_word(&reg->mailbox5, LSW(dwords));
		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
226
227 ha->flags.mbox_int = 0;
228 while (timer--) {
229 udelay(5);
230 if (qla_pci_disconnected(vha, reg))
231 return rval;
232
			stat = rd_reg_dword(&reg->host_status);
234 /* Check for pending interrupts. */
235 if (!(stat & HSRX_RISC_INT))
236 continue;
237
238 stat &= 0xff;
239 if (stat != 0x1 && stat != 0x2 &&
240 stat != 0x10 && stat != 0x11) {
				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
				rd_reg_dword(&reg->hccr);
				continue;
			}

			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
			rd_reg_dword(&reg->hccr);
250 break;
251 }
252 ha->flags.mbox_int = 1;
253 *nxt = ram + i;
254
255 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
256 /* no interrupt, timed out*/
257 return rval;
258 }
259 if (rval) {
260 /* error completion status */
261 return rval;
262 }
263 for (j = 0; j < dwords; j++) {
264 ram[i + j] = (__force __be32)
265 ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
266 chunk[j] : swab32(chunk[j]));
267 }
268 }
269
270 *nxt = ram + i;
271 return QLA_SUCCESS;
272 }
273
274 static int
qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
	uint32_t cram_size, void **nxt)
277 {
278 int rval;
279
280 /* Code RAM. */
281 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
282 if (rval != QLA_SUCCESS)
283 return rval;
284
285 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
286
287 /* External Memory. */
288 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
289 ha->fw_memory_size - 0x100000 + 1, nxt);
290 if (rval == QLA_SUCCESS)
291 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
292
293 return rval;
294 }
295
296 static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
	uint32_t count, __be32 *buf)
299 {
300 __le32 __iomem *dmp_reg;
301
	wrt_reg_dword(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
304 for ( ; count--; dmp_reg++)
305 *buf++ = htonl(rd_reg_dword(dmp_reg));
306
307 return buf;
308 }
309
310 void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
312 {
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* A 100 usec delay is sufficient for the hardware to pause the RISC. */
	udelay(100);
	if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
319 }
320
321 int
qla24xx_soft_reset(struct qla_hw_data *ha)
323 {
324 int rval = QLA_SUCCESS;
325 uint32_t cnt;
326 uint16_t wd;
327 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
328
329 /*
330 * Reset RISC. The delay is dependent on system architecture.
331 * Driver can proceed with the reset sequence after waiting
332 * for a timeout period.
333 */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
346 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
347
348 udelay(100);
349
350 /* Wait for soft-reset to complete. */
351 for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
354 break;
355
356 udelay(10);
357 }
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	rd_reg_dword(&reg->hccr); /* PCI Posting. */

	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
365 rval == QLA_SUCCESS; cnt--) {
366 if (cnt)
367 udelay(10);
368 else
369 rval = QLA_FUNCTION_TIMEOUT;
370 }
371 if (rval == QLA_SUCCESS)
372 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
373
374 return rval;
375 }
376
377 static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
	uint32_t ram_words, void **nxt)
380 {
381 int rval;
382 uint32_t cnt, stat, timer, words, idx;
383 uint16_t mb0;
384 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
385 dma_addr_t dump_dma = ha->gid_list_dma;
386 __le16 *dump = (__force __le16 *)ha->gid_list;
387
388 rval = QLA_SUCCESS;
389 mb0 = 0;
390
391 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
392 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
393
394 words = qla2x00_gid_list_size(ha) / 2;
395 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
396 cnt += words, addr += words) {
397 if (cnt + words > ram_words)
398 words = ram_words - cnt;
399
400 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
401 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
402
403 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
404 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
405 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
406 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
407
408 WRT_MAILBOX_REG(ha, reg, 4, words);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = rd_reg_dword(&reg->u.isp2300.host_status);
414 if (stat & HSR_RISC_INT) {
415 stat &= 0xff;
416
417 if (stat == 0x1 || stat == 0x2) {
418 set_bit(MBX_INTERRUPT,
419 &ha->mbx_cmd_flags);
420
421 mb0 = RD_MAILBOX_REG(ha, reg, 0);
422
423 /* Release mailbox registers. */
					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
428 break;
429 } else if (stat == 0x10 || stat == 0x11) {
430 set_bit(MBX_INTERRUPT,
431 &ha->mbx_cmd_flags);
432
433 mb0 = RD_MAILBOX_REG(ha, reg, 0);
434
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
444 }
445 udelay(5);
446 }
447
448 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
449 rval = mb0 & MBS_MASK;
450 for (idx = 0; idx < words; idx++)
451 ram[cnt + idx] =
452 cpu_to_be16(le16_to_cpu(dump[idx]));
453 } else {
454 rval = QLA_FUNCTION_FAILED;
455 }
456 }
457
458 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
459 return rval;
460 }
461
462 static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
	__be16 *buf)
465 {
	__le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
467
468 for ( ; count--; dmp_reg++)
469 *buf++ = htons(rd_reg_word(dmp_reg));
470 }
471
472 static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
474 {
475 if (!ha->eft)
476 return ptr;
477
478 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
479 return ptr + ntohl(ha->fw_dump->eft_size);
480 }
481
482 static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
484 {
485 uint32_t cnt;
486 __be32 *iter_reg;
487 struct qla2xxx_fce_chain *fcec = ptr;
488
489 if (!ha->fce)
490 return ptr;
491
492 *last_chain = &fcec->type;
493 fcec->type = htonl(DUMP_CHAIN_FCE);
494 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
495 fce_calc_size(ha->fce_bufs));
496 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
497 fcec->addr_l = htonl(LSD(ha->fce_dma));
498 fcec->addr_h = htonl(MSD(ha->fce_dma));
499
500 iter_reg = fcec->eregs;
501 for (cnt = 0; cnt < 8; cnt++)
502 *iter_reg++ = htonl(ha->fce_mb[cnt]);
503
504 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
505
506 return (char *)iter_reg + ntohl(fcec->size);
507 }
508
509 static inline void *
qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
511 {
512 struct qla2xxx_offld_chain *c = ptr;
513
514 if (!ha->exlogin_buf)
515 return ptr;
516
517 *last_chain = &c->type;
518
519 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
520 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
521 ha->exlogin_size);
522 c->size = cpu_to_be32(ha->exlogin_size);
523 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
524
525 ptr += sizeof(struct qla2xxx_offld_chain);
526 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
527
528 return (char *)ptr + be32_to_cpu(c->size);
529 }
530
531 static inline void *
qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
533 {
534 struct qla2xxx_offld_chain *c = ptr;
535
536 if (!ha->exchoffld_buf)
537 return ptr;
538
539 *last_chain = &c->type;
540
541 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
542 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
543 ha->exchoffld_size);
544 c->size = cpu_to_be32(ha->exchoffld_size);
545 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
546
547 ptr += sizeof(struct qla2xxx_offld_chain);
548 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
549
550 return (char *)ptr + be32_to_cpu(c->size);
551 }
552
553 static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
	__be32 **last_chain)
556 {
557 struct qla2xxx_mqueue_chain *q;
558 struct qla2xxx_mqueue_header *qh;
559 uint32_t num_queues;
560 int que;
561 struct {
562 int length;
563 void *ring;
564 } aq, *aqp;
565
566 if (!ha->tgt.atio_ring)
567 return ptr;
568
569 num_queues = 1;
570 aqp = &aq;
571 aqp->length = ha->tgt.atio_q_length;
572 aqp->ring = ha->tgt.atio_ring;
573
574 for (que = 0; que < num_queues; que++) {
575 /* aqp = ha->atio_q_map[que]; */
576 q = ptr;
577 *last_chain = &q->type;
578 q->type = htonl(DUMP_CHAIN_QUEUE);
579 q->chain_size = htonl(
580 sizeof(struct qla2xxx_mqueue_chain) +
581 sizeof(struct qla2xxx_mqueue_header) +
582 (aqp->length * sizeof(request_t)));
583 ptr += sizeof(struct qla2xxx_mqueue_chain);
584
585 /* Add header. */
586 qh = ptr;
587 qh->queue = htonl(TYPE_ATIO_QUEUE);
588 qh->number = htonl(que);
589 qh->size = htonl(aqp->length * sizeof(request_t));
590 ptr += sizeof(struct qla2xxx_mqueue_header);
591
592 /* Add data. */
593 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
594
595 ptr += aqp->length * sizeof(request_t);
596 }
597
598 return ptr;
599 }
600
601 static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
603 {
604 struct qla2xxx_mqueue_chain *q;
605 struct qla2xxx_mqueue_header *qh;
606 struct req_que *req;
607 struct rsp_que *rsp;
608 int que;
609
610 if (!ha->mqenable)
611 return ptr;
612
613 /* Request queues */
614 for (que = 1; que < ha->max_req_queues; que++) {
615 req = ha->req_q_map[que];
616 if (!req)
617 break;
618
619 /* Add chain. */
620 q = ptr;
621 *last_chain = &q->type;
622 q->type = htonl(DUMP_CHAIN_QUEUE);
623 q->chain_size = htonl(
624 sizeof(struct qla2xxx_mqueue_chain) +
625 sizeof(struct qla2xxx_mqueue_header) +
626 (req->length * sizeof(request_t)));
627 ptr += sizeof(struct qla2xxx_mqueue_chain);
628
629 /* Add header. */
630 qh = ptr;
631 qh->queue = htonl(TYPE_REQUEST_QUEUE);
632 qh->number = htonl(que);
633 qh->size = htonl(req->length * sizeof(request_t));
634 ptr += sizeof(struct qla2xxx_mqueue_header);
635
636 /* Add data. */
637 memcpy(ptr, req->ring, req->length * sizeof(request_t));
638 ptr += req->length * sizeof(request_t);
639 }
640
641 /* Response queues */
642 for (que = 1; que < ha->max_rsp_queues; que++) {
643 rsp = ha->rsp_q_map[que];
644 if (!rsp)
645 break;
646
647 /* Add chain. */
648 q = ptr;
649 *last_chain = &q->type;
650 q->type = htonl(DUMP_CHAIN_QUEUE);
651 q->chain_size = htonl(
652 sizeof(struct qla2xxx_mqueue_chain) +
653 sizeof(struct qla2xxx_mqueue_header) +
654 (rsp->length * sizeof(response_t)));
655 ptr += sizeof(struct qla2xxx_mqueue_chain);
656
657 /* Add header. */
658 qh = ptr;
659 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
660 qh->number = htonl(que);
661 qh->size = htonl(rsp->length * sizeof(response_t));
662 ptr += sizeof(struct qla2xxx_mqueue_header);
663
664 /* Add data. */
665 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
666 ptr += rsp->length * sizeof(response_t);
667 }
668
669 return ptr;
670 }
671
672 static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
674 {
675 uint32_t cnt, que_idx;
676 uint8_t que_cnt;
677 struct qla2xxx_mq_chain *mq = ptr;
678 device_reg_t *reg;
679
680 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
681 IS_QLA28XX(ha))
682 return ptr;
683
684 mq = ptr;
685 *last_chain = &mq->type;
686 mq->type = htonl(DUMP_CHAIN_MQ);
687 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
688
689 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
690 ha->max_req_queues : ha->max_rsp_queues;
691 mq->count = htonl(que_cnt);
692 for (cnt = 0; cnt < que_cnt; cnt++) {
693 reg = ISP_QUE_REG(ha, cnt);
694 que_idx = cnt * 4;
		mq->qregs[que_idx] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
		mq->qregs[que_idx+1] =
		    htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
		mq->qregs[que_idx+2] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
		mq->qregs[que_idx+3] =
		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
703 }
704
705 return ptr + sizeof(struct qla2xxx_mq_chain);
706 }
707
708 void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
710 {
711 struct qla_hw_data *ha = vha->hw;
712
713 if (rval != QLA_SUCCESS) {
714 ql_log(ql_log_warn, vha, 0xd000,
715 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
716 rval, ha->fw_dump_cap_flags);
717 ha->fw_dumped = false;
718 } else {
719 ql_log(ql_log_info, vha, 0xd001,
720 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
721 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
722 ha->fw_dumped = true;
723 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
724 }
725 }
726
void qla2xxx_dump_fw(scsi_qla_host_t *vha)
728 {
729 unsigned long flags;
730
731 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
732 vha->hw->isp_ops->fw_dump(vha);
733 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
734 }
735
736 /**
737 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
738 * @vha: HA context
739 */
740 void
qla2300_fw_dump(scsi_qla_host_t *vha)
742 {
743 int rval;
744 uint32_t cnt;
745 struct qla_hw_data *ha = vha->hw;
746 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
747 __le16 __iomem *dmp_reg;
748 struct qla2300_fw_dump *fw;
749 void *nxt;
750 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
751
752 lockdep_assert_held(&ha->hardware_lock);
753
754 if (!ha->fw_dump) {
755 ql_log(ql_log_warn, vha, 0xd002,
756 "No buffer available for dump.\n");
757 return;
758 }
759
760 if (ha->fw_dumped) {
761 ql_log(ql_log_warn, vha, 0xd003,
762 "Firmware has been previously dumped (%p) "
763 "-- ignoring request.\n",
764 ha->fw_dump);
765 return;
766 }
767 fw = &ha->fw_dump->isp.isp23;
768 qla2xxx_prep_dump(ha, ha->fw_dump);
769
770 rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		rd_reg_word(&reg->hccr); /* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
		    cnt++, dmp_reg++)
			fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
		    cnt++, dmp_reg++)
			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
		    cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
854 break;
855
856 udelay(10);
857 }
858 }
859
860 if (!IS_QLA2300(ha)) {
861 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
862 rval == QLA_SUCCESS; cnt--) {
863 if (cnt)
864 udelay(100);
865 else
866 rval = QLA_FUNCTION_TIMEOUT;
867 }
868 }
869
870 /* Get RISC SRAM. */
871 if (rval == QLA_SUCCESS)
872 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
873 ARRAY_SIZE(fw->risc_ram), &nxt);
874
875 /* Get stack SRAM. */
876 if (rval == QLA_SUCCESS)
877 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
878 ARRAY_SIZE(fw->stack_ram), &nxt);
879
880 /* Get data SRAM. */
881 if (rval == QLA_SUCCESS)
882 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
883 ha->fw_memory_size - 0x11000 + 1, &nxt);
884
885 if (rval == QLA_SUCCESS)
886 qla2xxx_copy_queues(ha, nxt);
887
888 qla2xxx_dump_post_process(base_vha, rval);
889 }
890
891 /**
892 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
893 * @vha: HA context
894 */
895 void
qla2100_fw_dump(scsi_qla_host_t *vha)
897 {
898 int rval;
899 uint32_t cnt, timer;
900 uint16_t risc_address = 0;
901 uint16_t mb0 = 0, mb2 = 0;
902 struct qla_hw_data *ha = vha->hw;
903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
904 __le16 __iomem *dmp_reg;
905 struct qla2100_fw_dump *fw;
906 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
907
908 lockdep_assert_held(&ha->hardware_lock);
909
910 if (!ha->fw_dump) {
911 ql_log(ql_log_warn, vha, 0xd004,
912 "No buffer available for dump.\n");
913 return;
914 }
915
916 if (ha->fw_dumped) {
917 ql_log(ql_log_warn, vha, 0xd005,
918 "Firmware has been previously dumped (%p) "
919 "-- ignoring request.\n",
920 ha->fw_dump);
921 return;
922 }
923 fw = &ha->fw_dump->isp.isp21;
924 qla2xxx_prep_dump(ha, ha->fw_dump);
925
926 rval = QLA_SUCCESS;
	fw->hccr = htons(rd_reg_word(&reg->hccr));

	/* Pause RISC. */
	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
			fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));

		wrt_reg_word(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		wrt_reg_word(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		wrt_reg_word(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		wrt_reg_word(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		wrt_reg_word(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		wrt_reg_word(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		wrt_reg_word(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		wrt_reg_word(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		wrt_reg_word(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		wrt_reg_word(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		wrt_reg_word(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
995 }
996
997 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
998 rval == QLA_SUCCESS; cnt--) {
999 if (cnt)
1000 udelay(100);
1001 else
1002 rval = QLA_FUNCTION_TIMEOUT;
1003 }
1004
1005 /* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1012 rval == QLA_SUCCESS; cnt--) {
1013 if (cnt)
1014 udelay(100);
1015 else
1016 rval = QLA_FUNCTION_TIMEOUT;
1017 }
1018 if (rval == QLA_SUCCESS) {
1019 /* Set memory configuration and timing. */
1020 if (IS_QLA2100(ha))
				wrt_reg_word(&reg->mctr, 0xf1);
			else
				wrt_reg_word(&reg->mctr, 0xf2);
			rd_reg_word(&reg->mctr); /* PCI Posting. */

			/* Release RISC. */
			wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
1028 }
1029 }
1030
1031 if (rval == QLA_SUCCESS) {
1032 /* Get RISC SRAM. */
1033 risc_address = 0x1000;
1034 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1035 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1036 }
1037 for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
1038 cnt++, risc_address++) {
1039 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
				if (rd_reg_word(&reg->semaphore) & BIT_0) {
1046 set_bit(MBX_INTERRUPT,
1047 &ha->mbx_cmd_flags);
1048
1049 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1050 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1051
					wrt_reg_word(&reg->semaphore, 0);
					wrt_reg_word(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					rd_reg_word(&reg->hccr);
					break;
				}
				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
				rd_reg_word(&reg->hccr);
1060 }
1061 udelay(5);
1062 }
1063
1064 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1065 rval = mb0 & MBS_MASK;
1066 fw->risc_ram[cnt] = htons(mb2);
1067 } else {
1068 rval = QLA_FUNCTION_FAILED;
1069 }
1070 }
1071
1072 if (rval == QLA_SUCCESS)
1073 qla2xxx_copy_queues(ha, &fw->queue_dump[0]);
1074
1075 qla2xxx_dump_post_process(base_vha, rval);
1076 }
1077
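/**
 * qla24xx_fw_dump() - Dumps binary data from the 24xx firmware.
 * @vha: HA context
 */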
1078 void
qla24xx_fw_dump(scsi_qla_host_t *vha)
1080 {
1081 int rval;
1082 uint32_t cnt;
1083 struct qla_hw_data *ha = vha->hw;
1084 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1085 __le32 __iomem *dmp_reg;
1086 __be32 *iter_reg;
1087 __le16 __iomem *mbx_reg;
1088 struct qla24xx_fw_dump *fw;
1089 void *nxt;
1090 void *nxt_chain;
1091 __be32 *last_chain = NULL;
1092 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1093
1094 lockdep_assert_held(&ha->hardware_lock);
1095
1096 if (IS_P3P_TYPE(ha))
1097 return;
1098
1099 ha->fw_dump_cap_flags = 0;
1100
1101 if (!ha->fw_dump) {
1102 ql_log(ql_log_warn, vha, 0xd006,
1103 "No buffer available for dump.\n");
1104 return;
1105 }
1106
1107 if (ha->fw_dumped) {
1108 ql_log(ql_log_warn, vha, 0xd007,
1109 "Firmware has been previously dumped (%p) "
1110 "-- ignoring request.\n",
1111 ha->fw_dump);
1112 return;
1113 }
1114 QLA_FW_STOPPED(ha);
1115 fw = &ha->fw_dump->isp.isp24;
1116 qla2xxx_prep_dump(ha, ha->fw_dump);
1117
	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
1123 */
1124 qla24xx_pause_risc(reg, ha);
1125
1126 /* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
1161 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1162 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1163
1164 /* Transfer sequence registers. */
1165 iter_reg = fw->xseq_gp_reg;
1166 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1167 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1168 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1169 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1170 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1171 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1172 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1173 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1174
1175 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1176 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1177
1178 /* Receive sequence registers. */
1179 iter_reg = fw->rseq_gp_reg;
1180 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1181 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1182 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1183 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1184 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1185 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1186 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1187 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1188
1189 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1190 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1191 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1192
1193 /* Command DMA registers. */
1194 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1195
1196 /* Queues. */
1197 iter_reg = fw->req0_dma_reg;
1198 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1200 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1201 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1202
1203 iter_reg = fw->resp0_dma_reg;
1204 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1206 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1207 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1208
1209 iter_reg = fw->req1_dma_reg;
1210 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1212 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1213 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1214
1215 /* Transmit DMA registers. */
1216 iter_reg = fw->xmt0_dma_reg;
1217 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1218 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1219
1220 iter_reg = fw->xmt1_dma_reg;
1221 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1222 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1223
1224 iter_reg = fw->xmt2_dma_reg;
1225 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1226 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1227
1228 iter_reg = fw->xmt3_dma_reg;
1229 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1230 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1231
1232 iter_reg = fw->xmt4_dma_reg;
1233 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1234 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1235
1236 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1237
1238 /* Receive DMA registers. */
1239 iter_reg = fw->rcvt0_data_dma_reg;
1240 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1241 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1242
1243 iter_reg = fw->rcvt1_data_dma_reg;
1244 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1245 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1246
1247 /* RISC registers. */
1248 iter_reg = fw->risc_gp_reg;
1249 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1254 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1255 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1256 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1257
1258 /* Local memory controller registers. */
1259 iter_reg = fw->lmc_reg;
1260 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1265 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1266 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1267
1268 /* Fibre Protocol Module registers. */
1269 iter_reg = fw->fpm_hdw_reg;
1270 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1274 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1275 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1276 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1277 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1278 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1279 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1280 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1281 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1282
1283 /* Frame Buffer registers. */
1284 iter_reg = fw->fb_hdw_reg;
1285 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1286 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1287 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1288 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1289 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1294 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1295 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1296
1297 rval = qla24xx_soft_reset(ha);
1298 if (rval != QLA_SUCCESS)
1299 goto qla24xx_fw_dump_failed_0;
1300
1301 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1302 &nxt);
1303 if (rval != QLA_SUCCESS)
1304 goto qla24xx_fw_dump_failed_0;
1305
1306 nxt = qla2xxx_copy_queues(ha, nxt);
1307
1308 qla24xx_copy_eft(ha, nxt);
1309
1310 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1311 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1312 if (last_chain) {
1313 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1314 *last_chain |= htonl(DUMP_CHAIN_LAST);
1315 }
1316
1317 /* Adjust valid length. */
1318 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1319
1320 qla24xx_fw_dump_failed_0:
1321 qla2xxx_dump_post_process(base_vha, rval);
1322 }
1323
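/**
 * qla25xx_fw_dump() - Dumps binary data from the 25xx firmware.
 * @vha: HA context
 */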
1324 void
qla25xx_fw_dump(scsi_qla_host_t *vha)
1326 {
1327 int rval;
1328 uint32_t cnt;
1329 struct qla_hw_data *ha = vha->hw;
1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1331 __le32 __iomem *dmp_reg;
1332 __be32 *iter_reg;
1333 __le16 __iomem *mbx_reg;
1334 struct qla25xx_fw_dump *fw;
1335 void *nxt, *nxt_chain;
1336 __be32 *last_chain = NULL;
1337 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1338
1339 lockdep_assert_held(&ha->hardware_lock);
1340
1341 ha->fw_dump_cap_flags = 0;
1342
1343 if (!ha->fw_dump) {
1344 ql_log(ql_log_warn, vha, 0xd008,
1345 "No buffer available for dump.\n");
1346 return;
1347 }
1348
1349 if (ha->fw_dumped) {
1350 ql_log(ql_log_warn, vha, 0xd009,
1351 "Firmware has been previously dumped (%p) "
1352 "-- ignoring request.\n",
1353 ha->fw_dump);
1354 return;
1355 }
1356 QLA_FW_STOPPED(ha);
1357 fw = &ha->fw_dump->isp.isp25;
1358 qla2xxx_prep_dump(ha, ha->fw_dump);
1359 ha->fw_dump->version = htonl(2);
1360
	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
1366 */
1367 qla24xx_pause_risc(reg, ha);
1368
1369 /* Host/Risc registers. */
1370 iter_reg = fw->host_risc_reg;
1371 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1372 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1373
1374 /* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
1440 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1441 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1442
1443 /* Transfer sequence registers. */
1444 iter_reg = fw->xseq_gp_reg;
1445 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1446 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1447 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1448 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1449 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1451 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1452 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1453
1454 iter_reg = fw->xseq_0_reg;
1455 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1456 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1457 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1458
1459 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1460
1461 /* Receive sequence registers. */
1462 iter_reg = fw->rseq_gp_reg;
1463 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1464 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1465 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1466 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1467 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1469 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1470 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1471
1472 iter_reg = fw->rseq_0_reg;
1473 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1474 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1475
1476 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1477 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1478
1479 /* Auxiliary sequence registers. */
1480 iter_reg = fw->aseq_gp_reg;
1481 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1482 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1483 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1484 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1485 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1486 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1487 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1488 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1489
1490 iter_reg = fw->aseq_0_reg;
1491 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1492 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1493
1494 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1495 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1496
1497 /* Command DMA registers. */
1498 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1499
1500 /* Queues. */
1501 iter_reg = fw->req0_dma_reg;
1502 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1504 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1505 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1506
1507 iter_reg = fw->resp0_dma_reg;
1508 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1510 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1511 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1512
1513 iter_reg = fw->req1_dma_reg;
1514 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1516 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1517 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1518
1519 /* Transmit DMA registers. */
1520 iter_reg = fw->xmt0_dma_reg;
1521 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1522 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1523
1524 iter_reg = fw->xmt1_dma_reg;
1525 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1526 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1527
1528 iter_reg = fw->xmt2_dma_reg;
1529 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1530 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1531
1532 iter_reg = fw->xmt3_dma_reg;
1533 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1534 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1535
1536 iter_reg = fw->xmt4_dma_reg;
1537 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1538 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1539
1540 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1541
1542 /* Receive DMA registers. */
1543 iter_reg = fw->rcvt0_data_dma_reg;
1544 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1545 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1546
1547 iter_reg = fw->rcvt1_data_dma_reg;
1548 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1549 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1550
1551 /* RISC registers. */
1552 iter_reg = fw->risc_gp_reg;
1553 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1554 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1555 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1556 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1557 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1558 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1559 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1560 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1561
1562 /* Local memory controller registers. */
1563 iter_reg = fw->lmc_reg;
1564 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1565 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1566 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1567 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1570 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1571 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1572
1573 /* Fibre Protocol Module registers. */
1574 iter_reg = fw->fpm_hdw_reg;
1575 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1576 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1577 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1578 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1586 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1587
1588 /* Frame Buffer registers. */
1589 iter_reg = fw->fb_hdw_reg;
1590 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1591 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1592 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1593 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1594 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1595 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1601 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1602
1603 /* Multi queue registers */
1604 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1605 &last_chain);
1606
1607 rval = qla24xx_soft_reset(ha);
1608 if (rval != QLA_SUCCESS)
1609 goto qla25xx_fw_dump_failed_0;
1610
1611 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1612 &nxt);
1613 if (rval != QLA_SUCCESS)
1614 goto qla25xx_fw_dump_failed_0;
1615
1616 nxt = qla2xxx_copy_queues(ha, nxt);
1617
1618 qla24xx_copy_eft(ha, nxt);
1619
1620 /* Chain entries -- started with MQ. */
1621 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1622 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1623 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1624 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1625 if (last_chain) {
1626 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1627 *last_chain |= htonl(DUMP_CHAIN_LAST);
1628 }
1629
1630 /* Adjust valid length. */
1631 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1632
1633 qla25xx_fw_dump_failed_0:
1634 qla2xxx_dump_post_process(base_vha, rval);
1635 }
1636
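/**
 * qla81xx_fw_dump() - Dumps binary data from the 81xx firmware.
 * @vha: HA context
 */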
1637 void
qla81xx_fw_dump(scsi_qla_host_t *vha)
1639 {
1640 int rval;
1641 uint32_t cnt;
1642 struct qla_hw_data *ha = vha->hw;
1643 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1644 __le32 __iomem *dmp_reg;
1645 __be32 *iter_reg;
1646 __le16 __iomem *mbx_reg;
1647 struct qla81xx_fw_dump *fw;
1648 void *nxt, *nxt_chain;
1649 __be32 *last_chain = NULL;
1650 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1651
1652 lockdep_assert_held(&ha->hardware_lock);
1653
1654 ha->fw_dump_cap_flags = 0;
1655
1656 if (!ha->fw_dump) {
1657 ql_log(ql_log_warn, vha, 0xd00a,
1658 "No buffer available for dump.\n");
1659 return;
1660 }
1661
1662 if (ha->fw_dumped) {
1663 ql_log(ql_log_warn, vha, 0xd00b,
1664 "Firmware has been previously dumped (%p) "
1665 "-- ignoring request.\n",
1666 ha->fw_dump);
1667 return;
1668 }
1669 fw = &ha->fw_dump->isp.isp81;
1670 qla2xxx_prep_dump(ha, ha->fw_dump);
1671
	fw->host_status = htonl(rd_reg_dword(&reg->host_status));

	/*
	 * Pause RISC. No need to track timeout, as resetting the chip
	 * is the right approach in case of pause timeout.
1677 */
1678 qla24xx_pause_risc(reg, ha);
1679
1680 /* Host/Risc registers. */
1681 iter_reg = fw->host_risc_reg;
1682 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1683 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1684
1685 /* PCIe registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
	dmp_reg++;
	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));

	wrt_reg_dword(&reg->iobase_window, 0x00);
	rd_reg_dword(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));

	/* Disable interrupts. */
	wrt_reg_dword(&reg->ictrl, 0);
	rd_reg_dword(&reg->ictrl);

	/* Shadow registers. */
	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));

	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));

	/* RISC I/O register. */
	wrt_reg_dword(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
1751 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1752 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1753
1754 /* Transfer sequence registers. */
1755 iter_reg = fw->xseq_gp_reg;
1756 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1757 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1758 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1759 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1760 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1761 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1762 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1763 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1764
1765 iter_reg = fw->xseq_0_reg;
1766 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1767 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1768 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1769
1770 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1771
1772 /* Receive sequence registers. */
1773 iter_reg = fw->rseq_gp_reg;
1774 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1775 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1776 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1777 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1778 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1779 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1780 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1781 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1782
1783 iter_reg = fw->rseq_0_reg;
1784 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1785 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1786
1787 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1788 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1789
1790 /* Auxiliary sequence registers. */
1791 iter_reg = fw->aseq_gp_reg;
1792 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1793 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1794 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1795 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1796 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1797 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1798 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1799 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1800
1801 iter_reg = fw->aseq_0_reg;
1802 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1803 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1804
1805 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1806 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1807
1808 /* Command DMA registers. */
1809 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1810
1811 /* Queues. */
1812 iter_reg = fw->req0_dma_reg;
1813 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1814 dmp_reg = &reg->iobase_q;
1815 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1816 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1817
1818 iter_reg = fw->resp0_dma_reg;
1819 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1820 dmp_reg = &reg->iobase_q;
1821 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1822 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1823
1824 iter_reg = fw->req1_dma_reg;
1825 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1826 dmp_reg = &reg->iobase_q;
1827 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1828 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1829
1830 /* Transmit DMA registers. */
1831 iter_reg = fw->xmt0_dma_reg;
1832 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1833 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1834
1835 iter_reg = fw->xmt1_dma_reg;
1836 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1837 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1838
1839 iter_reg = fw->xmt2_dma_reg;
1840 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1841 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1842
1843 iter_reg = fw->xmt3_dma_reg;
1844 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1845 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1846
1847 iter_reg = fw->xmt4_dma_reg;
1848 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1849 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1850
1851 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1852
1853 /* Receive DMA registers. */
1854 iter_reg = fw->rcvt0_data_dma_reg;
1855 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1856 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1857
1858 iter_reg = fw->rcvt1_data_dma_reg;
1859 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1860 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1861
1862 /* RISC registers. */
1863 iter_reg = fw->risc_gp_reg;
1864 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1865 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1866 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1867 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1868 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1869 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1870 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1871 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1872
1873 /* Local memory controller registers. */
1874 iter_reg = fw->lmc_reg;
1875 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1876 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1877 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1878 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1879 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1880 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1881 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1882 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1883
1884 /* Fibre Protocol Module registers. */
1885 iter_reg = fw->fpm_hdw_reg;
1886 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1891 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1892 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1893 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1894 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1895 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1896 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1897 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1898 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1899 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1900
1901 /* Frame Buffer registers. */
1902 iter_reg = fw->fb_hdw_reg;
1903 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1904 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1914 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1915 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1916
1917 /* Multi queue registers */
1918 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1919 &last_chain);
1920
1921 rval = qla24xx_soft_reset(ha);
1922 if (rval != QLA_SUCCESS)
1923 goto qla81xx_fw_dump_failed_0;
1924
1925 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1926 &nxt);
1927 if (rval != QLA_SUCCESS)
1928 goto qla81xx_fw_dump_failed_0;
1929
1930 nxt = qla2xxx_copy_queues(ha, nxt);
1931
1932 qla24xx_copy_eft(ha, nxt);
1933
1934 /* Chain entries -- started with MQ. */
1935 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1936 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1937 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1938 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1939 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1940 if (last_chain) {
1941 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1942 *last_chain |= htonl(DUMP_CHAIN_LAST);
1943 }
1944
1945 /* Adjust valid length. */
1946 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1947
1948 qla81xx_fw_dump_failed_0:
1949 qla2xxx_dump_post_process(base_vha, rval);
1950 }
1951
1952 void
1953 qla83xx_fw_dump(scsi_qla_host_t *vha)
1954 {
1955 int rval;
1956 uint32_t cnt;
1957 struct qla_hw_data *ha = vha->hw;
1958 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1959 __le32 __iomem *dmp_reg;
1960 __be32 *iter_reg;
1961 __le16 __iomem *mbx_reg;
1962 struct qla83xx_fw_dump *fw;
1963 void *nxt, *nxt_chain;
1964 __be32 *last_chain = NULL;
1965 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1966
1967 lockdep_assert_held(&ha->hardware_lock);
1968
1969 ha->fw_dump_cap_flags = 0;
1970
1971 if (!ha->fw_dump) {
1972 ql_log(ql_log_warn, vha, 0xd00c,
1973 "No buffer available for dump!!!\n");
1974 return;
1975 }
1976
1977 if (ha->fw_dumped) {
1978 ql_log(ql_log_warn, vha, 0xd00d,
1979 "Firmware has been previously dumped (%p) -- ignoring "
1980 "request...\n", ha->fw_dump);
1981 return;
1982 }
1983 QLA_FW_STOPPED(ha);
1984 fw = &ha->fw_dump->isp.isp83;
1985 qla2xxx_prep_dump(ha, ha->fw_dump);
1986
1987 fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1988
1989 /*
1990 * Pause RISC. No need to track timeout, as resetting the chip
1991 * is the right approach in case of a pause timeout.
1992 */
1993 qla24xx_pause_risc(reg, ha);
1994
1995 wrt_reg_dword(&reg->iobase_addr, 0x6000);
1996 dmp_reg = &reg->iobase_window;
1997 rd_reg_dword(dmp_reg);
1998 wrt_reg_dword(dmp_reg, 0);
1999
2000 dmp_reg = &reg->unused_4_1[0];
2001 rd_reg_dword(dmp_reg);
2002 wrt_reg_dword(dmp_reg, 0);
2003
2004 wrt_reg_dword(&reg->iobase_addr, 0x6010);
2005 dmp_reg = &reg->unused_4_1[2];
2006 rd_reg_dword(dmp_reg);
2007 wrt_reg_dword(dmp_reg, 0);
2008
2009 /* select PCR and disable ecc checking and correction */
2010 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2011 rd_reg_dword(&reg->iobase_addr);
2012 wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2013
2014 /* Host/Risc registers. */
2015 iter_reg = fw->host_risc_reg;
2016 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2017 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2018 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2019
2020 /* PCIe registers. */
2021 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
2022 rd_reg_dword(&reg->iobase_addr);
2023 wrt_reg_dword(&reg->iobase_window, 0x01);
2024 dmp_reg = &reg->iobase_c4;
2025 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
2026 dmp_reg++;
2027 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
2028 dmp_reg++;
2029 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
2030 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
2031
2032 wrt_reg_dword(&reg->iobase_window, 0x00);
2033 rd_reg_dword(&reg->iobase_window);
2034
2035 /* Host interface registers. */
2036 dmp_reg = &reg->flash_addr;
2037 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
2038 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
2039
2040 /* Disable interrupts. */
2041 wrt_reg_dword(&reg->ictrl, 0);
2042 rd_reg_dword(&reg->ictrl);
2043
2044 /* Shadow registers. */
2045 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2046 rd_reg_dword(&reg->iobase_addr);
2047 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
2048 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
2049
2050 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
2051 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
2052
2053 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
2054 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
2055
2056 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
2057 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
2058
2059 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
2060 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
2061
2062 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
2063 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
2064
2065 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
2066 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
2067
2068 wrt_reg_dword(&reg->iobase_select, 0xB0700000);
2069 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
2070
2071 wrt_reg_dword(&reg->iobase_select, 0xB0800000);
2072 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
2073
2074 wrt_reg_dword(&reg->iobase_select, 0xB0900000);
2075 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
2076
2077 wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
2078 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
2079
2080 /* RISC I/O register. */
2081 wrt_reg_dword(&reg->iobase_addr, 0x0010);
2082 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
2083
2084 /* Mailbox registers. */
2085 mbx_reg = &reg->mailbox0;
2086 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
2087 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
2088
2089 /* Transfer sequence registers. */
2090 iter_reg = fw->xseq_gp_reg;
2091 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2092 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2093 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2094 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2095 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2096 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2097 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2098 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2099 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2105 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2106 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2107
2108 iter_reg = fw->xseq_0_reg;
2109 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2110 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2111 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2112
2113 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2114
2115 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2116
2117 /* Receive sequence registers. */
2118 iter_reg = fw->rseq_gp_reg;
2119 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2120 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2121 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2122 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2123 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2124 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2125 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2126 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2127 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2129 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2130 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2131 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2132 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2133 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2134 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2135
2136 iter_reg = fw->rseq_0_reg;
2137 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2138 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2139
2140 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2141 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2142 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2143
2144 /* Auxiliary sequence registers. */
2145 iter_reg = fw->aseq_gp_reg;
2146 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2147 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2148 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2161 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2162
2163 iter_reg = fw->aseq_0_reg;
2164 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2165 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2166
2167 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2168 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2169 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2170
2171 /* Command DMA registers. */
2172 iter_reg = fw->cmd_dma_reg;
2173 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2174 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2175 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2176 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2177
2178 /* Queues. */
2179 iter_reg = fw->req0_dma_reg;
2180 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2181 dmp_reg = &reg->iobase_q;
2182 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2183 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2184
2185 iter_reg = fw->resp0_dma_reg;
2186 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2187 dmp_reg = &reg->iobase_q;
2188 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2189 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2190
2191 iter_reg = fw->req1_dma_reg;
2192 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2193 dmp_reg = &reg->iobase_q;
2194 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2195 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2196
2197 /* Transmit DMA registers. */
2198 iter_reg = fw->xmt0_dma_reg;
2199 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2200 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2201
2202 iter_reg = fw->xmt1_dma_reg;
2203 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2204 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2205
2206 iter_reg = fw->xmt2_dma_reg;
2207 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2208 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2209
2210 iter_reg = fw->xmt3_dma_reg;
2211 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2212 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2213
2214 iter_reg = fw->xmt4_dma_reg;
2215 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2216 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2217
2218 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2219
2220 /* Receive DMA registers. */
2221 iter_reg = fw->rcvt0_data_dma_reg;
2222 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2223 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2224
2225 iter_reg = fw->rcvt1_data_dma_reg;
2226 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2227 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2228
2229 /* RISC registers. */
2230 iter_reg = fw->risc_gp_reg;
2231 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2232 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2233 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2234 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2235 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2236 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2237 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2238 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2239
2240 /* Local memory controller registers. */
2241 iter_reg = fw->lmc_reg;
2242 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2243 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2244 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2245 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2246 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2247 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2248 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2249 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2250
2251 /* Fibre Protocol Module registers. */
2252 iter_reg = fw->fpm_hdw_reg;
2253 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2254 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2255 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2256 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2257 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2258 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2259 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2260 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2261 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2262 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2263 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2264 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2265 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2266 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2267 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2268 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2269
2270 /* RQ0 Array registers. */
2271 iter_reg = fw->rq0_array_reg;
2272 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2273 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2274 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2275 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2276 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2277 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2278 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2279 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2280 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2281 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2282 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2283 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2284 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2285 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2286 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2287 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2288
2289 /* RQ1 Array registers. */
2290 iter_reg = fw->rq1_array_reg;
2291 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2294 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2295 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2296 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2297 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2298 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2306 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2307
2308 /* RP0 Array registers. */
2309 iter_reg = fw->rp0_array_reg;
2310 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2325 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2326
2327 /* RP1 Array registers. */
2328 iter_reg = fw->rp1_array_reg;
2329 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2344 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2345
2346 iter_reg = fw->at0_array_reg;
2347 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2354 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2355
2356 /* I/O Queue Control registers. */
2357 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2358
2359 /* Frame Buffer registers. */
2360 iter_reg = fw->fb_hdw_reg;
2361 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2362 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2363 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2364 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2365 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2366 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2367 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2383 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2384 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2387 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2388
2389 /* Multi queue registers */
2390 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2391 &last_chain);
2392
2393 rval = qla24xx_soft_reset(ha);
2394 if (rval != QLA_SUCCESS) {
2395 ql_log(ql_log_warn, vha, 0xd00e,
2396 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2397 rval = QLA_SUCCESS;
2398
2399 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2400
2401 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
2402 rd_reg_dword(&reg->hccr);
2403
2404 wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2405 rd_reg_dword(&reg->hccr);
2406
2407 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
2408 rd_reg_dword(&reg->hccr);
2409
2410 for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
2411 udelay(5);
2412
2413 if (!cnt) {
2414 nxt = fw->code_ram;
2415 nxt += sizeof(fw->code_ram);
2416 nxt += (ha->fw_memory_size - 0x100000 + 1);
2417 goto copy_queue;
2418 } else {
2419 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2420 ql_log(ql_log_warn, vha, 0xd010,
2421 "bigger hammer success?\n");
2422 }
2423 }
2424
2425 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2426 &nxt);
2427 if (rval != QLA_SUCCESS)
2428 goto qla83xx_fw_dump_failed_0;
2429
2430 copy_queue:
2431 nxt = qla2xxx_copy_queues(ha, nxt);
2432
2433 qla24xx_copy_eft(ha, nxt);
2434
2435 /* Chain entries -- started with MQ. */
2436 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2437 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2438 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2439 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2440 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2441 if (last_chain) {
2442 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2443 *last_chain |= htonl(DUMP_CHAIN_LAST);
2444 }
2445
2446 /* Adjust valid length. */
2447 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2448
2449 qla83xx_fw_dump_failed_0:
2450 qla2xxx_dump_post_process(base_vha, rval);
2451 }
2452
2453 /****************************************************************************/
2454 /* Driver Debug Functions. */
2455 /****************************************************************************/
2456
2457 /* Write the debug message prefix into @pbuf. */
2458 static void ql_dbg_prefix(char *pbuf, int pbuf_size,
2459 const scsi_qla_host_t *vha, uint msg_id)
2460 {
2461 if (vha) {
2462 const struct pci_dev *pdev = vha->hw->pdev;
2463
2464 /* <module-name> [<dev-name>]-<msg-id>:<host>: */
2465 snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
2466 dev_name(&(pdev->dev)), msg_id, vha->host_no);
2467 } else {
2468 /* <module-name> [<dev-name>]-<msg-id>: : */
2469 snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2470 "0000:00:00.0", msg_id);
2471 }
2472 }
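
/*
 * For reference, with a vha the prefix built above looks roughly like
 * "qla2xxx [0000:03:00.0]-00fc:1: " -- the PCI address, message id and
 * host number shown here are illustrative values only.
 */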
2473
2474 /*
2475 * This function formats and logs debug information. It is meant to be
2476 * used when a vha is available; it formats the message and logs it
2477 * to the system messages file.
2478 * parameters:
2479 * level: The level of the debug message to be printed. If the
2480 * ql2xextended_error_logging value enables this level, the
2481 * message will appear in the messages file.
2482 * vha: Pointer to the scsi_qla_host_t.
2483 * id: A unique message id. It identifies the
2484 * part of the code from where the message originated.
2485 * fmt: printf-style format string of the message to be displayed.
2486 */
2487 void
2488 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2489 {
2490 va_list va;
2491 struct va_format vaf;
2492 char pbuf[64];
2493
2494 if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
2495 return;
2496
2497 va_start(va, fmt);
2498
2499 vaf.fmt = fmt;
2500 vaf.va = &va;
2501
2502 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
2503
2504 if (!ql_mask_match(level))
2505 trace_ql_dbg_log(pbuf, &vaf);
2506 else
2507 pr_warn("%s%pV", pbuf, &vaf);
2508
2509 va_end(va);
2510
2511 }
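
/*
 * Illustrative call (sketch only; the 0x01ff id and the format argument
 * are made up -- real ids must be reserved in the table at the top of
 * this file):
 *
 *	ql_dbg(ql_dbg_init, vha, 0x01ff,
 *	    "Firmware image validated (%d bytes).\n", fwlen);
 */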
2512
2513 /*
2514 * This function formats and logs debug information. It is meant to be
2515 * used when no vha is available but the pci device is,
2516 * i.e., before host allocation. It formats the message and logs it
2517 * to the system messages file.
2518 * parameters:
2519 * level: The level of the debug message to be printed. If the
2520 * ql2xextended_error_logging value enables this level, the
2521 * message will appear in the messages file.
2522 * pdev: Pointer to the struct pci_dev.
2523 * id: A unique message id. It identifies the part
2524 * of the code from where the message originated.
2525 * fmt: printf-style format string of the message to be displayed.
2526 */
2527 void
2528 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2529 {
2530 va_list va;
2531 struct va_format vaf;
2532 char pbuf[128];
2533
2534 if (pdev == NULL)
2535 return;
2536 if (!ql_mask_match(level))
2537 return;
2538
2539 va_start(va, fmt);
2540
2541 vaf.fmt = fmt;
2542 vaf.va = &va;
2543
2544 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset);
2545 pr_warn("%s%pV", pbuf, &vaf);
2546
2547 va_end(va);
2548 }
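
/*
 * Illustrative call during early probe, before the scsi host exists
 * (sketch only; the 0x01fe id and the argument are made up):
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x01fe,
 *	    "MSI-X vector count: %d.\n", nvec);
 */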
2549
2550 /*
2551 * This function formats and logs log messages. It is meant to be
2552 * used when a vha is available. It formats the message and logs it
2553 * to the system messages file. Messages are logged regardless of
2554 * the value of ql2xextended_error_logging.
2555 * parameters:
2556 * level: The severity level of the log message; it is compared
2557 * against ql_errlev before printing.
2558 * vha: Pointer to the scsi_qla_host_t.
2559 * id: A unique message id. It identifies the
2560 * part of the code from where the message originated.
2561 * fmt: printf-style format string of the message to be displayed.
2562 */
2563 void
2564 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2565 {
2566 va_list va;
2567 struct va_format vaf;
2568 char pbuf[128];
2569
2570 if (level > ql_errlev)
2571 return;
2572
2573 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
2574
2575 va_start(va, fmt);
2576
2577 vaf.fmt = fmt;
2578 vaf.va = &va;
2579
2580 switch (level) {
2581 case ql_log_fatal: /* FATAL LOG */
2582 pr_crit("%s%pV", pbuf, &vaf);
2583 break;
2584 case ql_log_warn:
2585 pr_err("%s%pV", pbuf, &vaf);
2586 break;
2587 case ql_log_info:
2588 pr_warn("%s%pV", pbuf, &vaf);
2589 break;
2590 default:
2591 pr_info("%s%pV", pbuf, &vaf);
2592 break;
2593 }
2594
2595 va_end(va);
2596 }
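
/*
 * Illustrative call (sketch only; the 0x01fd id is made up). Unlike
 * ql_dbg(), this prints whenever the level passes the ql_errlev check:
 *
 *	ql_log(ql_log_warn, vha, 0x01fd,
 *	    "Unexpected mailbox completion status.\n");
 */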
2597
2598 /*
2599 * This function formats and logs log messages. It is meant to be
2600 * used when no vha is available but the pci device is,
2601 * i.e., before host allocation. It formats the message and logs
2602 * it to the system messages file. Messages are logged regardless
2603 * of the value of ql2xextended_error_logging.
2604 * parameters:
2605 * level: The severity level of the log message; it is compared
2606 * against ql_errlev before printing.
2607 * pdev: Pointer to the struct pci_dev.
2608 * id: A unique message id. It identifies the
2609 * part of the code from where the message originated.
2610 * fmt: printf-style format string of the message to be displayed.
2611 */
2612 void
2613 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2614 {
2615 va_list va;
2616 struct va_format vaf;
2617 char pbuf[128];
2618
2619 if (pdev == NULL)
2620 return;
2621 if (level > ql_errlev)
2622 return;
2623
2624 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id);
2625
2626 va_start(va, fmt);
2627
2628 vaf.fmt = fmt;
2629 vaf.va = &va;
2630
2631 switch (level) {
2632 case ql_log_fatal: /* FATAL LOG */
2633 pr_crit("%s%pV", pbuf, &vaf);
2634 break;
2635 case ql_log_warn:
2636 pr_err("%s%pV", pbuf, &vaf);
2637 break;
2638 case ql_log_info:
2639 pr_warn("%s%pV", pbuf, &vaf);
2640 break;
2641 default:
2642 pr_info("%s%pV", pbuf, &vaf);
2643 break;
2644 }
2645
2646 va_end(va);
2647 }
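
/*
 * Illustrative call before host allocation (sketch only; the 0x01fc id
 * is made up):
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x01fc,
 *	    "Unable to map PCI BARs.\n");
 */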
2648
2649 void
2650 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2651 {
2652 int i;
2653 struct qla_hw_data *ha = vha->hw;
2654 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2655 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2656 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2657 __le16 __iomem *mbx_reg;
2658
2659 if (!ql_mask_match(level))
2660 return;
2661
2662 if (IS_P3P_TYPE(ha))
2663 mbx_reg = &reg82->mailbox_in[0];
2664 else if (IS_FWI2_CAPABLE(ha))
2665 mbx_reg = &reg24->mailbox0;
2666 else
2667 mbx_reg = MAILBOX_REG(ha, reg, 0);
2668
2669 ql_dbg(level, vha, id, "Mailbox registers:\n");
2670 for (i = 0; i < 6; i++, mbx_reg++)
2671 ql_dbg(level, vha, id,
2672 "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
2673 }
2674
2675 void
2676 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2677 uint size)
2678 {
2679 uint cnt;
2680
2681 if (!ql_mask_match(level))
2682 return;
2683
2684 ql_dbg(level, vha, id,
2685 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2686 ql_dbg(level, vha, id,
2687 "----- -----------------------------------------------\n");
2688 for (cnt = 0; cnt < size; cnt += 16) {
2689 ql_dbg(level, vha, id, "%04x: ", cnt);
2690 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2691 buf + cnt, min(16U, size - cnt), false);
2692 }
2693 }
2694
2695 /*
2696 * This function formats and logs log messages. It is meant to be
2697 * used when a qpair is available. It formats the message and logs
2698 * it to the system messages file. Messages are logged regardless
2699 * of the value of ql2xextended_error_logging.
2700 * parameters:
2701 * level: The severity level of the log message; it is compared
2702 * against ql_errlev before printing.
2703 * qpair: Pointer to the qla_qpair; may be NULL.
2704 * id: A unique message id. It identifies the
2705 * part of the code from where the message originated.
2706 * fmt: printf-style format string of the message to be displayed.
2707 */
2708 void
2709 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2710 const char *fmt, ...)
2711 {
2712 va_list va;
2713 struct va_format vaf;
2714 char pbuf[128];
2715
2716 if (level > ql_errlev)
2717 return;
2718
2719 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id);
2720
2721 va_start(va, fmt);
2722
2723 vaf.fmt = fmt;
2724 vaf.va = &va;
2725
2726 switch (level) {
2727 case ql_log_fatal: /* FATAL LOG */
2728 pr_crit("%s%pV", pbuf, &vaf);
2729 break;
2730 case ql_log_warn:
2731 pr_err("%s%pV", pbuf, &vaf);
2732 break;
2733 case ql_log_info:
2734 pr_warn("%s%pV", pbuf, &vaf);
2735 break;
2736 default:
2737 pr_info("%s%pV", pbuf, &vaf);
2738 break;
2739 }
2740
2741 va_end(va);
2742 }
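
/*
 * Illustrative call from queue-pair context (sketch only; the 0x01fb id
 * is made up; qpair may be NULL, in which case no host number is printed
 * in the prefix):
 *
 *	ql_log_qp(ql_log_warn, qpair, 0x01fb,
 *	    "Queue pair online state changed.\n");
 */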
2743
2744 /*
2745 * This function formats and logs debug information. It is meant to be
2746 * used when a qpair is available; it formats the message and logs it
2747 * to the system messages file.
2748 * parameters:
2749 * level: The level of the debug message to be printed. If the
2750 * ql2xextended_error_logging value enables this level, the
2751 * message will appear in the messages file.
2752 * qpair: Pointer to the qla_qpair; may be NULL.
2753 * id: A unique message id. It identifies the
2754 * part of the code from where the message originated.
2755 * fmt: printf-style format string of the message to be displayed.
2756 */
2757 void
2758 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2759 const char *fmt, ...)
2760 {
2761 va_list va;
2762 struct va_format vaf;
2763 char pbuf[128];
2764
2765 if (!ql_mask_match(level))
2766 return;
2767
2768 va_start(va, fmt);
2769
2770 vaf.fmt = fmt;
2771 vaf.va = &va;
2772
2773 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
2774 id + ql_dbg_offset);
2775 pr_warn("%s%pV", pbuf, &vaf);
2776
2777 va_end(va);
2778
2779 }
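
/*
 * Illustrative call (sketch only; the 0x01fa id and the argument are
 * made up):
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x01fa,
 *	    "Completing cmd with status %#x.\n", comp_status);
 */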
2780