1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8 /*
 * Table showing the current message id in use for a particular level.
 * Update this table when adding new log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x017f | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e-0x0170 |
17 * | Mailbox commands | 0x118d | 0x1115-0x1116 |
18 * | | | 0x111a-0x111b |
19 * | Device Discovery | 0x2016 | 0x2020-0x2022, |
20 * | | | 0x2011-0x2012, |
21 * | | | 0x2099-0x20a4 |
22 * | Queue Command and IO tracing | 0x3075 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x508a | 0x502b-0x502f |
30 * | | | 0x5047 |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x507b,0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d7-0x70db |
43 * | | | 0x70de-0x70df |
44 * | Task Management | 0x803d | 0x8000,0x800b |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc00c | |
63 * | Misc | 0xd300 | 0xd016-0xd017 |
64 * | | | 0xd021,0xd024 |
65 * | | | 0xd025,0xd029 |
66 * | | | 0xd02a,0xd02e |
67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000d | |
74 * ----------------------------------------------------------------------
75 */
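/*
 * Usage note (illustrative only): a message at a given level pairs that
 * level's ql_dbg_* mask with an id tracked in the table above, e.g. a
 * hypothetical Device Discovery message could be added as
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2017, "example message\n");
 *
 * where 0x2017 and the format string are made up for illustration; a real
 * addition would take the next unused id (or a listed hole) and record it
 * in the table.
 */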
76
77 #include "qla_def.h"
78
79 #include <linux/delay.h>
80
81 static uint32_t ql_dbg_offset = 0x800;
82
83 static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
85 {
86 fw_dump->fw_major_version = htonl(ha->fw_major_version);
87 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
88 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
89 fw_dump->fw_attributes = htonl(ha->fw_attributes);
90
91 fw_dump->vendor = htonl(ha->pdev->vendor);
92 fw_dump->device = htonl(ha->pdev->device);
93 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
94 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
95 }
96
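/*
 * qla2xxx_copy_queues() copies the base request and response rings into
 * the dump buffer and returns the address just past the copied data, so
 * callers can append further dump sections at the returned position.
 */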
97 static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
99 {
100 struct req_que *req = ha->req_q_map[0];
101 struct rsp_que *rsp = ha->rsp_q_map[0];
102 /* Request queue. */
103 memcpy(ptr, req->ring, req->length *
104 sizeof(request_t));
105
106 /* Response queue. */
107 ptr += req->length * sizeof(request_t);
108 memcpy(ptr, rsp->ring, rsp->length *
109 sizeof(response_t));
110
111 return ptr + (rsp->length * sizeof(response_t));
112 }
113
114 int
qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
116 uint32_t ram_dwords, void **nxt)
117 {
118 int rval;
119 uint32_t cnt, stat, timer, dwords, idx;
120 uint16_t mb0;
121 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
122 dma_addr_t dump_dma = ha->gid_list_dma;
123 uint32_t *dump = (uint32_t *)ha->gid_list;
124
125 rval = QLA_SUCCESS;
126 mb0 = 0;
127
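	/*
	 * The MPI RAM is read back in chunks sized to fit the gid_list DMA
	 * buffer: each pass programs the source address, DMA address and
	 * dword count into the mailbox registers, raises the host interrupt,
	 * and polls host_status for the mailbox completion before copying
	 * the chunk into 'ram'.
	 */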
	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
129 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
130
131 dwords = qla2x00_gid_list_size(ha) / 4;
132 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
133 cnt += dwords, addr += dwords) {
134 if (cnt + dwords > ram_dwords)
135 dwords = ram_dwords - cnt;
136
		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));

		WRT_REG_WORD(&reg->mailbox9, 0);
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
150
151 ha->flags.mbox_int = 0;
152 for (timer = 6000000; timer; timer--) {
153 /* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
155 if (stat & HSRX_RISC_INT) {
156 stat &= 0xff;
157
158 if (stat == 0x1 || stat == 0x2 ||
159 stat == 0x10 || stat == 0x11) {
160 set_bit(MBX_INTERRUPT,
161 &ha->mbx_cmd_flags);
162
					mb0 = RD_REG_WORD(&reg->mailbox0);
					RD_REG_WORD(&reg->mailbox1);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
169 break;
170 }
171
172 /* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
175 }
176 udelay(5);
177 }
178 ha->flags.mbox_int = 1;
179
180 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
181 rval = mb0 & MBS_MASK;
182 for (idx = 0; idx < dwords; idx++)
183 ram[cnt + idx] = IS_QLA27XX(ha) ?
184 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
185 } else {
186 rval = QLA_FUNCTION_FAILED;
187 }
188 }
189
190 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
191 return rval;
192 }
193
194 int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
196 uint32_t ram_dwords, void **nxt)
197 {
198 int rval;
199 uint32_t cnt, stat, timer, dwords, idx;
200 uint16_t mb0;
201 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
202 dma_addr_t dump_dma = ha->gid_list_dma;
203 uint32_t *dump = (uint32_t *)ha->gid_list;
204
205 rval = QLA_SUCCESS;
206 mb0 = 0;
207
	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
209 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
210
211 dwords = qla2x00_gid_list_size(ha) / 4;
212 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
213 cnt += dwords, addr += dwords) {
214 if (cnt + dwords > ram_dwords)
215 dwords = ram_dwords - cnt;
216
		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
228
229 ha->flags.mbox_int = 0;
230 for (timer = 6000000; timer; timer--) {
231 /* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
233 if (stat & HSRX_RISC_INT) {
234 stat &= 0xff;
235
236 if (stat == 0x1 || stat == 0x2 ||
237 stat == 0x10 || stat == 0x11) {
238 set_bit(MBX_INTERRUPT,
239 &ha->mbx_cmd_flags);
240
					mb0 = RD_REG_WORD(&reg->mailbox0);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
246 break;
247 }
248
249 /* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
252 }
253 udelay(5);
254 }
255 ha->flags.mbox_int = 1;
256
257 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
258 rval = mb0 & MBS_MASK;
259 for (idx = 0; idx < dwords; idx++)
260 ram[cnt + idx] = IS_QLA27XX(ha) ?
261 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
262 } else {
263 rval = QLA_FUNCTION_FAILED;
264 }
265 }
266
	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
268 return rval;
269 }
270
271 static int
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
273 uint32_t cram_size, void **nxt)
274 {
275 int rval;
276
277 /* Code RAM. */
278 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
279 if (rval != QLA_SUCCESS)
280 return rval;
281
282 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
283
284 /* External Memory. */
285 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
286 ha->fw_memory_size - 0x100000 + 1, nxt);
287 if (rval == QLA_SUCCESS)
288 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
289
290 return rval;
291 }
292
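/*
 * Read 'count' dwords from a windowed register bank: writing the window
 * base to iobase_addr maps that bank onto iobase_window, after which
 * consecutive dwords are read and stored big-endian into 'buf'.  Returns
 * the advanced buffer pointer so reads of adjacent windows can be chained.
 */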
293 static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
295 uint32_t count, uint32_t *buf)
296 {
297 uint32_t __iomem *dmp_reg;
298
	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
301 while (count--)
302 *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
303
304 return buf;
305 }
306
307 void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
309 {
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);

	/* A 100 usec delay is sufficient for the hardware to pause the RISC. */
	udelay(100);
	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
315 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
316 }
317
318 int
qla24xx_soft_reset(struct qla_hw_data *ha)
320 {
321 int rval = QLA_SUCCESS;
322 uint32_t cnt;
323 uint16_t wd;
324 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
325
326 /*
	 * Reset the RISC. The required delay depends on the system
	 * architecture; the driver can proceed with the reset sequence
	 * after waiting for a timeout period.
330 */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
343 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
344
345 udelay(100);
346
347 /* Wait for soft-reset to complete. */
348 for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
350 CSRX_ISP_SOFT_RESET) == 0)
351 break;
352
353 udelay(10);
354 }
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr); /* PCI Posting. */

	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
362 rval == QLA_SUCCESS; cnt--) {
363 if (cnt)
364 udelay(10);
365 else
366 rval = QLA_FUNCTION_TIMEOUT;
367 }
368 if (rval == QLA_SUCCESS)
369 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
370
371 return rval;
372 }
373
374 static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
376 uint32_t ram_words, void **nxt)
377 {
378 int rval;
379 uint32_t cnt, stat, timer, words, idx;
380 uint16_t mb0;
381 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
382 dma_addr_t dump_dma = ha->gid_list_dma;
383 uint16_t *dump = (uint16_t *)ha->gid_list;
384
385 rval = QLA_SUCCESS;
386 mb0 = 0;
387
388 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
389 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
390
391 words = qla2x00_gid_list_size(ha) / 2;
392 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
393 cnt += words, addr += words) {
394 if (cnt + words > ram_words)
395 words = ram_words - cnt;
396
397 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
398 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
399
400 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
401 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
402 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
403 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
404
405 WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
407
408 for (timer = 6000000; timer; timer--) {
409 /* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
411 if (stat & HSR_RISC_INT) {
412 stat &= 0xff;
413
414 if (stat == 0x1 || stat == 0x2) {
415 set_bit(MBX_INTERRUPT,
416 &ha->mbx_cmd_flags);
417
418 mb0 = RD_MAILBOX_REG(ha, reg, 0);
419
420 /* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
425 break;
426 } else if (stat == 0x10 || stat == 0x11) {
427 set_bit(MBX_INTERRUPT,
428 &ha->mbx_cmd_flags);
429
430 mb0 = RD_MAILBOX_REG(ha, reg, 0);
431
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
435 break;
436 }
437
438 /* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
441 }
442 udelay(5);
443 }
444
445 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
446 rval = mb0 & MBS_MASK;
447 for (idx = 0; idx < words; idx++)
448 ram[cnt + idx] = swab16(dump[idx]);
449 } else {
450 rval = QLA_FUNCTION_FAILED;
451 }
452 }
453
	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
455 return rval;
456 }
457
458 static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
460 uint16_t *buf)
461 {
	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
463
464 while (count--)
465 *buf++ = htons(RD_REG_WORD(dmp_reg++));
466 }
467
468 static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
470 {
471 if (!ha->eft)
472 return ptr;
473
474 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
475 return ptr + ntohl(ha->fw_dump->eft_size);
476 }
477
478 static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
480 {
481 uint32_t cnt;
482 uint32_t *iter_reg;
483 struct qla2xxx_fce_chain *fcec = ptr;
484
485 if (!ha->fce)
486 return ptr;
487
488 *last_chain = &fcec->type;
489 fcec->type = htonl(DUMP_CHAIN_FCE);
490 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
491 fce_calc_size(ha->fce_bufs));
492 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
493 fcec->addr_l = htonl(LSD(ha->fce_dma));
494 fcec->addr_h = htonl(MSD(ha->fce_dma));
495
496 iter_reg = fcec->eregs;
497 for (cnt = 0; cnt < 8; cnt++)
498 *iter_reg++ = htonl(ha->fce_mb[cnt]);
499
500 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
501
502 return (char *)iter_reg + ntohl(fcec->size);
503 }
504
505 static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
507 uint32_t **last_chain)
508 {
509 struct qla2xxx_mqueue_chain *q;
510 struct qla2xxx_mqueue_header *qh;
511 uint32_t num_queues;
512 int que;
513 struct {
514 int length;
515 void *ring;
516 } aq, *aqp;
517
518 if (!ha->tgt.atio_ring)
519 return ptr;
520
521 num_queues = 1;
522 aqp = &aq;
523 aqp->length = ha->tgt.atio_q_length;
524 aqp->ring = ha->tgt.atio_ring;
525
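	/*
	 * Only the single base ATIO queue is dumped here; the local 'aq'
	 * descriptor stands in for a per-queue map so the loop below could
	 * be extended to multiple ATIO queues later.
	 */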
526 for (que = 0; que < num_queues; que++) {
527 /* aqp = ha->atio_q_map[que]; */
528 q = ptr;
529 *last_chain = &q->type;
530 q->type = htonl(DUMP_CHAIN_QUEUE);
531 q->chain_size = htonl(
532 sizeof(struct qla2xxx_mqueue_chain) +
533 sizeof(struct qla2xxx_mqueue_header) +
534 (aqp->length * sizeof(request_t)));
535 ptr += sizeof(struct qla2xxx_mqueue_chain);
536
537 /* Add header. */
538 qh = ptr;
539 qh->queue = htonl(TYPE_ATIO_QUEUE);
540 qh->number = htonl(que);
541 qh->size = htonl(aqp->length * sizeof(request_t));
542 ptr += sizeof(struct qla2xxx_mqueue_header);
543
544 /* Add data. */
545 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
546
547 ptr += aqp->length * sizeof(request_t);
548 }
549
550 return ptr;
551 }
552
553 static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
555 {
556 struct qla2xxx_mqueue_chain *q;
557 struct qla2xxx_mqueue_header *qh;
558 struct req_que *req;
559 struct rsp_que *rsp;
560 int que;
561
562 if (!ha->mqenable)
563 return ptr;
564
565 /* Request queues */
566 for (que = 1; que < ha->max_req_queues; que++) {
567 req = ha->req_q_map[que];
568 if (!req)
569 break;
570
571 /* Add chain. */
572 q = ptr;
573 *last_chain = &q->type;
574 q->type = htonl(DUMP_CHAIN_QUEUE);
575 q->chain_size = htonl(
576 sizeof(struct qla2xxx_mqueue_chain) +
577 sizeof(struct qla2xxx_mqueue_header) +
578 (req->length * sizeof(request_t)));
579 ptr += sizeof(struct qla2xxx_mqueue_chain);
580
581 /* Add header. */
582 qh = ptr;
583 qh->queue = htonl(TYPE_REQUEST_QUEUE);
584 qh->number = htonl(que);
585 qh->size = htonl(req->length * sizeof(request_t));
586 ptr += sizeof(struct qla2xxx_mqueue_header);
587
588 /* Add data. */
589 memcpy(ptr, req->ring, req->length * sizeof(request_t));
590 ptr += req->length * sizeof(request_t);
591 }
592
593 /* Response queues */
594 for (que = 1; que < ha->max_rsp_queues; que++) {
595 rsp = ha->rsp_q_map[que];
596 if (!rsp)
597 break;
598
599 /* Add chain. */
600 q = ptr;
601 *last_chain = &q->type;
602 q->type = htonl(DUMP_CHAIN_QUEUE);
603 q->chain_size = htonl(
604 sizeof(struct qla2xxx_mqueue_chain) +
605 sizeof(struct qla2xxx_mqueue_header) +
606 (rsp->length * sizeof(response_t)));
607 ptr += sizeof(struct qla2xxx_mqueue_chain);
608
609 /* Add header. */
610 qh = ptr;
611 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
612 qh->number = htonl(que);
613 qh->size = htonl(rsp->length * sizeof(response_t));
614 ptr += sizeof(struct qla2xxx_mqueue_header);
615
616 /* Add data. */
617 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
618 ptr += rsp->length * sizeof(response_t);
619 }
620
621 return ptr;
622 }
623
624 static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
626 {
627 uint32_t cnt, que_idx;
628 uint8_t que_cnt;
629 struct qla2xxx_mq_chain *mq = ptr;
630 device_reg_t *reg;
631
632 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
633 return ptr;
634
635 mq = ptr;
636 *last_chain = &mq->type;
637 mq->type = htonl(DUMP_CHAIN_MQ);
638 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
639
640 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
641 ha->max_req_queues : ha->max_rsp_queues;
642 mq->count = htonl(que_cnt);
643 for (cnt = 0; cnt < que_cnt; cnt++) {
644 reg = ISP_QUE_REG(ha, cnt);
645 que_idx = cnt * 4;
		mq->qregs[que_idx] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
		mq->qregs[que_idx+1] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
		mq->qregs[que_idx+2] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
		mq->qregs[que_idx+3] =
		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
654 }
655
656 return ptr + sizeof(struct qla2xxx_mq_chain);
657 }
658
659 void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
661 {
662 struct qla_hw_data *ha = vha->hw;
663
664 if (rval != QLA_SUCCESS) {
665 ql_log(ql_log_warn, vha, 0xd000,
666 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
667 rval, ha->fw_dump_cap_flags);
668 ha->fw_dumped = 0;
669 } else {
670 ql_log(ql_log_info, vha, 0xd001,
671 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
672 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
673 ha->fw_dumped = 1;
674 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
675 }
676 }
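/*
 * Note: once ha->fw_dumped is set, the captured dump is normally pulled
 * from user space through the driver's "fw_dump" sysfs binary attribute;
 * the uevent posted above lets udev scripts trigger that collection.  The
 * retrieval path itself lives outside this file.
 */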
677
678 /**
679 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @vha: HA context
681 * @hardware_locked: Called with the hardware_lock
682 */
683 void
qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
685 {
686 int rval;
687 uint32_t cnt;
688 struct qla_hw_data *ha = vha->hw;
689 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
690 uint16_t __iomem *dmp_reg;
691 unsigned long flags;
692 struct qla2300_fw_dump *fw;
693 void *nxt;
694 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
695
696 flags = 0;
697
698 #ifndef __CHECKER__
699 if (!hardware_locked)
700 spin_lock_irqsave(&ha->hardware_lock, flags);
701 #endif
702
703 if (!ha->fw_dump) {
704 ql_log(ql_log_warn, vha, 0xd002,
705 "No buffer available for dump.\n");
706 goto qla2300_fw_dump_failed;
707 }
708
709 if (ha->fw_dumped) {
710 ql_log(ql_log_warn, vha, 0xd003,
711 "Firmware has been previously dumped (%p) "
712 "-- ignoring request.\n",
713 ha->fw_dump);
714 goto qla2300_fw_dump_failed;
715 }
716 fw = &ha->fw_dump->isp.isp23;
717 qla2xxx_prep_dump(ha, ha->fw_dump);
718
719 rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
721
722 /* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
724 if (IS_QLA2300(ha)) {
725 for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
727 rval == QLA_SUCCESS; cnt--) {
728 if (cnt)
729 udelay(100);
730 else
731 rval = QLA_FUNCTION_TIMEOUT;
732 }
733 } else {
		RD_REG_WORD(&reg->hccr); /* PCI Posting. */
735 udelay(10);
736 }
737
738 if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
800 break;
801
802 udelay(10);
803 }
804 }
805
806 if (!IS_QLA2300(ha)) {
807 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
808 rval == QLA_SUCCESS; cnt--) {
809 if (cnt)
810 udelay(100);
811 else
812 rval = QLA_FUNCTION_TIMEOUT;
813 }
814 }
815
816 /* Get RISC SRAM. */
817 if (rval == QLA_SUCCESS)
818 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
819 sizeof(fw->risc_ram) / 2, &nxt);
820
821 /* Get stack SRAM. */
822 if (rval == QLA_SUCCESS)
823 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
824 sizeof(fw->stack_ram) / 2, &nxt);
825
826 /* Get data SRAM. */
827 if (rval == QLA_SUCCESS)
828 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
829 ha->fw_memory_size - 0x11000 + 1, &nxt);
830
831 if (rval == QLA_SUCCESS)
832 qla2xxx_copy_queues(ha, nxt);
833
834 qla2xxx_dump_post_process(base_vha, rval);
835
836 qla2300_fw_dump_failed:
837 #ifndef __CHECKER__
838 if (!hardware_locked)
839 spin_unlock_irqrestore(&ha->hardware_lock, flags);
840 #else
841 ;
842 #endif
843 }
844
845 /**
846 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @vha: HA context
848 * @hardware_locked: Called with the hardware_lock
849 */
850 void
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
852 {
853 int rval;
854 uint32_t cnt, timer;
855 uint16_t risc_address;
856 uint16_t mb0, mb2;
857 struct qla_hw_data *ha = vha->hw;
858 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
859 uint16_t __iomem *dmp_reg;
860 unsigned long flags;
861 struct qla2100_fw_dump *fw;
862 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
863
864 risc_address = 0;
865 mb0 = mb2 = 0;
866 flags = 0;
867
868 #ifndef __CHECKER__
869 if (!hardware_locked)
870 spin_lock_irqsave(&ha->hardware_lock, flags);
871 #endif
872
873 if (!ha->fw_dump) {
874 ql_log(ql_log_warn, vha, 0xd004,
875 "No buffer available for dump.\n");
876 goto qla2100_fw_dump_failed;
877 }
878
879 if (ha->fw_dumped) {
880 ql_log(ql_log_warn, vha, 0xd005,
881 "Firmware has been previously dumped (%p) "
882 "-- ignoring request.\n",
883 ha->fw_dump);
884 goto qla2100_fw_dump_failed;
885 }
886 fw = &ha->fw_dump->isp.isp21;
887 qla2xxx_prep_dump(ha, ha->fw_dump);
888
889 rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
891
892 /* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
895 rval == QLA_SUCCESS; cnt--) {
896 if (cnt)
897 udelay(100);
898 else
899 rval = QLA_FUNCTION_TIMEOUT;
900 }
901 if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
958 }
959
960 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
961 rval == QLA_SUCCESS; cnt--) {
962 if (cnt)
963 udelay(100);
964 else
965 rval = QLA_FUNCTION_TIMEOUT;
966 }
967
968 /* Pause RISC. */
969 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
971
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
975 rval == QLA_SUCCESS; cnt--) {
976 if (cnt)
977 udelay(100);
978 else
979 rval = QLA_FUNCTION_TIMEOUT;
980 }
981 if (rval == QLA_SUCCESS) {
982 /* Set memory configuration and timing. */
983 if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr); /* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
991 }
992 }
993
994 if (rval == QLA_SUCCESS) {
995 /* Get RISC SRAM. */
996 risc_address = 0x1000;
997 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
998 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
999 }
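	/*
	 * Unlike the ISP24xx DMA-based path, 2100/2200 firmware RAM is read
	 * out one 16-bit word at a time: each MBC_READ_RAM_WORD request
	 * returns the word in mailbox register 2, which is polled for below.
	 */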
1000 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1001 cnt++, risc_address++) {
1002 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1004
1005 for (timer = 6000000; timer != 0; timer--) {
1006 /* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1009 set_bit(MBX_INTERRUPT,
1010 &ha->mbx_cmd_flags);
1011
1012 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1013 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1014
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
1019 break;
1020 }
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
1023 }
1024 udelay(5);
1025 }
1026
1027 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1028 rval = mb0 & MBS_MASK;
1029 fw->risc_ram[cnt] = htons(mb2);
1030 } else {
1031 rval = QLA_FUNCTION_FAILED;
1032 }
1033 }
1034
1035 if (rval == QLA_SUCCESS)
1036 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1037
1038 qla2xxx_dump_post_process(base_vha, rval);
1039
1040 qla2100_fw_dump_failed:
1041 #ifndef __CHECKER__
1042 if (!hardware_locked)
1043 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1044 #else
1045 ;
1046 #endif
1047 }
1048
1049 void
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1051 {
1052 int rval;
1053 uint32_t cnt;
1054 struct qla_hw_data *ha = vha->hw;
1055 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1056 uint32_t __iomem *dmp_reg;
1057 uint32_t *iter_reg;
1058 uint16_t __iomem *mbx_reg;
1059 unsigned long flags;
1060 struct qla24xx_fw_dump *fw;
1061 void *nxt;
1062 void *nxt_chain;
1063 uint32_t *last_chain = NULL;
1064 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1065
1066 if (IS_P3P_TYPE(ha))
1067 return;
1068
1069 flags = 0;
1070 ha->fw_dump_cap_flags = 0;
1071
1072 #ifndef __CHECKER__
1073 if (!hardware_locked)
1074 spin_lock_irqsave(&ha->hardware_lock, flags);
1075 #endif
1076
1077 if (!ha->fw_dump) {
1078 ql_log(ql_log_warn, vha, 0xd006,
1079 "No buffer available for dump.\n");
1080 goto qla24xx_fw_dump_failed;
1081 }
1082
1083 if (ha->fw_dumped) {
1084 ql_log(ql_log_warn, vha, 0xd007,
1085 "Firmware has been previously dumped (%p) "
1086 "-- ignoring request.\n",
1087 ha->fw_dump);
1088 goto qla24xx_fw_dump_failed;
1089 }
1090 fw = &ha->fw_dump->isp.isp24;
1091 qla2xxx_prep_dump(ha, ha->fw_dump);
1092
	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1094
	/*
	 * Pause RISC. There is no need to track a timeout, since resetting
	 * the chip is the right approach in case the pause times out.
	 */
1099 qla24xx_pause_risc(reg, ha);
1100
1101 /* Host interface registers. */
	dmp_reg = &reg->flash_addr;
1103 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1104 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1105
1106 /* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
1109
1110 /* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1133
1134 /* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
1136 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1137 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1138
1139 /* Transfer sequence registers. */
1140 iter_reg = fw->xseq_gp_reg;
1141 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1142 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1143 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1144 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1145 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1146 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1147 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1148 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1149
1150 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1151 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1152
1153 /* Receive sequence registers. */
1154 iter_reg = fw->rseq_gp_reg;
1155 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1156 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1157 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1158 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1159 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1160 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1161 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1162 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1163
1164 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1165 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1166 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1167
1168 /* Command DMA registers. */
1169 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1170
1171 /* Queues. */
1172 iter_reg = fw->req0_dma_reg;
1173 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1175 for (cnt = 0; cnt < 7; cnt++)
1176 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1177
1178 iter_reg = fw->resp0_dma_reg;
1179 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1181 for (cnt = 0; cnt < 7; cnt++)
1182 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1183
1184 iter_reg = fw->req1_dma_reg;
1185 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1187 for (cnt = 0; cnt < 7; cnt++)
1188 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1189
1190 /* Transmit DMA registers. */
1191 iter_reg = fw->xmt0_dma_reg;
1192 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1193 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1194
1195 iter_reg = fw->xmt1_dma_reg;
1196 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1197 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1198
1199 iter_reg = fw->xmt2_dma_reg;
1200 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1201 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1202
1203 iter_reg = fw->xmt3_dma_reg;
1204 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1205 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1206
1207 iter_reg = fw->xmt4_dma_reg;
1208 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1209 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1210
1211 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1212
1213 /* Receive DMA registers. */
1214 iter_reg = fw->rcvt0_data_dma_reg;
1215 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1216 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1217
1218 iter_reg = fw->rcvt1_data_dma_reg;
1219 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1220 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1221
1222 /* RISC registers. */
1223 iter_reg = fw->risc_gp_reg;
1224 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1225 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1226 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1227 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1228 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1229 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1230 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1231 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1232
1233 /* Local memory controller registers. */
1234 iter_reg = fw->lmc_reg;
1235 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1236 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1237 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1238 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1239 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1240 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1241 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1242
1243 /* Fibre Protocol Module registers. */
1244 iter_reg = fw->fpm_hdw_reg;
1245 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1246 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1247 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1248 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1249 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1254 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1255 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1256 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1257
1258 /* Frame Buffer registers. */
1259 iter_reg = fw->fb_hdw_reg;
1260 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1265 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1266 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1267 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1269 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1270 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1271
1272 rval = qla24xx_soft_reset(ha);
1273 if (rval != QLA_SUCCESS)
1274 goto qla24xx_fw_dump_failed_0;
1275
1276 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1277 &nxt);
1278 if (rval != QLA_SUCCESS)
1279 goto qla24xx_fw_dump_failed_0;
1280
1281 nxt = qla2xxx_copy_queues(ha, nxt);
1282
1283 qla24xx_copy_eft(ha, nxt);
1284
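	/*
	 * Optional "chain" records (ATIO queues here; FCE and extra queues
	 * in later ISPs) are appended at ha->chain_offset; when at least one
	 * is present the dump version is marked as a chain variant and the
	 * final record is flagged as the last one in the chain.
	 */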
1285 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1286 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1287 if (last_chain) {
1288 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1289 *last_chain |= htonl(DUMP_CHAIN_LAST);
1290 }
1291
1292 /* Adjust valid length. */
1293 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1294
1295 qla24xx_fw_dump_failed_0:
1296 qla2xxx_dump_post_process(base_vha, rval);
1297
1298 qla24xx_fw_dump_failed:
1299 #ifndef __CHECKER__
1300 if (!hardware_locked)
1301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1302 #else
1303 ;
1304 #endif
1305 }
1306
1307 void
qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1309 {
1310 int rval;
1311 uint32_t cnt;
1312 struct qla_hw_data *ha = vha->hw;
1313 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1314 uint32_t __iomem *dmp_reg;
1315 uint32_t *iter_reg;
1316 uint16_t __iomem *mbx_reg;
1317 unsigned long flags;
1318 struct qla25xx_fw_dump *fw;
1319 void *nxt, *nxt_chain;
1320 uint32_t *last_chain = NULL;
1321 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1322
1323 flags = 0;
1324 ha->fw_dump_cap_flags = 0;
1325
1326 #ifndef __CHECKER__
1327 if (!hardware_locked)
1328 spin_lock_irqsave(&ha->hardware_lock, flags);
1329 #endif
1330
1331 if (!ha->fw_dump) {
1332 ql_log(ql_log_warn, vha, 0xd008,
1333 "No buffer available for dump.\n");
1334 goto qla25xx_fw_dump_failed;
1335 }
1336
1337 if (ha->fw_dumped) {
1338 ql_log(ql_log_warn, vha, 0xd009,
1339 "Firmware has been previously dumped (%p) "
1340 "-- ignoring request.\n",
1341 ha->fw_dump);
1342 goto qla25xx_fw_dump_failed;
1343 }
1344 fw = &ha->fw_dump->isp.isp25;
1345 qla2xxx_prep_dump(ha, ha->fw_dump);
1346 ha->fw_dump->version = htonl(2);
1347
	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1349
	/*
	 * Pause RISC. There is no need to track a timeout, since resetting
	 * the chip is the right approach in case the pause times out.
	 */
1354 qla24xx_pause_risc(reg, ha);
1355
1356 /* Host/Risc registers. */
1357 iter_reg = fw->host_risc_reg;
1358 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1359 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1360
1361 /* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);
1373
1374 /* Host interface registers. */
	dmp_reg = &reg->flash_addr;
1376 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1377 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1378
1379 /* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
1382
1383 /* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1422
1423 /* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
1425 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1426 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1427
1428 /* Transfer sequence registers. */
1429 iter_reg = fw->xseq_gp_reg;
1430 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1431 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1432 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1433 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1434 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1435 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1436 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1437 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1438
1439 iter_reg = fw->xseq_0_reg;
1440 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1441 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1442 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1443
1444 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1445
1446 /* Receive sequence registers. */
1447 iter_reg = fw->rseq_gp_reg;
1448 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1449 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1451 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1452 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1453 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1454 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1455 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1456
1457 iter_reg = fw->rseq_0_reg;
1458 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1459 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1460
1461 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1462 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1463
1464 /* Auxiliary sequence registers. */
1465 iter_reg = fw->aseq_gp_reg;
1466 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1467 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1469 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1470 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1471 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1472 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1473 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1474
1475 iter_reg = fw->aseq_0_reg;
1476 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1477 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1478
1479 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1480 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1481
1482 /* Command DMA registers. */
1483 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1484
1485 /* Queues. */
1486 iter_reg = fw->req0_dma_reg;
1487 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1489 for (cnt = 0; cnt < 7; cnt++)
1490 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1491
1492 iter_reg = fw->resp0_dma_reg;
1493 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1495 for (cnt = 0; cnt < 7; cnt++)
1496 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1497
1498 iter_reg = fw->req1_dma_reg;
1499 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
1501 for (cnt = 0; cnt < 7; cnt++)
1502 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1503
1504 /* Transmit DMA registers. */
1505 iter_reg = fw->xmt0_dma_reg;
1506 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1507 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1508
1509 iter_reg = fw->xmt1_dma_reg;
1510 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1511 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1512
1513 iter_reg = fw->xmt2_dma_reg;
1514 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1515 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1516
1517 iter_reg = fw->xmt3_dma_reg;
1518 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1519 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1520
1521 iter_reg = fw->xmt4_dma_reg;
1522 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1523 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1524
1525 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1526
1527 /* Receive DMA registers. */
1528 iter_reg = fw->rcvt0_data_dma_reg;
1529 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1530 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1531
1532 iter_reg = fw->rcvt1_data_dma_reg;
1533 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1534 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1535
1536 /* RISC registers. */
1537 iter_reg = fw->risc_gp_reg;
1538 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1539 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1540 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1541 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1542 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1543 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1544 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1545 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1546
1547 /* Local memory controller registers. */
1548 iter_reg = fw->lmc_reg;
1549 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1550 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1551 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1552 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1553 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1554 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1555 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1556 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1557
1558 /* Fibre Protocol Module registers. */
1559 iter_reg = fw->fpm_hdw_reg;
1560 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1561 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1562 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1563 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1564 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1565 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1566 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1567 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1570 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1571 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1572
1573 /* Frame Buffer registers. */
1574 iter_reg = fw->fb_hdw_reg;
1575 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1576 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1577 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1578 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1586 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1587
1588 /* Multi queue registers */
1589 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1590 &last_chain);
1591
1592 rval = qla24xx_soft_reset(ha);
1593 if (rval != QLA_SUCCESS)
1594 goto qla25xx_fw_dump_failed_0;
1595
1596 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1597 &nxt);
1598 if (rval != QLA_SUCCESS)
1599 goto qla25xx_fw_dump_failed_0;
1600
1601 nxt = qla2xxx_copy_queues(ha, nxt);
1602
1603 qla24xx_copy_eft(ha, nxt);
1604
1605 /* Chain entries -- started with MQ. */
1606 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1607 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1608 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1609 if (last_chain) {
1610 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1611 *last_chain |= htonl(DUMP_CHAIN_LAST);
1612 }
1613
1614 /* Adjust valid length. */
1615 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1616
1617 qla25xx_fw_dump_failed_0:
1618 qla2xxx_dump_post_process(base_vha, rval);
1619
1620 qla25xx_fw_dump_failed:
1621 #ifndef __CHECKER__
1622 if (!hardware_locked)
1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624 #else
1625 ;
1626 #endif
1627 }
1628
1629 void
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1631 {
1632 int rval;
1633 uint32_t cnt;
1634 struct qla_hw_data *ha = vha->hw;
1635 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1636 uint32_t __iomem *dmp_reg;
1637 uint32_t *iter_reg;
1638 uint16_t __iomem *mbx_reg;
1639 unsigned long flags;
1640 struct qla81xx_fw_dump *fw;
1641 void *nxt, *nxt_chain;
1642 uint32_t *last_chain = NULL;
1643 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1644
1645 flags = 0;
1646 ha->fw_dump_cap_flags = 0;
1647
1648 #ifndef __CHECKER__
1649 if (!hardware_locked)
1650 spin_lock_irqsave(&ha->hardware_lock, flags);
1651 #endif
1652
1653 if (!ha->fw_dump) {
1654 ql_log(ql_log_warn, vha, 0xd00a,
1655 "No buffer available for dump.\n");
1656 goto qla81xx_fw_dump_failed;
1657 }
1658
1659 if (ha->fw_dumped) {
1660 ql_log(ql_log_warn, vha, 0xd00b,
1661 "Firmware has been previously dumped (%p) "
1662 "-- ignoring request.\n",
1663 ha->fw_dump);
1664 goto qla81xx_fw_dump_failed;
1665 }
1666 fw = &ha->fw_dump->isp.isp81;
1667 qla2xxx_prep_dump(ha, ha->fw_dump);
1668
	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1670
	/*
	 * Pause RISC. There is no need to track a timeout, since resetting
	 * the chip is the right approach in case the pause times out.
	 */
1675 qla24xx_pause_risc(reg, ha);
1676
1677 /* Host/Risc registers. */
1678 iter_reg = fw->host_risc_reg;
1679 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1680 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1681
1682 /* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);
1694
1695 /* Host interface registers. */
	dmp_reg = &reg->flash_addr;
1697 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1698 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1699
1700 /* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);
1703
1704 /* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1743
1744 /* Mailbox registers. */
1745 mbx_reg = ®->mailbox0;
1746 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1747 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1748
1749 /* Transfer sequence registers. */
1750 iter_reg = fw->xseq_gp_reg;
1751 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1752 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1753 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1754 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1755 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1756 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1757 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1758 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1759
1760 iter_reg = fw->xseq_0_reg;
1761 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1762 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1763 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1764
1765 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1766
1767 /* Receive sequence registers. */
1768 iter_reg = fw->rseq_gp_reg;
1769 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1770 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1771 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1772 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1773 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1774 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1775 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1776 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1777
1778 iter_reg = fw->rseq_0_reg;
1779 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1780 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1781
1782 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1783 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1784
1785 /* Auxiliary sequence registers. */
1786 iter_reg = fw->aseq_gp_reg;
1787 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1788 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1789 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1790 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1791 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1792 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1793 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1794 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1795
1796 iter_reg = fw->aseq_0_reg;
1797 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1798 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1799
1800 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1801 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1802
1803 /* Command DMA registers. */
1804 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1805
1806 /* Queues. */
1807 iter_reg = fw->req0_dma_reg;
1808 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1809 dmp_reg = &reg->iobase_q;
1810 for (cnt = 0; cnt < 7; cnt++)
1811 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1812
1813 iter_reg = fw->resp0_dma_reg;
1814 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1815 dmp_reg = &reg->iobase_q;
1816 for (cnt = 0; cnt < 7; cnt++)
1817 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1818
1819 iter_reg = fw->req1_dma_reg;
1820 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1821 dmp_reg = &reg->iobase_q;
1822 for (cnt = 0; cnt < 7; cnt++)
1823 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1824
1825 /* Transmit DMA registers. */
1826 iter_reg = fw->xmt0_dma_reg;
1827 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1828 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1829
1830 iter_reg = fw->xmt1_dma_reg;
1831 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1832 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1833
1834 iter_reg = fw->xmt2_dma_reg;
1835 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1836 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1837
1838 iter_reg = fw->xmt3_dma_reg;
1839 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1840 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1841
1842 iter_reg = fw->xmt4_dma_reg;
1843 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1844 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1845
1846 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1847
1848 /* Receive DMA registers. */
1849 iter_reg = fw->rcvt0_data_dma_reg;
1850 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1851 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1852
1853 iter_reg = fw->rcvt1_data_dma_reg;
1854 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1855 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1856
1857 /* RISC registers. */
1858 iter_reg = fw->risc_gp_reg;
1859 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1860 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1861 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1862 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1863 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1864 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1865 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1866 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1867
1868 /* Local memory controller registers. */
1869 iter_reg = fw->lmc_reg;
1870 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1871 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1872 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1873 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1874 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1875 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1876 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1877 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1878
1879 /* Fibre Protocol Module registers. */
1880 iter_reg = fw->fpm_hdw_reg;
1881 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1882 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1883 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1884 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1885 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1886 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1891 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1892 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1893 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1894 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1895
1896 /* Frame Buffer registers. */
1897 iter_reg = fw->fb_hdw_reg;
1898 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1899 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1900 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1901 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1902 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1903 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1904 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1910 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1911
1912 /* Multi queue registers */
1913 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1914 &last_chain);
1915
1916 rval = qla24xx_soft_reset(ha);
1917 if (rval != QLA_SUCCESS)
1918 goto qla81xx_fw_dump_failed_0;
1919
1920 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1921 &nxt);
1922 if (rval != QLA_SUCCESS)
1923 goto qla81xx_fw_dump_failed_0;
1924
1925 nxt = qla2xxx_copy_queues(ha, nxt);
1926
1927 qla24xx_copy_eft(ha, nxt);
1928
1929 /* Chain entries -- started with MQ. */
1930 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1931 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1932 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1933 if (last_chain) {
1934 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1935 *last_chain |= htonl(DUMP_CHAIN_LAST);
1936 }
1937
1938 /* Adjust valid length. */
1939 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1940
1941 qla81xx_fw_dump_failed_0:
1942 qla2xxx_dump_post_process(base_vha, rval);
1943
1944 qla81xx_fw_dump_failed:
1945 #ifndef __CHECKER__
1946 if (!hardware_locked)
1947 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1948 #else
1949 ;
1950 #endif
1951 }
1952
1953 void
1954 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1955 {
1956 int rval;
1957 uint32_t cnt;
1958 struct qla_hw_data *ha = vha->hw;
1959 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1960 uint32_t __iomem *dmp_reg;
1961 uint32_t *iter_reg;
1962 uint16_t __iomem *mbx_reg;
1963 unsigned long flags;
1964 struct qla83xx_fw_dump *fw;
1965 void *nxt, *nxt_chain;
1966 uint32_t *last_chain = NULL;
1967 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1968
1969 flags = 0;
1970 ha->fw_dump_cap_flags = 0;
1971
1972 #ifndef __CHECKER__
1973 if (!hardware_locked)
1974 spin_lock_irqsave(&ha->hardware_lock, flags);
1975 #endif
1976
1977 if (!ha->fw_dump) {
1978 ql_log(ql_log_warn, vha, 0xd00c,
1979 "No buffer available for dump!!!\n");
1980 goto qla83xx_fw_dump_failed;
1981 }
1982
1983 if (ha->fw_dumped) {
1984 ql_log(ql_log_warn, vha, 0xd00d,
1985 "Firmware has been previously dumped (%p) -- ignoring "
1986 "request...\n", ha->fw_dump);
1987 goto qla83xx_fw_dump_failed;
1988 }
1989 fw = &ha->fw_dump->isp.isp83;
1990 qla2xxx_prep_dump(ha, ha->fw_dump);
1991
1992 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1993
1994 /*
1995 * Pause RISC. No need to track timeout, as resetting the chip
1996 * is the right approach in case of pause timeout.
1997 */
1998 qla24xx_pause_risc(reg, ha);
1999
2000 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2001 dmp_reg = &reg->iobase_window;
2002 RD_REG_DWORD(dmp_reg);
2003 WRT_REG_DWORD(dmp_reg, 0);
2004 
2005 dmp_reg = &reg->unused_4_1[0];
2006 RD_REG_DWORD(dmp_reg);
2007 WRT_REG_DWORD(dmp_reg, 0);
2008 
2009 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2010 dmp_reg = &reg->unused_4_1[2];
2011 RD_REG_DWORD(dmp_reg);
2012 WRT_REG_DWORD(dmp_reg, 0);
2013 
2014 /* select PCR and disable ecc checking and correction */
2015 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2016 RD_REG_DWORD(&reg->iobase_addr);
2017 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2018
2019 /* Host/Risc registers. */
2020 iter_reg = fw->host_risc_reg;
2021 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2022 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2023 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2024
2025 /* PCIe registers. */
2026 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2027 RD_REG_DWORD(&reg->iobase_addr);
2028 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2029 dmp_reg = &reg->iobase_c4;
2030 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
2031 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
2032 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2033 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2034 
2035 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2036 RD_REG_DWORD(&reg->iobase_window);
2037 
2038 /* Host interface registers. */
2039 dmp_reg = &reg->flash_addr;
2040 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
2041 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
2042 
2043 /* Disable interrupts. */
2044 WRT_REG_DWORD(&reg->ictrl, 0);
2045 RD_REG_DWORD(&reg->ictrl);
2046
2047 /* Shadow registers. */
2048 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2049 RD_REG_DWORD(&reg->iobase_addr);
2050 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2051 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2052 
2053 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2054 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2055 
2056 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2057 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2058 
2059 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2060 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2061 
2062 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2063 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2064 
2065 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2066 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2067 
2068 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2069 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2070 
2071 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2072 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2073 
2074 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2075 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2076 
2077 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2078 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2079 
2080 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2081 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2082 
2083 /* RISC I/O register. */
2084 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2085 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2086 
2087 /* Mailbox registers. */
2088 mbx_reg = &reg->mailbox0;
2089 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
2090 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
2091
2092 /* Transfer sequence registers. */
2093 iter_reg = fw->xseq_gp_reg;
2094 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2095 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2096 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2097 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2098 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2099 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2105 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2106 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2107 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2108 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2109 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2110
2111 iter_reg = fw->xseq_0_reg;
2112 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2113 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2114 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2115
2116 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2117
2118 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2119
2120 /* Receive sequence registers. */
2121 iter_reg = fw->rseq_gp_reg;
2122 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2123 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2124 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2125 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2126 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2127 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2129 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2130 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2131 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2132 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2133 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2134 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2135 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2136 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2137 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2138
2139 iter_reg = fw->rseq_0_reg;
2140 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2141 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2142
2143 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2144 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2145 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2146
2147 /* Auxiliary sequence registers. */
2148 iter_reg = fw->aseq_gp_reg;
2149 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2163 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2164 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2165
2166 iter_reg = fw->aseq_0_reg;
2167 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2168 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2169
2170 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2171 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2172 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2173
2174 /* Command DMA registers. */
2175 iter_reg = fw->cmd_dma_reg;
2176 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2177 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2178 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2179 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2180
2181 /* Queues. */
2182 iter_reg = fw->req0_dma_reg;
2183 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2184 dmp_reg = &reg->iobase_q;
2185 for (cnt = 0; cnt < 7; cnt++)
2186 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2187
2188 iter_reg = fw->resp0_dma_reg;
2189 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2190 dmp_reg = &reg->iobase_q;
2191 for (cnt = 0; cnt < 7; cnt++)
2192 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2193
2194 iter_reg = fw->req1_dma_reg;
2195 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2196 dmp_reg = &reg->iobase_q;
2197 for (cnt = 0; cnt < 7; cnt++)
2198 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2199
2200 /* Transmit DMA registers. */
2201 iter_reg = fw->xmt0_dma_reg;
2202 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2203 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2204
2205 iter_reg = fw->xmt1_dma_reg;
2206 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2207 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2208
2209 iter_reg = fw->xmt2_dma_reg;
2210 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2211 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2212
2213 iter_reg = fw->xmt3_dma_reg;
2214 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2215 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2216
2217 iter_reg = fw->xmt4_dma_reg;
2218 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2219 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2220
2221 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2222
2223 /* Receive DMA registers. */
2224 iter_reg = fw->rcvt0_data_dma_reg;
2225 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2226 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2227
2228 iter_reg = fw->rcvt1_data_dma_reg;
2229 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2230 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2231
2232 /* RISC registers. */
2233 iter_reg = fw->risc_gp_reg;
2234 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2235 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2236 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2237 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2238 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2239 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2240 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2241 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2242
2243 /* Local memory controller registers. */
2244 iter_reg = fw->lmc_reg;
2245 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2246 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2247 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2248 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2249 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2250 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2251 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2252 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2253
2254 /* Fibre Protocol Module registers. */
2255 iter_reg = fw->fpm_hdw_reg;
2256 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2257 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2258 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2259 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2260 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2261 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2262 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2263 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2264 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2265 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2266 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2267 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2268 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2269 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2270 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2271 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2272
2273 /* RQ0 Array registers. */
2274 iter_reg = fw->rq0_array_reg;
2275 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2276 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2277 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2278 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2279 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2280 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2281 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2282 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2283 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2284 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2285 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2286 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2287 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2288 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2290 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2291
2292 /* RQ1 Array registers. */
2293 iter_reg = fw->rq1_array_reg;
2294 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2295 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2296 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2297 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2298 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2306 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2307 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2308 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2309 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2310
2311 /* RP0 Array registers. */
2312 iter_reg = fw->rp0_array_reg;
2313 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2325 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2326 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2327 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2328 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2329
2330 /* RP1 Array registers. */
2331 iter_reg = fw->rp1_array_reg;
2332 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2344 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2345 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2346 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2347 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2348
2349 iter_reg = fw->at0_array_reg;
2350 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2354 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2357 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2358
2359 /* I/O Queue Control registers. */
2360 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2361
2362 /* Frame Buffer registers. */
2363 iter_reg = fw->fb_hdw_reg;
2364 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2365 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2366 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2367 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2383 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2384 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2390 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2391
2392 /* Multi queue registers */
2393 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2394 &last_chain);
2395
2396 rval = qla24xx_soft_reset(ha);
2397 if (rval != QLA_SUCCESS) {
2398 ql_log(ql_log_warn, vha, 0xd00e,
2399 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2400 rval = QLA_SUCCESS;
2401
2402 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2403
2404 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2405 RD_REG_DWORD(&reg->hccr);
2406 
2407 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2408 RD_REG_DWORD(&reg->hccr);
2409 
2410 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2411 RD_REG_DWORD(&reg->hccr);
2412
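/*
 * Poll mailbox0 until the firmware clears it (up to 30000 * 5us,
 * roughly 150 ms) as an indication that the RISC came back after
 * the forced reset.
 */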
2413 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2414 udelay(5);
2415
2416 if (!cnt) {
2417 nxt = fw->code_ram;
2418 nxt += sizeof(fw->code_ram);
2419 nxt += (ha->fw_memory_size - 0x100000 + 1);
2420 goto copy_queue;
2421 } else {
2422 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2423 ql_log(ql_log_warn, vha, 0xd010,
2424 "bigger hammer success?\n");
2425 }
2426 }
2427
2428 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2429 &nxt);
2430 if (rval != QLA_SUCCESS)
2431 goto qla83xx_fw_dump_failed_0;
2432
2433 copy_queue:
2434 nxt = qla2xxx_copy_queues(ha, nxt);
2435
2436 qla24xx_copy_eft(ha, nxt);
2437
2438 /* Chain entries -- started with MQ. */
2439 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2440 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2441 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2442 if (last_chain) {
2443 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2444 *last_chain |= htonl(DUMP_CHAIN_LAST);
2445 }
2446
2447 /* Adjust valid length. */
2448 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2449
2450 qla83xx_fw_dump_failed_0:
2451 qla2xxx_dump_post_process(base_vha, rval);
2452
2453 qla83xx_fw_dump_failed:
2454 #ifndef __CHECKER__
2455 if (!hardware_locked)
2456 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2457 #else
2458 ;
2459 #endif
2460 }
2461
2462 /****************************************************************************/
2463 /* Driver Debug Functions. */
2464 /****************************************************************************/
2465
2466 static inline int
2467 ql_mask_match(uint32_t level)
2468 {
2469 if (ql2xextended_error_logging == 1)
2470 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2471 return (level & ql2xextended_error_logging) == level;
2472 }
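/*
 * Illustrative sketch of the mask test above (values are made up, not
 * taken from this driver): with ql2xextended_error_logging == 0x0110,
 * a message logged at level 0x0100 satisfies
 * (0x0100 & 0x0110) == 0x0100 and is emitted, while one logged at
 * level 0x1000 yields (0x1000 & 0x0110) == 0x0000 and is suppressed.
 */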
2473
2474 /*
2475 * This function is for formatting and logging debug information.
2476 * It is to be used when vha is available. It formats the message
2477 * and logs it to the messages file.
2478 * parameters:
2479 * level: The level of the debug messages to be printed.
2480 * If ql2xextended_error_logging value is correctly set,
2481 * this message will appear in the messages file.
2482 * vha: Pointer to the scsi_qla_host_t.
2483 * id: This is a unique identifier for the level. It identifies the
2484 * part of the code from where the message originated.
2485 * msg: The message to be displayed.
2486 */
2487 void
2488 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2489 {
2490 va_list va;
2491 struct va_format vaf;
2492
2493 if (!ql_mask_match(level))
2494 return;
2495
2496 va_start(va, fmt);
2497
2498 vaf.fmt = fmt;
2499 vaf.va = &va;
2500
2501 if (vha != NULL) {
2502 const struct pci_dev *pdev = vha->hw->pdev;
2503 /* <module-name> <pci-name> <msg-id>:<host> Message */
2504 pr_warn("%s [%s]-%04x:%ld: %pV",
2505 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2506 vha->host_no, &vaf);
2507 } else {
2508 pr_warn("%s [%s]-%04x: : %pV",
2509 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2510 }
2511
2512 va_end(va);
2513
2514 }
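/*
 * Illustrative call (hypothetical message id and arguments, shown only
 * as a sketch of the calling convention; assumes the ql_dbg_io level
 * bit defined in qla_dbg.h):
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3000,
 *	    "Command completed, handle=%x.\n", handle);
 *
 * The message is emitted only when the requested level bits are set in
 * ql2xextended_error_logging, and the printed id is id + ql_dbg_offset.
 */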
2515
2516 /*
2517 * This function is for formatting and logging debug information.
2518 * It is to be used when vha is not available and pci is available,
2519 * i.e., before host allocation. It formats the message and logs it
2520 * to the messages file.
2521 * parameters:
2522 * level: The level of the debug messages to be printed.
2523 * If ql2xextended_error_logging value is correctly set,
2524 * this message will appear in the messages file.
2525 * pdev: Pointer to the struct pci_dev.
2526 * id: This is a unique id for the level. It identifies the part
2527 * of the code from where the message originated.
2528 * msg: The message to be displayed.
2529 */
2530 void
2531 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2532 const char *fmt, ...)
2533 {
2534 va_list va;
2535 struct va_format vaf;
2536
2537 if (pdev == NULL)
2538 return;
2539 if (!ql_mask_match(level))
2540 return;
2541
2542 va_start(va, fmt);
2543
2544 vaf.fmt = fmt;
2545 vaf.va = &va;
2546
2547 /* <module-name> <dev-name>:<msg-id> Message */
2548 pr_warn("%s [%s]-%04x: : %pV",
2549 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2550
2551 va_end(va);
2552 }
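/*
 * Illustrative call from early probe, before the scsi host exists
 * (hypothetical message id; a sketch only):
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0100,
 *	    "Probing adapter %s.\n", pci_name(pdev));
 */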
2553
2554 /*
2555 * This function is for formatting and logging log messages.
2556 * It is to be used when vha is available. It formats the message
2557 * and logs it to the messages file. All the messages will be logged
2558 * irrespective of value of ql2xextended_error_logging.
2559 * parameters:
2560 * level: The level of the log messages to be printed in the
2561 * messages file.
2562 * vha: Pointer to the scsi_qla_host_t
2563 * id: This is a unique id for the level. It identifies the
2564 * part of the code from where the message originated.
2565 * msg: The message to be displayed.
2566 */
2567 void
2568 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2569 {
2570 va_list va;
2571 struct va_format vaf;
2572 char pbuf[128];
2573
2574 if (level > ql_errlev)
2575 return;
2576
2577 if (vha != NULL) {
2578 const struct pci_dev *pdev = vha->hw->pdev;
2579 /* <module-name> <msg-id>:<host> Message */
2580 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2581 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2582 } else {
2583 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2584 QL_MSGHDR, "0000:00:00.0", id);
2585 }
2586 pbuf[sizeof(pbuf) - 1] = 0;
2587
2588 va_start(va, fmt);
2589
2590 vaf.fmt = fmt;
2591 vaf.va = &va;
2592
2593 switch (level) {
2594 case ql_log_fatal: /* FATAL LOG */
2595 pr_crit("%s%pV", pbuf, &vaf);
2596 break;
2597 case ql_log_warn:
2598 pr_err("%s%pV", pbuf, &vaf);
2599 break;
2600 case ql_log_info:
2601 pr_warn("%s%pV", pbuf, &vaf);
2602 break;
2603 default:
2604 pr_info("%s%pV", pbuf, &vaf);
2605 break;
2606 }
2607
2608 va_end(va);
2609 }
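/*
 * Illustrative call (hypothetical message id and argument; a sketch
 * only). Unlike ql_dbg(), the message is emitted whenever the level is
 * at or below ql_errlev, regardless of ql2xextended_error_logging:
 *
 *	ql_log(ql_log_warn, vha, 0x00ff,
 *	    "Unexpected mailbox status %x.\n", mb_sts);
 */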
2610
2611 /*
2612 * This function is for formatting and logging log messages.
2613 * It is to be used when vha is not available and pci is available,
2614 * i.e., before host allocation. It formats the message and logs
2615 * it to the messages file. All the messages are logged irrespective
2616 * of the value of ql2xextended_error_logging.
2617 * parameters:
2618 * level: The level of the log messages to be printed in the
2619 * messages file.
2620 * pdev: Pointer to the struct pci_dev.
2621 * id: This is a unique id for the level. It identifies the
2622 * part of the code from where the message originated.
2623 * msg: The message to be displayed.
2624 */
2625 void
2626 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2627 const char *fmt, ...)
2628 {
2629 va_list va;
2630 struct va_format vaf;
2631 char pbuf[128];
2632
2633 if (pdev == NULL)
2634 return;
2635 if (level > ql_errlev)
2636 return;
2637
2638 /* <module-name> <dev-name>:<msg-id> Message */
2639 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2640 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2641 pbuf[sizeof(pbuf) - 1] = 0;
2642
2643 va_start(va, fmt);
2644
2645 vaf.fmt = fmt;
2646 vaf.va = &va;
2647
2648 switch (level) {
2649 case ql_log_fatal: /* FATAL LOG */
2650 pr_crit("%s%pV", pbuf, &vaf);
2651 break;
2652 case ql_log_warn:
2653 pr_err("%s%pV", pbuf, &vaf);
2654 break;
2655 case ql_log_info:
2656 pr_warn("%s%pV", pbuf, &vaf);
2657 break;
2658 default:
2659 pr_info("%s%pV", pbuf, &vaf);
2660 break;
2661 }
2662
2663 va_end(va);
2664 }
2665
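/*
 * This function dumps the first six mailbox registers of the adapter
 * through ql_dbg() at the given debug level, so its output is gated by
 * ql2xextended_error_logging. It selects the correct mailbox register
 * base for P3P, FWI2-capable and legacy ISPs.
 */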
2666 void
2667 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2668 {
2669 int i;
2670 struct qla_hw_data *ha = vha->hw;
2671 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2672 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2673 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2674 uint16_t __iomem *mbx_reg;
2675
2676 if (!ql_mask_match(level))
2677 return;
2678
2679 if (IS_P3P_TYPE(ha))
2680 mbx_reg = &reg82->mailbox_in[0];
2681 else if (IS_FWI2_CAPABLE(ha))
2682 mbx_reg = &reg24->mailbox0;
2683 else
2684 mbx_reg = MAILBOX_REG(ha, reg, 0);
2685
2686 ql_dbg(level, vha, id, "Mailbox registers:\n");
2687 for (i = 0; i < 6; i++)
2688 ql_dbg(level, vha, id,
2689 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
2690 }
2691
2692
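/*
 * This function hex-dumps 'size' bytes of buffer 'b', 16 bytes per
 * line, at the given debug level and message id. Like ql_dbg(), it is
 * a no-op unless the level is enabled in ql2xextended_error_logging.
 */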
2693 void
2694 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2695 uint8_t *b, uint32_t size)
2696 {
2697 uint32_t cnt;
2698 uint8_t c;
2699
2700 if (!ql_mask_match(level))
2701 return;
2702
2703 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
2704 "9 Ah Bh Ch Dh Eh Fh\n");
2705 ql_dbg(level, vha, id, "----------------------------------"
2706 "----------------------------\n");
2707
2708 ql_dbg(level, vha, id, " ");
2709 for (cnt = 0; cnt < size;) {
2710 c = *b++;
2711 printk("%02x", (uint32_t) c);
2712 cnt++;
2713 if (!(cnt % 16))
2714 printk("\n");
2715 else
2716 printk(" ");
2717 }
2718 if (cnt % 16)
2719 ql_dbg(level, vha, id, "\n");
2720 }
2721