1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 
8 #include <linux/vmalloc.h>
9 #include <linux/delay.h>
10 
11 #include "qla_def.h"
12 #include "qla_gbl.h"
13 
14 #include <linux/delay.h>
15 
16 #define TIMEOUT_100_MS 100
17 
18 /* 8044 Flash Read/Write functions */
19 uint32_t
20 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
21 {
22 	return readl((void __iomem *) (ha->nx_pcibase + addr));
23 }
24 
25 void
26 qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
27 {
28 	writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
29 }
30 
31 int
32 qla8044_rd_direct(struct scsi_qla_host *vha,
33 	const uint32_t crb_reg)
34 {
35 	struct qla_hw_data *ha = vha->hw;
36 
37 	if (crb_reg < CRB_REG_INDEX_MAX)
38 		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
39 	else
40 		return QLA_FUNCTION_FAILED;
41 }
42 
43 void
44 qla8044_wr_direct(struct scsi_qla_host *vha,
45 	const uint32_t crb_reg,
46 	const uint32_t value)
47 {
48 	struct qla_hw_data *ha = vha->hw;
49 
50 	if (crb_reg < CRB_REG_INDEX_MAX)
51 		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
52 }
53 
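/*
 * qla8044_set_win_base - Select the CRB window for this function by writing
 * addr to the per-function CRB window register and reading it back to verify
 * that the window actually took effect.
 */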
54 static int
55 qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
56 {
57 	uint32_t val;
58 	int ret_val = QLA_SUCCESS;
59 	struct qla_hw_data *ha = vha->hw;
60 
61 	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
62 	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
63 
64 	if (val != addr) {
65 		ql_log(ql_log_warn, vha, 0xb087,
66 		    "%s: Failed to set register window : "
67 		    "addr written 0x%x, read 0x%x!\n",
68 		    __func__, addr, val);
69 		ret_val = QLA_FUNCTION_FAILED;
70 	}
71 	return ret_val;
72 }
73 
74 static int
75 qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
76 {
77 	int ret_val = QLA_SUCCESS;
78 	struct qla_hw_data *ha = vha->hw;
79 
80 	ret_val = qla8044_set_win_base(vha, addr);
81 	if (!ret_val)
82 		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
83 	else
84 		ql_log(ql_log_warn, vha, 0xb088,
85 		    "%s: failed read of addr 0x%x!\n", __func__, addr);
86 	return ret_val;
87 }
88 
89 static int
90 qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
91 {
92 	int ret_val = QLA_SUCCESS;
93 	struct qla_hw_data *ha = vha->hw;
94 
95 	ret_val = qla8044_set_win_base(vha, addr);
96 	if (!ret_val)
97 		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
98 	else
99 		ql_log(ql_log_warn, vha, 0xb089,
100 		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
101 		    __func__, addr, data);
102 	return ret_val;
103 }
104 
105 /*
106  * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
107  *
108  * @ha : Pointer to adapter structure
109  * @raddr : CRB address to read from
110  * @waddr : CRB address to write to
111  *
112  */
113 static void
114 qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
115 	uint32_t raddr, uint32_t waddr)
116 {
117 	uint32_t value;
118 
119 	qla8044_rd_reg_indirect(vha, raddr, &value);
120 	qla8044_wr_reg_indirect(vha, waddr, value);
121 }
122 
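/*
 * qla8044_poll_wait_for_ready - Poll the register at addr1 until any bit in
 * mask is set, or fail after TIMEOUT_100_MS milliseconds.
 */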
123 static int
124 qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
125 	uint32_t mask)
126 {
127 	unsigned long timeout;
128 	uint32_t temp;
129 
130 	/* jiffies after 100ms */
131 	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
132 	do {
133 		qla8044_rd_reg_indirect(vha, addr1, &temp);
134 		if ((temp & mask) != 0)
135 			break;
136 		if (time_after_eq(jiffies, timeout)) {
137 			ql_log(ql_log_warn, vha, 0xb151,
138 				"Error in processing rdmdio entry\n");
139 			return -1;
140 		}
141 	} while (1);
142 
143 	return 0;
144 }
145 
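/*
 * qla8044_ipmdio_rd_reg - Internal PHY MDIO read: wait for the MDIO interface
 * to be ready, write the read command (0x40000000 | addr) to addr1, wait for
 * completion and return the data read back from addr3.
 */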
146 static uint32_t
147 qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
148 	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
149 {
150 	uint32_t temp;
151 	int ret = 0;
152 
153 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
154 	if (ret == -1)
155 		return -1;
156 
157 	temp = (0x40000000 | addr);
158 	qla8044_wr_reg_indirect(vha, addr1, temp);
159 
160 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
161 	if (ret == -1)
162 		return 0;
163 
164 	qla8044_rd_reg_indirect(vha, addr3, &ret);
165 
166 	return ret;
167 }
168 
169 
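/*
 * qla8044_poll_wait_ipmdio_bus_idle - Poll the MDIO status register (addr2,
 * read via qla8044_ipmdio_rd_reg) until its busy bit (bit 0) clears, or fail
 * after TIMEOUT_100_MS milliseconds.
 */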
170 static int
171 qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
172 	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
173 {
174 	unsigned long timeout;
175 	uint32_t temp;
176 
177 	/* jiffies after 100 msecs */
178 	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
179 	do {
180 		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
181 		if ((temp & 0x1) != 1)
182 			break;
183 		if (time_after_eq(jiffies, timeout)) {
184 			ql_log(ql_log_warn, vha, 0xb152,
185 			    "Error in processing mdiobus idle\n");
186 			return -1;
187 		}
188 	} while (1);
189 
190 	return 0;
191 }
192 
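/*
 * qla8044_ipmdio_wr_reg - Internal PHY MDIO write: wait for the MDIO interface
 * to be ready, write the data to addr3 and the command to addr1, then wait
 * for the write to complete.
 */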
193 static int
194 qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
195 	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
196 {
197 	int ret = 0;
198 
199 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
200 	if (ret == -1)
201 		return -1;
202 
203 	qla8044_wr_reg_indirect(vha, addr3, value);
204 	qla8044_wr_reg_indirect(vha, addr1, addr);
205 
206 	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
207 	if (ret == -1)
208 		return -1;
209 
210 	return 0;
211 }
212 /*
213  * qla8044_rmw_crb_reg - Read the value at raddr, AND it with test_mask,
214  * shift left/right, OR/XOR with the values in the RMW header, and write the result to waddr.
215  *
216  * @vha : Pointer to adapter structure
217  * @raddr : CRB address to read from
218  * @waddr : CRB address to write to
219  * @p_rmw_hdr : header with shift/or/xor values.
220  *
221  */
222 static void
223 qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
224 	uint32_t raddr, uint32_t waddr,	struct qla8044_rmw *p_rmw_hdr)
225 {
226 	uint32_t value;
227 
228 	if (p_rmw_hdr->index_a)
229 		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
230 	else
231 		qla8044_rd_reg_indirect(vha, raddr, &value);
232 	value &= p_rmw_hdr->test_mask;
233 	value <<= p_rmw_hdr->shl;
234 	value >>= p_rmw_hdr->shr;
235 	value |= p_rmw_hdr->or_value;
236 	value ^= p_rmw_hdr->xor_value;
237 	qla8044_wr_reg_indirect(vha, waddr, value);
238 	return;
239 }
240 
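/*
 * qla8044_set_qsnt_ready - Advertise quiescent-ready for this function by
 * setting its bit in the DRV_STATE register; qla8044_clear_qsnt_ready below
 * clears the same bit.
 */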
241 static inline void
242 qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
243 {
244 	uint32_t qsnt_state;
245 	struct qla_hw_data *ha = vha->hw;
246 
247 	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
248 	qsnt_state |= (1 << ha->portnum);
249 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
250 	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
251 	     __func__, vha->host_no, qsnt_state);
252 }
253 
254 void
255 qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
256 {
257 	uint32_t qsnt_state;
258 	struct qla_hw_data *ha = vha->hw;
259 
260 	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
261 	qsnt_state &= ~(1 << ha->portnum);
262 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
263 	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
264 	    __func__, vha->host_no, qsnt_state);
265 }
266 
267 /**
268  *
269  * qla8044_lock_recovery - Recovers the idc_lock.
270  * @ha : Pointer to adapter structure
271  *
272  * Lock Recovery Register
273  * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
274  *	valid if bits 1..0 are set by driver doing lock recovery.
275  * 1-0  1 - Driver intends to force unlock the IDC lock.
276  *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
277  *	    this field after force unlocking the IDC lock.
278  *
279  * Lock Recovery process
280  * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
281  *    greater than 0, then wait for the other driver to unlock otherwise
282  *    move to the next step.
283  * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
284  *    register bits 1..0 and also set the function# in bits 5..2.
285  * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
286  *    Wait for the other driver to perform lock recovery if the function
287  *    number in bits 5..2 has changed, otherwise move to the next step.
288  * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
289  *    leaving your function# in bits 5..2.
290  * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
291  *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
292  **/
293 static int
294 qla8044_lock_recovery(struct scsi_qla_host *vha)
295 {
296 	uint32_t lock = 0, lockid;
297 	struct qla_hw_data *ha = vha->hw;
298 
299 	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
300 
301 	/* Check for other Recovery in progress, go wait */
302 	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
303 		return QLA_FUNCTION_FAILED;
304 
305 	/* Intent to Recover */
306 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
307 	    (ha->portnum <<
308 	     IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
309 	msleep(200);
310 
311 	/* Check Intent to Recover is advertised */
312 	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
313 	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
314 	    IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
315 		return QLA_FUNCTION_FAILED;
316 
317 	ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n"
318 	    , __func__, ha->portnum);
319 
320 	/* Proceed to Recover */
321 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
322 	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
323 	    PROCEED_TO_RECOVER);
324 
325 	/* Force Unlock() */
326 	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
327 	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
328 
329 	/* Clear bits 0-5 in IDC_RECOVERY register*/
330 	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
331 
332 	/* Get lock() */
333 	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
334 	if (lock) {
335 		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
336 		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
337 		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
338 		return QLA_SUCCESS;
339 	} else
340 		return QLA_FUNCTION_FAILED;
341 }
342 
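/*
 * qla8044_idc_lock - Acquire the IDC lock by reading the DRV_LOCK hardware
 * semaphore, then record this function's number and bump the lock counter in
 * DRV_LOCK_ID. If the same owner still holds the lock after
 * QLA8044_DRV_LOCK_TIMEOUT, attempt recovery via qla8044_lock_recovery().
 */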
343 int
344 qla8044_idc_lock(struct qla_hw_data *ha)
345 {
346 	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
347 	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
348 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
349 
350 	while (status == 0) {
351 		/* acquire semaphore5 from PCI HW block */
352 		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
353 
354 		if (status) {
355 			/* Increment Counter (8-31) and update func_num (0-7) on
356 			 * getting a successful lock  */
357 			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
358 			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
359 			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
360 			break;
361 		}
362 
363 		if (timeout == 0)
364 			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
365 
366 		if (++timeout >=
367 		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
368 			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
369 			func_num = tmo_owner & 0xFF;
370 			lock_cnt = tmo_owner >> 8;
371 			ql_log(ql_log_warn, vha, 0xb114,
372 			    "%s: Lock by func %d failed after 2s, lock held "
373 			    "by func %d, lock count %d, first_owner %d\n",
374 			    __func__, ha->portnum, func_num, lock_cnt,
375 			    (first_owner & 0xFF));
376 			if (first_owner != tmo_owner) {
377 				/* Some other driver got lock,
378 				 * OR same driver got lock again (counter
379 				 * value changed), when we were waiting for
380 				 * lock. Retry for another 2 sec */
381 				ql_dbg(ql_dbg_p3p, vha, 0xb115,
382 				    "%s: %d: IDC lock failed\n",
383 				    __func__, ha->portnum);
384 				timeout = 0;
385 			} else {
386 				/* Same driver holding lock > 2sec.
387 				 * Force Recovery */
388 				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
389 					/* Recovered and got lock */
390 					ret_val = QLA_SUCCESS;
391 					ql_dbg(ql_dbg_p3p, vha, 0xb116,
392 					    "%s: IDC lock Recovery by %d "
393 					    "successful...\n", __func__,
394 					     ha->portnum);
395 				}
396 				/* Recovery Failed, some other function
397 				 * has the lock, wait for 2secs
398 				 * and retry
399 				 */
400 				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
401 				       "%s: IDC lock Recovery by %d "
402 				       "failed, Retrying timeout\n", __func__,
403 				       ha->portnum);
404 				timeout = 0;
405 			}
406 		}
407 		msleep(QLA8044_DRV_LOCK_MSLEEP);
408 	}
409 	return ret_val;
410 }
411 
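/*
 * qla8044_idc_unlock - Release the IDC lock: verify this function owns it,
 * set the owner field of DRV_LOCK_ID to 0xFF (keeping the lock counter) and
 * read DRV_UNLOCK to drop the hardware semaphore.
 */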
412 void
413 qla8044_idc_unlock(struct qla_hw_data *ha)
414 {
415 	int id;
416 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
417 
418 	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
419 
420 	if ((id & 0xFF) != ha->portnum) {
421 		ql_log(ql_log_warn, vha, 0xb118,
422 		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
423 		    __func__, ha->portnum, (id & 0xFF));
424 		return;
425 	}
426 
427 	/* Keep the lock counter value; set the owner (function number) field of DRV_LOCK_ID to 0xFF */
428 	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
429 	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
430 }
431 
432 /* 8044 Flash Lock/Unlock functions */
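/*
 * qla8044_flash_lock - Acquire the flash hardware semaphore, retrying every
 * 20 ms up to QLA8044_FLASH_LOCK_TIMEOUT, and record this function's number
 * in FLASH_LOCK_ID.
 */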
433 static int
434 qla8044_flash_lock(scsi_qla_host_t *vha)
435 {
436 	int lock_owner;
437 	int timeout = 0;
438 	uint32_t lock_status = 0;
439 	int ret_val = QLA_SUCCESS;
440 	struct qla_hw_data *ha = vha->hw;
441 
442 	while (lock_status == 0) {
443 		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
444 		if (lock_status)
445 			break;
446 
447 		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
448 			lock_owner = qla8044_rd_reg(ha,
449 			    QLA8044_FLASH_LOCK_ID);
450 			ql_log(ql_log_warn, vha, 0xb113,
451 			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
452 			    __func__, ha->portnum, lock_owner);
453 			ret_val = QLA_FUNCTION_FAILED;
454 			break;
455 		}
456 		msleep(20);
457 	}
458 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
459 	return ret_val;
460 }
461 
462 static void
463 qla8044_flash_unlock(scsi_qla_host_t *vha)
464 {
465 	struct qla_hw_data *ha = vha->hw;
466 
467 	/* Reading FLASH_UNLOCK register unlocks the Flash */
468 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
469 	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
470 }
471 
472 
473 static
474 void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
475 {
476 
477 	if (qla8044_flash_lock(vha)) {
478 		/* Someone else is holding the lock. */
479 		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
480 	}
481 
482 	/*
483 	 * Either we got the lock, or someone
484 	 * else died while holding it.
485 	 * In either case, unlock.
486 	 */
487 	qla8044_flash_unlock(vha);
488 }
489 
490 /*
491  * Address and length are byte address
492  */
493 static int
494 qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
495 	uint32_t flash_addr, int u32_word_count)
496 {
497 	int i, ret_val = QLA_SUCCESS;
498 	uint32_t u32_word;
499 
500 	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
501 		ret_val = QLA_FUNCTION_FAILED;
502 		goto exit_lock_error;
503 	}
504 
505 	if (flash_addr & 0x03) {
506 		ql_log(ql_log_warn, vha, 0xb117,
507 		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
508 		ret_val = QLA_FUNCTION_FAILED;
509 		goto exit_flash_read;
510 	}
511 
512 	for (i = 0; i < u32_word_count; i++) {
513 		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
514 		    (flash_addr & 0xFFFF0000))) {
515 			ql_log(ql_log_warn, vha, 0xb119,
516 			    "%s: failed to write addr 0x%x to "
517 		    "FLASH_DIRECT_WINDOW!\n",
518 			    __func__, flash_addr);
519 			ret_val = QLA_FUNCTION_FAILED;
520 			goto exit_flash_read;
521 		}
522 
523 		ret_val = qla8044_rd_reg_indirect(vha,
524 		    QLA8044_FLASH_DIRECT_DATA(flash_addr),
525 		    &u32_word);
526 		if (ret_val != QLA_SUCCESS) {
527 			ql_log(ql_log_warn, vha, 0xb08c,
528 			    "%s: failed to read addr 0x%x!\n",
529 			    __func__, flash_addr);
530 			goto exit_flash_read;
531 		}
532 
533 		*(uint32_t *)p_data = u32_word;
534 		p_data = p_data + 4;
535 		flash_addr = flash_addr + 4;
536 	}
537 
538 exit_flash_read:
539 	qla8044_flash_unlock(vha);
540 
541 exit_lock_error:
542 	return ret_val;
543 }
544 
545 /*
546  * Address and length are byte address
547  */
548 uint8_t *
549 qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
550 	uint32_t offset, uint32_t length)
551 {
552 	scsi_block_requests(vha->host);
553 	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
554 	    != QLA_SUCCESS) {
555 		ql_log(ql_log_warn, vha,  0xb08d,
556 		    "%s: Failed to read from flash\n",
557 		    __func__);
558 	}
559 	scsi_unblock_requests(vha->host);
560 	return buf;
561 }
562 
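/*
 * qla8044_need_reset - Return non-zero if this function's bit is set in the
 * DRV_STATE register (reset pending/acknowledged), or if an EEH error is in
 * progress while any driver is active.
 */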
563 static inline int
564 qla8044_need_reset(struct scsi_qla_host *vha)
565 {
566 	uint32_t drv_state, drv_active;
567 	int rval;
568 	struct qla_hw_data *ha = vha->hw;
569 
570 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
571 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
572 
573 	rval = drv_state & (1 << ha->portnum);
574 
575 	if (ha->flags.eeh_busy && drv_active)
576 		rval = 1;
577 	return rval;
578 }
579 
580 /*
581  * qla8044_write_list - Write the value (p_entry->arg2) to address specified
582  * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
583  * entries.
584  *
585  * @vha : Pointer to adapter structure
586  * @p_hdr : reset_entry header for WRITE_LIST opcode.
587  *
588  */
589 static void
590 qla8044_write_list(struct scsi_qla_host *vha,
591 	struct qla8044_reset_entry_hdr *p_hdr)
592 {
593 	struct qla8044_entry *p_entry;
594 	uint32_t i;
595 
596 	p_entry = (struct qla8044_entry *)((char *)p_hdr +
597 	    sizeof(struct qla8044_reset_entry_hdr));
598 
599 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
600 		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
601 		if (p_hdr->delay)
602 			udelay((uint32_t)(p_hdr->delay));
603 	}
604 }
605 
606 /*
607  * qla8044_read_write_list - Read from address specified by p_entry->arg1,
608  * write value read to address specified by p_entry->arg2, for all entries in
609  * header with delay of p_hdr->delay between entries.
610  *
611  * @vha : Pointer to adapter structure
612  * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
613  *
614  */
615 static void
616 qla8044_read_write_list(struct scsi_qla_host *vha,
617 	struct qla8044_reset_entry_hdr *p_hdr)
618 {
619 	struct qla8044_entry *p_entry;
620 	uint32_t i;
621 
622 	p_entry = (struct qla8044_entry *)((char *)p_hdr +
623 	    sizeof(struct qla8044_reset_entry_hdr));
624 
625 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
626 		qla8044_read_write_crb_reg(vha, p_entry->arg1,
627 		    p_entry->arg2);
628 		if (p_hdr->delay)
629 			udelay((uint32_t)(p_hdr->delay));
630 	}
631 }
632 
633 /*
634  * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
635  * value read ANDed with test_mask is equal to test_result.
636  *
637  * @ha : Pointer to adapter structure
638  * @addr : CRB register address
639  * @duration : Poll for total of "duration" msecs
640  * @test_mask : Mask value read with "test_mask"
641  * @test_result : Compare (value&test_mask) with test_result.
642  *
643  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
644  */
645 static int
646 qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
647 	int duration, uint32_t test_mask, uint32_t test_result)
648 {
649 	uint32_t value;
650 	int timeout_error;
651 	uint8_t retries;
652 	int ret_val = QLA_SUCCESS;
653 
654 	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
655 	if (ret_val == QLA_FUNCTION_FAILED) {
656 		timeout_error = 1;
657 		goto exit_poll_reg;
658 	}
659 
660 	/* poll every 1/10 of the total duration */
661 	retries = duration/10;
662 
663 	do {
664 		if ((value & test_mask) != test_result) {
665 			timeout_error = 1;
666 			msleep(duration/10);
667 			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
668 			if (ret_val == QLA_FUNCTION_FAILED) {
669 				timeout_error = 1;
670 				goto exit_poll_reg;
671 			}
672 		} else {
673 			timeout_error = 0;
674 			break;
675 		}
676 	} while (retries--);
677 
678 exit_poll_reg:
679 	if (timeout_error) {
680 		vha->reset_tmplt.seq_error++;
681 		ql_log(ql_log_fatal, vha, 0xb090,
682 		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
683 		    __func__, value, test_mask, test_result);
684 	}
685 
686 	return timeout_error;
687 }
688 
689 /*
690  * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
691  * register specified by p_entry->arg1 and compare (value AND test_mask) with
692  * test_result to validate it. Wait for p_hdr->delay between processing entries.
693  *
694  * @ha : Pointer to adapter structure
695  * @p_hdr : reset_entry header for POLL_LIST opcode.
696  *
697  */
698 static void
699 qla8044_poll_list(struct scsi_qla_host *vha,
700 	struct qla8044_reset_entry_hdr *p_hdr)
701 {
702 	long delay;
703 	struct qla8044_entry *p_entry;
704 	struct qla8044_poll *p_poll;
705 	uint32_t i;
706 	uint32_t value;
707 
708 	p_poll = (struct qla8044_poll *)
709 		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
710 
711 	/* Entries start after 8 byte qla8044_poll, poll header contains
712 	 * the test_mask, test_value.
713 	 */
714 	p_entry = (struct qla8044_entry *)((char *)p_poll +
715 	    sizeof(struct qla8044_poll));
716 
717 	delay = (long)p_hdr->delay;
718 
719 	if (!delay) {
720 		for (i = 0; i < p_hdr->count; i++, p_entry++)
721 			qla8044_poll_reg(vha, p_entry->arg1,
722 			    delay, p_poll->test_mask, p_poll->test_value);
723 	} else {
724 		for (i = 0; i < p_hdr->count; i++, p_entry++) {
725 			if (delay) {
726 				if (qla8044_poll_reg(vha,
727 				    p_entry->arg1, delay,
728 				    p_poll->test_mask,
729 				    p_poll->test_value)) {
730 					/* If
731 					 * (data_read & test_mask) != test_value,
732 					 * read the TIMEOUT_ADDR (arg1) and
733 					 * ADDR (arg2) registers.
734 					 */
735 					qla8044_rd_reg_indirect(vha,
736 					    p_entry->arg1, &value);
737 					qla8044_rd_reg_indirect(vha,
738 					    p_entry->arg2, &value);
739 				}
740 			}
741 		}
742 	}
743 }
744 
745 /*
746  * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
747  * read ar_addr; if ((value & test_mask) != test_value), re-read until the timeout
748  * expires.
749  *
750  * @vha : Pointer to adapter structure
751  * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
752  *
753  */
754 static void
755 qla8044_poll_write_list(struct scsi_qla_host *vha,
756 	struct qla8044_reset_entry_hdr *p_hdr)
757 {
758 	long delay;
759 	struct qla8044_quad_entry *p_entry;
760 	struct qla8044_poll *p_poll;
761 	uint32_t i;
762 
763 	p_poll = (struct qla8044_poll *)((char *)p_hdr +
764 	    sizeof(struct qla8044_reset_entry_hdr));
765 
766 	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
767 	    sizeof(struct qla8044_poll));
768 
769 	delay = (long)p_hdr->delay;
770 
771 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
772 		qla8044_wr_reg_indirect(vha,
773 		    p_entry->dr_addr, p_entry->dr_value);
774 		qla8044_wr_reg_indirect(vha,
775 		    p_entry->ar_addr, p_entry->ar_value);
776 		if (delay) {
777 			if (qla8044_poll_reg(vha,
778 			    p_entry->ar_addr, delay,
779 			    p_poll->test_mask,
780 			    p_poll->test_value)) {
781 				ql_dbg(ql_dbg_p3p, vha, 0xb091,
782 				    "%s: Timeout Error: poll list, ",
783 				    __func__);
784 				ql_dbg(ql_dbg_p3p, vha, 0xb092,
785 				    "item_num %d, entry_num %d\n", i,
786 				    vha->reset_tmplt.seq_index);
787 			}
788 		}
789 	}
790 }
791 
792 /*
793  * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
794  * value, write value to p_entry->arg2. Process entries with p_hdr->delay
795  * between entries.
796  *
797  * @vha : Pointer to adapter structure
798  * @p_hdr : header with shift/or/xor values.
799  *
800  */
801 static void
802 qla8044_read_modify_write(struct scsi_qla_host *vha,
803 	struct qla8044_reset_entry_hdr *p_hdr)
804 {
805 	struct qla8044_entry *p_entry;
806 	struct qla8044_rmw *p_rmw_hdr;
807 	uint32_t i;
808 
809 	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
810 	    sizeof(struct qla8044_reset_entry_hdr));
811 
812 	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
813 	    sizeof(struct qla8044_rmw));
814 
815 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
816 		qla8044_rmw_crb_reg(vha, p_entry->arg1,
817 		    p_entry->arg2, p_rmw_hdr);
818 		if (p_hdr->delay)
819 			udelay((uint32_t)(p_hdr->delay));
820 	}
821 }
822 
823 /*
824  * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
825  * two entries of a sequence.
826  *
827  * @vha : Pointer to adapter structure
828  * @p_hdr : Common reset entry header.
829  *
830  */
831 static
832 void qla8044_pause(struct scsi_qla_host *vha,
833 	struct qla8044_reset_entry_hdr *p_hdr)
834 {
835 	if (p_hdr->delay)
836 		mdelay((uint32_t)((long)p_hdr->delay));
837 }
838 
839 /*
840  * qla8044_template_end - Indicates end of reset sequence processing.
841  *
842  * @vha : Pointer to adapter structure
843  * @p_hdr : Common reset entry header.
844  *
845  */
846 static void
847 qla8044_template_end(struct scsi_qla_host *vha,
848 	struct qla8044_reset_entry_hdr *p_hdr)
849 {
850 	vha->reset_tmplt.template_end = 1;
851 
852 	if (vha->reset_tmplt.seq_error == 0) {
853 		ql_dbg(ql_dbg_p3p, vha, 0xb093,
854 		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
855 	} else {
856 		ql_log(ql_log_fatal, vha, 0xb094,
857 		    "%s: Reset sequence completed with some timeout "
858 		    "errors.\n", __func__);
859 	}
860 }
861 
862 /*
863  * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
864  * if ((value & test_mask) != test_value), re-read until the timeout expires,
865  * read dr_addr register and assign to reset_tmplt.array.
866  *
867  * @vha : Pointer to adapter structure
868  * @p_hdr : Common reset entry header.
869  *
870  */
871 static void
872 qla8044_poll_read_list(struct scsi_qla_host *vha,
873 	struct qla8044_reset_entry_hdr *p_hdr)
874 {
875 	long delay;
876 	int index;
877 	struct qla8044_quad_entry *p_entry;
878 	struct qla8044_poll *p_poll;
879 	uint32_t i;
880 	uint32_t value;
881 
882 	p_poll = (struct qla8044_poll *)
883 		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
884 
885 	p_entry = (struct qla8044_quad_entry *)
886 		((char *)p_poll + sizeof(struct qla8044_poll));
887 
888 	delay = (long)p_hdr->delay;
889 
890 	for (i = 0; i < p_hdr->count; i++, p_entry++) {
891 		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
892 		    p_entry->ar_value);
893 		if (delay) {
894 			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
895 			    p_poll->test_mask, p_poll->test_value)) {
896 				ql_dbg(ql_dbg_p3p, vha, 0xb095,
897 				    "%s: Timeout Error: poll "
898 				    "list, ", __func__);
899 				ql_dbg(ql_dbg_p3p, vha, 0xb096,
900 				    "Item_num %d, "
901 				    "entry_num %d\n", i,
902 				    vha->reset_tmplt.seq_index);
903 			} else {
904 				index = vha->reset_tmplt.array_index;
905 				qla8044_rd_reg_indirect(vha,
906 				    p_entry->dr_addr, &value);
907 				vha->reset_tmplt.array[index++] = value;
908 				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
909 					vha->reset_tmplt.array_index = 1;
910 			}
911 		}
912 	}
913 }
914 
915 /*
916  * qla8044_process_reset_template - Process all entries in the reset template
917  * till entry with SEQ_END opcode, which indicates end of the reset template
918  * processing. Each entry has a Reset Entry header, entry opcode/command, with
919  * size of the entry, number of entries in sub-sequence and delay in microsecs
920  * or timeout in millisecs.
921  *
922  * @ha : Pointer to adapter structure
923  * @p_buff : Pointer to the buffer of reset sequence entries.
924  *
925  */
926 static void
927 qla8044_process_reset_template(struct scsi_qla_host *vha,
928 	char *p_buff)
929 {
930 	int index, entries;
931 	struct qla8044_reset_entry_hdr *p_hdr;
932 	char *p_entry = p_buff;
933 
934 	vha->reset_tmplt.seq_end = 0;
935 	vha->reset_tmplt.template_end = 0;
936 	entries = vha->reset_tmplt.hdr->entries;
937 	index = vha->reset_tmplt.seq_index;
938 
939 	for (; (!vha->reset_tmplt.seq_end) && (index  < entries); index++) {
940 		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
941 		switch (p_hdr->cmd) {
942 		case OPCODE_NOP:
943 			break;
944 		case OPCODE_WRITE_LIST:
945 			qla8044_write_list(vha, p_hdr);
946 			break;
947 		case OPCODE_READ_WRITE_LIST:
948 			qla8044_read_write_list(vha, p_hdr);
949 			break;
950 		case OPCODE_POLL_LIST:
951 			qla8044_poll_list(vha, p_hdr);
952 			break;
953 		case OPCODE_POLL_WRITE_LIST:
954 			qla8044_poll_write_list(vha, p_hdr);
955 			break;
956 		case OPCODE_READ_MODIFY_WRITE:
957 			qla8044_read_modify_write(vha, p_hdr);
958 			break;
959 		case OPCODE_SEQ_PAUSE:
960 			qla8044_pause(vha, p_hdr);
961 			break;
962 		case OPCODE_SEQ_END:
963 			vha->reset_tmplt.seq_end = 1;
964 			break;
965 		case OPCODE_TMPL_END:
966 			qla8044_template_end(vha, p_hdr);
967 			break;
968 		case OPCODE_POLL_READ_LIST:
969 			qla8044_poll_read_list(vha, p_hdr);
970 			break;
971 		default:
972 			ql_log(ql_log_fatal, vha, 0xb097,
973 			    "%s: Unknown command ==> 0x%04x on "
974 			    "entry = %d\n", __func__, p_hdr->cmd, index);
975 			break;
976 		}
977 		/*
978 		 * Set pointer to next entry in the sequence.
979 		 */
980 		p_entry += p_hdr->size;
981 	}
982 	vha->reset_tmplt.seq_index = index;
983 }
984 
985 static void
986 qla8044_process_init_seq(struct scsi_qla_host *vha)
987 {
988 	qla8044_process_reset_template(vha,
989 	    vha->reset_tmplt.init_offset);
990 	if (vha->reset_tmplt.seq_end != 1)
991 		ql_log(ql_log_fatal, vha, 0xb098,
992 		    "%s: Abrupt INIT Sub-Sequence end.\n",
993 		    __func__);
994 }
995 
996 static void
997 qla8044_process_stop_seq(struct scsi_qla_host *vha)
998 {
999 	vha->reset_tmplt.seq_index = 0;
1000 	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
1001 	if (vha->reset_tmplt.seq_end != 1)
1002 		ql_log(ql_log_fatal, vha, 0xb099,
1003 		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
1004 }
1005 
1006 static void
1007 qla8044_process_start_seq(struct scsi_qla_host *vha)
1008 {
1009 	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
1010 	if (vha->reset_tmplt.template_end != 1)
1011 		ql_log(ql_log_fatal, vha, 0xb09a,
1012 		    "%s: Abrupt START Sub-Sequence end.\n",
1013 		    __func__);
1014 }
1015 
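/*
 * qla8044_lockless_flash_read_u32 - Read u32_word_count dwords from flash
 * through the FLASH_DIRECT_WINDOW without taking the flash lock,
 * re-programming the window whenever the read crosses a flash sector
 * boundary.
 */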
1016 static int
1017 qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
1018 	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
1019 {
1020 	uint32_t i;
1021 	uint32_t u32_word;
1022 	uint32_t flash_offset;
1023 	uint32_t addr = flash_addr;
1024 	int ret_val = QLA_SUCCESS;
1025 
1026 	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
1027 
1028 	if (addr & 0x3) {
1029 		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
1030 		    __func__, addr);
1031 		ret_val = QLA_FUNCTION_FAILED;
1032 		goto exit_lockless_read;
1033 	}
1034 
1035 	ret_val = qla8044_wr_reg_indirect(vha,
1036 	    QLA8044_FLASH_DIRECT_WINDOW, (addr));
1037 
1038 	if (ret_val != QLA_SUCCESS) {
1039 		ql_log(ql_log_fatal, vha, 0xb09c,
1040 		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
1041 		    __func__, addr);
1042 		goto exit_lockless_read;
1043 	}
1044 
1045 	/* Check if data is spread across multiple sectors  */
1046 	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
1047 	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1048 		/* Multi sector read */
1049 		for (i = 0; i < u32_word_count; i++) {
1050 			ret_val = qla8044_rd_reg_indirect(vha,
1051 			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1052 			if (ret_val != QLA_SUCCESS) {
1053 				ql_log(ql_log_fatal, vha, 0xb09d,
1054 				    "%s: failed to read addr 0x%x!\n",
1055 				    __func__, addr);
1056 				goto exit_lockless_read;
1057 			}
1058 			*(uint32_t *)p_data  = u32_word;
1059 			p_data = p_data + 4;
1060 			addr = addr + 4;
1061 			flash_offset = flash_offset + 4;
1062 			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1063 				/* This write is needed once for each sector */
1064 				ret_val = qla8044_wr_reg_indirect(vha,
1065 				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
1066 				if (ret_val != QLA_SUCCESS) {
1067 					ql_log(ql_log_fatal, vha, 0xb09f,
1068 					    "%s: failed to write addr "
1069 					    "0x%x to FLASH_DIRECT_WINDOW!\n",
1070 					    __func__, addr);
1071 					goto exit_lockless_read;
1072 				}
1073 				flash_offset = 0;
1074 			}
1075 		}
1076 	} else {
1077 		/* Single sector read */
1078 		for (i = 0; i < u32_word_count; i++) {
1079 			ret_val = qla8044_rd_reg_indirect(vha,
1080 			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1081 			if (ret_val != QLA_SUCCESS) {
1082 				ql_log(ql_log_fatal, vha, 0xb0a0,
1083 				    "%s: failed to read addr 0x%x!\n",
1084 				    __func__, addr);
1085 				goto exit_lockless_read;
1086 			}
1087 			*(uint32_t *)p_data = u32_word;
1088 			p_data = p_data + 4;
1089 			addr = addr + 4;
1090 		}
1091 	}
1092 
1093 exit_lockless_read:
1094 	return ret_val;
1095 }
1096 
1097 /*
1098  * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
1099  *
1100  * @vha : Pointer to adapter structure
1101  * addr : MS (off-chip) memory address to write to
1102  * data : Data to be written
1103  * count : word_count to be written
1104  *
1105  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1106  */
1107 static int
1108 qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
1109 	uint64_t addr, uint32_t *data, uint32_t count)
1110 {
1111 	int i, j, ret_val = QLA_SUCCESS;
1112 	uint32_t agt_ctrl;
1113 	unsigned long flags;
1114 	struct qla_hw_data *ha = vha->hw;
1115 
1116 	/* Only 128-bit aligned access */
1117 	if (addr & 0xF) {
1118 		ret_val = QLA_FUNCTION_FAILED;
1119 		goto exit_ms_mem_write;
1120 	}
1121 	write_lock_irqsave(&ha->hw_lock, flags);
1122 
1123 	/* Write address */
1124 	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
1125 	if (ret_val == QLA_FUNCTION_FAILED) {
1126 		ql_log(ql_log_fatal, vha, 0xb0a1,
1127 		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
1128 		goto exit_ms_mem_write_unlock;
1129 	}
1130 
1131 	for (i = 0; i < count; i++, addr += 16) {
1132 		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
1133 		    QLA8044_ADDR_QDR_NET_MAX)) ||
1134 		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
1135 			QLA8044_ADDR_DDR_NET_MAX)))) {
1136 			ret_val = QLA_FUNCTION_FAILED;
1137 			goto exit_ms_mem_write_unlock;
1138 		}
1139 
1140 		ret_val = qla8044_wr_reg_indirect(vha,
1141 		    MD_MIU_TEST_AGT_ADDR_LO, addr);
1142 
1143 		/* Write data */
1144 		ret_val += qla8044_wr_reg_indirect(vha,
1145 		    MD_MIU_TEST_AGT_WRDATA_LO, *data++);
1146 		ret_val += qla8044_wr_reg_indirect(vha,
1147 		    MD_MIU_TEST_AGT_WRDATA_HI, *data++);
1148 		ret_val += qla8044_wr_reg_indirect(vha,
1149 		    MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
1150 		ret_val += qla8044_wr_reg_indirect(vha,
1151 		    MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
1152 		if (ret_val == QLA_FUNCTION_FAILED) {
1153 			ql_log(ql_log_fatal, vha, 0xb0a2,
1154 			    "%s: write to AGT_WRDATA failed!\n",
1155 			    __func__);
1156 			goto exit_ms_mem_write_unlock;
1157 		}
1158 
1159 		/* Check write status */
1160 		ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1161 		    MIU_TA_CTL_WRITE_ENABLE);
1162 		ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1163 		    MIU_TA_CTL_WRITE_START);
1164 		if (ret_val == QLA_FUNCTION_FAILED) {
1165 			ql_log(ql_log_fatal, vha, 0xb0a3,
1166 			    "%s: write to AGT_CTRL failed!\n", __func__);
1167 			goto exit_ms_mem_write_unlock;
1168 		}
1169 
1170 		for (j = 0; j < MAX_CTL_CHECK; j++) {
1171 			ret_val = qla8044_rd_reg_indirect(vha,
1172 			    MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
1173 			if (ret_val == QLA_FUNCTION_FAILED) {
1174 				ql_log(ql_log_fatal, vha, 0xb0a4,
1175 				    "%s: failed to read "
1176 				    "MD_MIU_TEST_AGT_CTRL!\n", __func__);
1177 				goto exit_ms_mem_write_unlock;
1178 			}
1179 			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
1180 				break;
1181 		}
1182 
1183 		/* Status check failed */
1184 		if (j >= MAX_CTL_CHECK) {
1185 			ql_log(ql_log_fatal, vha, 0xb0a5,
1186 			    "%s: MS memory write failed!\n",
1187 			   __func__);
1188 			ret_val = QLA_FUNCTION_FAILED;
1189 			goto exit_ms_mem_write_unlock;
1190 		}
1191 	}
1192 
1193 exit_ms_mem_write_unlock:
1194 	write_unlock_irqrestore(&ha->hw_lock, flags);
1195 
1196 exit_ms_mem_write:
1197 	return ret_val;
1198 }
1199 
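/*
 * qla8044_copy_bootloader - Read the bootloader image from flash at
 * QLA8044_BOOTLOADER_FLASH_ADDR into a temporary buffer and write it, in
 * 128-bit chunks, to the MS memory destination and size given by the
 * BOOTLOADER_ADDR and BOOTLOADER_SIZE registers.
 */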
1200 static int
1201 qla8044_copy_bootloader(struct scsi_qla_host *vha)
1202 {
1203 	uint8_t *p_cache;
1204 	uint32_t src, count, size;
1205 	uint64_t dest;
1206 	int ret_val = QLA_SUCCESS;
1207 	struct qla_hw_data *ha = vha->hw;
1208 
1209 	src = QLA8044_BOOTLOADER_FLASH_ADDR;
1210 	dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
1211 	size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
1212 
1213 	/* 128 bit alignment check */
1214 	if (size & 0xF)
1215 		size = (size + 16) & ~0xF;
1216 
1217 	/* 16 byte count */
1218 	count = size/16;
1219 
1220 	p_cache = vmalloc(size);
1221 	if (p_cache == NULL) {
1222 		ql_log(ql_log_fatal, vha, 0xb0a6,
1223 		    "%s: Failed to allocate memory for "
1224 		    "boot loader cache\n", __func__);
1225 		ret_val = QLA_FUNCTION_FAILED;
1226 		goto exit_copy_bootloader;
1227 	}
1228 
1229 	ret_val = qla8044_lockless_flash_read_u32(vha, src,
1230 	    p_cache, size/sizeof(uint32_t));
1231 	if (ret_val == QLA_FUNCTION_FAILED) {
1232 		ql_log(ql_log_fatal, vha, 0xb0a7,
1233 		    "%s: Error reading F/W from flash!!!\n", __func__);
1234 		goto exit_copy_error;
1235 	}
1236 	ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
1237 	    __func__);
1238 
1239 	/* 128 bit/16 byte write to MS memory */
1240 	ret_val = qla8044_ms_mem_write_128b(vha, dest,
1241 	    (uint32_t *)p_cache, count);
1242 	if (ret_val == QLA_FUNCTION_FAILED) {
1243 		ql_log(ql_log_fatal, vha, 0xb0a9,
1244 		    "%s: Error writing F/W to MS !!!\n", __func__);
1245 		goto exit_copy_error;
1246 	}
1247 	ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
1248 	    "%s: Wrote F/W (size %d) to MS !!!\n",
1249 	    __func__, size);
1250 
1251 exit_copy_error:
1252 	vfree(p_cache);
1253 
1254 exit_copy_bootloader:
1255 	return ret_val;
1256 }
1257 
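/*
 * qla8044_restart - Run the STOP reset sequence, collect a minidump (if
 * enabled), run the INIT sequence, copy the bootloader to MS memory, request
 * a firmware boot from flash and run the START sequence.
 */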
1258 static int
1259 qla8044_restart(struct scsi_qla_host *vha)
1260 {
1261 	int ret_val = QLA_SUCCESS;
1262 	struct qla_hw_data *ha = vha->hw;
1263 
1264 	qla8044_process_stop_seq(vha);
1265 
1266 	/* Collect minidump */
1267 	if (ql2xmdenable)
1268 		qla8044_get_minidump(vha);
1269 	else
1270 		ql_log(ql_log_fatal, vha, 0xb14c,
1271 		    "Minidump disabled.\n");
1272 
1273 	qla8044_process_init_seq(vha);
1274 
1275 	if (qla8044_copy_bootloader(vha)) {
1276 		ql_log(ql_log_fatal, vha, 0xb0ab,
1277 		    "%s: Copy bootloader, firmware restart failed!\n",
1278 		    __func__);
1279 		ret_val = QLA_FUNCTION_FAILED;
1280 		goto exit_restart;
1281 	}
1282 
1283 	/*
1284 	 *  Loads F/W from flash
1285 	 */
1286 	qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
1287 
1288 	qla8044_process_start_seq(vha);
1289 
1290 exit_restart:
1291 	return ret_val;
1292 }
1293 
1294 /*
1295  * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
1296  * initialized.
1297  *
1298  * @ha : Pointer to adapter structure
1299  *
1300  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1301  */
1302 static int
1303 qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
1304 {
1305 	uint32_t val, ret_val = QLA_FUNCTION_FAILED;
1306 	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
1307 	struct qla_hw_data *ha = vha->hw;
1308 
1309 	do {
1310 		val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
1311 		if (val == PHAN_INITIALIZE_COMPLETE) {
1312 			ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
1313 			    "%s: Command Peg initialization "
1314 			    "complete! state=0x%x\n", __func__, val);
1315 			ret_val = QLA_SUCCESS;
1316 			break;
1317 		}
1318 		msleep(CRB_CMDPEG_CHECK_DELAY);
1319 	} while (--retries);
1320 
1321 	return ret_val;
1322 }
1323 
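/*
 * qla8044_start_firmware - Restart the firmware via qla8044_restart() and
 * wait for the command Peg to report PHAN_INITIALIZE_COMPLETE.
 */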
1324 static int
1325 qla8044_start_firmware(struct scsi_qla_host *vha)
1326 {
1327 	int ret_val = QLA_SUCCESS;
1328 
1329 	if (qla8044_restart(vha)) {
1330 		ql_log(ql_log_fatal, vha, 0xb0ad,
1331 		    "%s: Restart Error!!!, Need Reset!!!\n",
1332 		    __func__);
1333 		ret_val = QLA_FUNCTION_FAILED;
1334 		goto exit_start_fw;
1335 	} else
1336 		ql_dbg(ql_dbg_p3p, vha, 0xb0af,
1337 		    "%s: Restart done!\n", __func__);
1338 
1339 	ret_val = qla8044_check_cmd_peg_status(vha);
1340 	if (ret_val) {
1341 		ql_log(ql_log_fatal, vha, 0xb0b0,
1342 		    "%s: Peg not initialized!\n", __func__);
1343 		ret_val = QLA_FUNCTION_FAILED;
1344 	}
1345 
1346 exit_start_fw:
1347 	return ret_val;
1348 }
1349 
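/*
 * qla8044_clear_drv_active - Clear this function's bit in the DRV_ACTIVE
 * register, removing it from IDC participation.
 */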
1350 void
1351 qla8044_clear_drv_active(struct qla_hw_data *ha)
1352 {
1353 	uint32_t drv_active;
1354 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1355 
1356 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1357 	drv_active &= ~(1 << (ha->portnum));
1358 
1359 	ql_log(ql_log_info, vha, 0xb0b1,
1360 	    "%s(%ld): drv_active: 0x%08x\n",
1361 	    __func__, vha->host_no, drv_active);
1362 
1363 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1364 }
1365 
1366 /*
1367  * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
1368  * @ha: pointer to adapter structure
1369  *
1370  * Note: IDC lock must be held upon entry
1371  **/
1372 static int
1373 qla8044_device_bootstrap(struct scsi_qla_host *vha)
1374 {
1375 	int rval = QLA_FUNCTION_FAILED;
1376 	int i;
1377 	uint32_t old_count = 0, count = 0;
1378 	int need_reset = 0;
1379 	uint32_t idc_ctrl;
1380 	struct qla_hw_data *ha = vha->hw;
1381 
1382 	need_reset = qla8044_need_reset(vha);
1383 
1384 	if (!need_reset) {
1385 		old_count = qla8044_rd_direct(vha,
1386 		    QLA8044_PEG_ALIVE_COUNTER_INDEX);
1387 
1388 		for (i = 0; i < 10; i++) {
1389 			msleep(200);
1390 
1391 			count = qla8044_rd_direct(vha,
1392 			    QLA8044_PEG_ALIVE_COUNTER_INDEX);
1393 			if (count != old_count) {
1394 				rval = QLA_SUCCESS;
1395 				goto dev_ready;
1396 			}
1397 		}
1398 		qla8044_flash_lock_recovery(vha);
1399 	} else {
1400 		/* We are trying to perform a recovery here. */
1401 		if (ha->flags.isp82xx_fw_hung)
1402 			qla8044_flash_lock_recovery(vha);
1403 	}
1404 
1405 	/* set to DEV_INITIALIZING */
1406 	ql_log(ql_log_info, vha, 0xb0b2,
1407 	    "%s: HW State: INITIALIZING\n", __func__);
1408 	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1409 	    QLA8XXX_DEV_INITIALIZING);
1410 
1411 	qla8044_idc_unlock(ha);
1412 	rval = qla8044_start_firmware(vha);
1413 	qla8044_idc_lock(ha);
1414 
1415 	if (rval != QLA_SUCCESS) {
1416 		ql_log(ql_log_info, vha, 0xb0b3,
1417 		     "%s: HW State: FAILED\n", __func__);
1418 		qla8044_clear_drv_active(ha);
1419 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1420 		    QLA8XXX_DEV_FAILED);
1421 		return rval;
1422 	}
1423 
1424 	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it after
1425 	 * the device goes to the INIT state. */
1426 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1427 	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1428 		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
1429 		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1430 		ha->fw_dumped = 0;
1431 	}
1432 
1433 dev_ready:
1434 	ql_log(ql_log_info, vha, 0xb0b4,
1435 	    "%s: HW State: READY\n", __func__);
1436 	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
1437 
1438 	return rval;
1439 }
1440 
1441 /*-------------------------Reset Sequence Functions-----------------------*/
1442 static void
1443 qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
1444 {
1445 	u8 *phdr;
1446 
1447 	if (!vha->reset_tmplt.buff) {
1448 		ql_log(ql_log_fatal, vha, 0xb0b5,
1449 		    "%s: Error Invalid reset_seq_template\n", __func__);
1450 		return;
1451 	}
1452 
1453 	phdr = vha->reset_tmplt.buff;
1454 	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
1455 	    "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
1456 	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
1457 	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
1458 	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
1459 	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
1460 	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
1461 	    *(phdr+13), *(phdr+14), *(phdr+15));
1462 }
1463 
1464 /*
1465  * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
1466  *
1467  * @ha : Pointer to adapter structure
1468  *
1469  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1470  */
1471 static int
1472 qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
1473 {
1474 	uint32_t sum =  0;
1475 	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
1476 	int u16_count =  vha->reset_tmplt.hdr->size / sizeof(uint16_t);
1477 
1478 	while (u16_count-- > 0)
1479 		sum += *buff++;
1480 
1481 	while (sum >> 16)
1482 		sum = (sum & 0xFFFF) +  (sum >> 16);
1483 
1484 	/* checksum of 0 indicates a valid template */
1485 	if (~sum) {
1486 		return QLA_SUCCESS;
1487 	} else {
1488 		ql_log(ql_log_fatal, vha, 0xb0b7,
1489 		    "%s: Reset seq checksum failed\n", __func__);
1490 		return QLA_FUNCTION_FAILED;
1491 	}
1492 }
1493 
1494 /*
1495  * qla8044_read_reset_template - Read the reset template from flash, validate
1496  * it and store the stop/start/init sequence offsets in vha->reset_tmplt.
1497  *
1498  * @ha : Pointer to adapter structure
1499  */
1500 void
1501 qla8044_read_reset_template(struct scsi_qla_host *vha)
1502 {
1503 	uint8_t *p_buff;
1504 	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
1505 
1506 	vha->reset_tmplt.seq_error = 0;
1507 	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
1508 	if (vha->reset_tmplt.buff == NULL) {
1509 		ql_log(ql_log_fatal, vha, 0xb0b8,
1510 		    "%s: Failed to allocate reset template resources\n",
1511 		    __func__);
1512 		goto exit_read_reset_template;
1513 	}
1514 
1515 	p_buff = vha->reset_tmplt.buff;
1516 	addr = QLA8044_RESET_TEMPLATE_ADDR;
1517 
1518 	tmplt_hdr_def_size =
1519 	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
1520 
1521 	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
1522 	    "%s: Read template hdr size %d from Flash\n",
1523 	    __func__, tmplt_hdr_def_size);
1524 
1525 	/* Copy template header from flash */
1526 	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1527 		ql_log(ql_log_fatal, vha, 0xb0ba,
1528 		    "%s: Failed to read reset template\n", __func__);
1529 		goto exit_read_template_error;
1530 	}
1531 
1532 	vha->reset_tmplt.hdr =
1533 	 (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
1534 
1535 	/* Validate the template header size and signature */
1536 	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
1537 	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
1538 	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
1539 		ql_log(ql_log_fatal, vha, 0xb0bb,
1540 		    "%s: Template Header size invalid %d "
1541 		    "tmplt_hdr_def_size %d!!!\n", __func__,
1542 		    tmplt_hdr_size, tmplt_hdr_def_size);
1543 		goto exit_read_template_error;
1544 	}
1545 
1546 	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
1547 	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
1548 	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
1549 	    vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
1550 
1551 	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
1552 	    "%s: Read rest of the template size %d\n",
1553 	    __func__, vha->reset_tmplt.hdr->size);
1554 
1555 	/* Copy rest of the template */
1556 	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1557 		ql_log(ql_log_fatal, vha, 0xb0bd,
1558 		    "%s: Failed to read reset template\n", __func__);
1559 		goto exit_read_template_error;
1560 	}
1561 
1562 	/* Integrity check */
1563 	if (qla8044_reset_seq_checksum_test(vha)) {
1564 		ql_log(ql_log_fatal, vha, 0xb0be,
1565 		    "%s: Reset Seq checksum failed!\n", __func__);
1566 		goto exit_read_template_error;
1567 	}
1568 
1569 	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
1570 	    "%s: Reset Seq checksum passed! Get stop, "
1571 	    "start and init seq offsets\n", __func__);
1572 
1573 	/* Get STOP, START, INIT sequence offsets */
1574 	vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
1575 	    vha->reset_tmplt.hdr->init_seq_offset;
1576 
1577 	vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
1578 	    vha->reset_tmplt.hdr->start_seq_offset;
1579 
1580 	vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
1581 	    vha->reset_tmplt.hdr->hdr_size;
1582 
1583 	qla8044_dump_reset_seq_hdr(vha);
1584 
1585 	goto exit_read_reset_template;
1586 
1587 exit_read_template_error:
1588 	vfree(vha->reset_tmplt.buff);
1589 
1590 exit_read_reset_template:
1591 	return;
1592 }
1593 
1594 void
1595 qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
1596 {
1597 	uint32_t idc_ctrl;
1598 	struct qla_hw_data *ha = vha->hw;
1599 
1600 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1601 	idc_ctrl |= DONTRESET_BIT0;
1602 	ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
1603 	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
1604 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1605 }
1606 
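/*
 * qla8044_set_rst_ready - Acknowledge a pending reset by setting this
 * function's bit in the DRV_STATE register.
 */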
1607 static inline void
1608 qla8044_set_rst_ready(struct scsi_qla_host *vha)
1609 {
1610 	uint32_t drv_state;
1611 	struct qla_hw_data *ha = vha->hw;
1612 
1613 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1614 
1615 	/* For ISP8044, the drv_state register has 1 bit per function;
1616 	 * shift 1 by func_num to set the bit for this function. */
1617 	drv_state |= (1 << ha->portnum);
1618 
1619 	ql_log(ql_log_info, vha, 0xb0c1,
1620 	    "%s(%ld): drv_state: 0x%08x\n",
1621 	    __func__, vha->host_no, drv_state);
1622 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
1623 }
1624 
1625 /**
1626  * qla8044_need_reset_handler - Code to start reset sequence
1627  * @ha: pointer to adapter structure
1628  *
1629  * Note: IDC lock must be held upon entry
1630  **/
1631 static void
1632 qla8044_need_reset_handler(struct scsi_qla_host *vha)
1633 {
1634 	uint32_t dev_state = 0, drv_state, drv_active;
1635 	unsigned long reset_timeout;
1636 	struct qla_hw_data *ha = vha->hw;
1637 
1638 	ql_log(ql_log_fatal, vha, 0xb0c2,
1639 	    "%s: Performing ISP error recovery\n", __func__);
1640 
1641 	if (vha->flags.online) {
1642 		qla8044_idc_unlock(ha);
1643 		qla2x00_abort_isp_cleanup(vha);
1644 		ha->isp_ops->get_flash_version(vha, vha->req->ring);
1645 		ha->isp_ops->nvram_config(vha);
1646 		qla8044_idc_lock(ha);
1647 	}
1648 
1649 	dev_state = qla8044_rd_direct(vha,
1650 	    QLA8044_CRB_DEV_STATE_INDEX);
1651 	drv_state = qla8044_rd_direct(vha,
1652 	    QLA8044_CRB_DRV_STATE_INDEX);
1653 	drv_active = qla8044_rd_direct(vha,
1654 	    QLA8044_CRB_DRV_ACTIVE_INDEX);
1655 
1656 	ql_log(ql_log_info, vha, 0xb0c5,
1657 	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
1658 	    __func__, vha->host_no, drv_state, drv_active, dev_state);
1659 
1660 	qla8044_set_rst_ready(vha);
1661 
1662 	/* wait for 10 seconds for reset ack from all functions */
1663 	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1664 
1665 	do {
1666 		if (time_after_eq(jiffies, reset_timeout)) {
1667 			ql_log(ql_log_info, vha, 0xb0c4,
1668 			    "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
1669 			    __func__, ha->portnum, drv_state, drv_active);
1670 			break;
1671 		}
1672 
1673 		qla8044_idc_unlock(ha);
1674 		msleep(1000);
1675 		qla8044_idc_lock(ha);
1676 
1677 		dev_state = qla8044_rd_direct(vha,
1678 		    QLA8044_CRB_DEV_STATE_INDEX);
1679 		drv_state = qla8044_rd_direct(vha,
1680 		    QLA8044_CRB_DRV_STATE_INDEX);
1681 		drv_active = qla8044_rd_direct(vha,
1682 		    QLA8044_CRB_DRV_ACTIVE_INDEX);
1683 	} while (((drv_state & drv_active) != drv_active) &&
1684 	    (dev_state == QLA8XXX_DEV_NEED_RESET));
1685 
1686 	/* Remove IDC participation of functions not acknowledging */
1687 	if (drv_state != drv_active) {
1688 		ql_log(ql_log_info, vha, 0xb0c7,
1689 		    "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
1690 		    __func__, vha->host_no, ha->portnum,
1691 		    (drv_active ^ drv_state));
1692 		drv_active = drv_active & drv_state;
1693 		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1694 		    drv_active);
1695 	} else {
1696 		/*
1697 		 * Reset owner should execute reset recovery,
1698 		 * if all functions acknowledged
1699 		 */
1700 		if ((ha->flags.nic_core_reset_owner) &&
1701 		    (dev_state == QLA8XXX_DEV_NEED_RESET)) {
1702 			ha->flags.nic_core_reset_owner = 0;
1703 			qla8044_device_bootstrap(vha);
1704 			return;
1705 		}
1706 	}
1707 
1708 	/* Exit if non active function */
1709 	if (!(drv_active & (1 << ha->portnum))) {
1710 		ha->flags.nic_core_reset_owner = 0;
1711 		return;
1712 	}
1713 
1714 	/*
1715 	 * Execute Reset Recovery if Reset Owner or Function 7
1716 	 * is the only active function
1717 	 */
1718 	if (ha->flags.nic_core_reset_owner ||
1719 	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
1720 		ha->flags.nic_core_reset_owner = 0;
1721 		qla8044_device_bootstrap(vha);
1722 	}
1723 }
1724 
1725 static void
1726 qla8044_set_drv_active(struct scsi_qla_host *vha)
1727 {
1728 	uint32_t drv_active;
1729 	struct qla_hw_data *ha = vha->hw;
1730 
1731 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1732 
1733 	/* For ISP8044, drv_active register has 1 bit per function,
1734 	 * shift 1 by func_num to set a bit for the function. */
1735 	drv_active |= (1 << ha->portnum);
1736 
1737 	ql_log(ql_log_info, vha, 0xb0c8,
1738 	    "%s(%ld): drv_active: 0x%08x\n",
1739 	    __func__, vha->host_no, drv_active);
1740 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1741 }
1742 
1743 static int
1744 qla8044_check_drv_active(struct scsi_qla_host *vha)
1745 {
1746 	uint32_t drv_active;
1747 	struct qla_hw_data *ha = vha->hw;
1748 
1749 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1750 	if (drv_active & (1 << ha->portnum))
1751 		return QLA_SUCCESS;
1752 	else
1753 		return QLA_TEST_FAILED;
1754 }
1755 
1756 static void
1757 qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1758 {
1759 	uint32_t idc_ctrl;
1760 	struct qla_hw_data *ha = vha->hw;
1761 
1762 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1763 	idc_ctrl &= ~DONTRESET_BIT0;
1764 	ql_log(ql_log_info, vha, 0xb0c9,
1765 	    "%s: idc_ctrl = %d\n", __func__,
1766 	    idc_ctrl);
1767 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1768 }
1769 
1770 static int
1771 qla8044_set_idc_ver(struct scsi_qla_host *vha)
1772 {
1773 	int idc_ver;
1774 	uint32_t drv_active;
1775 	int rval = QLA_SUCCESS;
1776 	struct qla_hw_data *ha = vha->hw;
1777 
1778 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1779 	if (drv_active == (1 << ha->portnum)) {
1780 		idc_ver = qla8044_rd_direct(vha,
1781 		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1782 		idc_ver &= (~0xFF);
1783 		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
1784 		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
1785 		    idc_ver);
1786 		ql_log(ql_log_info, vha, 0xb0ca,
1787 		    "%s: IDC version updated to %d\n",
1788 		    __func__, idc_ver);
1789 	} else {
1790 		idc_ver = qla8044_rd_direct(vha,
1791 		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1792 		idc_ver &= 0xFF;
1793 		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
1794 			ql_log(ql_log_info, vha, 0xb0cb,
1795 			    "%s: qla2xxx driver IDC version %d "
1796 			    "is not compatible with IDC version %d "
1797 			    "of other drivers!\n",
1798 			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
1799 			    idc_ver);
1800 			rval = QLA_FUNCTION_FAILED;
1801 			goto exit_set_idc_ver;
1802 		}
1803 	}
1804 
1805 	/* Update IDC_MINOR_VERSION */
1806 	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
1807 	idc_ver &= ~(0x03 << (ha->portnum * 2));
1808 	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
1809 	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
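	/*
	 * Minor-version field example: each function owns a 2-bit slot, so
	 * for ha->portnum == 1 the code above clears and rewrites bits 3:2
	 * of QLA8044_CRB_IDC_VER_MINOR with QLA8044_IDC_VER_MIN_VALUE,
	 * leaving the other functions' slots untouched.
	 */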
1810 
1811 exit_set_idc_ver:
1812 	return rval;
1813 }
1814 
1815 static int
1816 qla8044_update_idc_reg(struct scsi_qla_host *vha)
1817 {
1818 	uint32_t drv_active;
1819 	int rval = QLA_SUCCESS;
1820 	struct qla_hw_data *ha = vha->hw;
1821 
1822 	if (vha->flags.init_done)
1823 		goto exit_update_idc_reg;
1824 
1825 	qla8044_idc_lock(ha);
1826 	qla8044_set_drv_active(vha);
1827 
1828 	drv_active = qla8044_rd_direct(vha,
1829 	    QLA8044_CRB_DRV_ACTIVE_INDEX);
1830 
1831 	/* If we are the first driver to load and
1832 	 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
1833 	if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
1834 		qla8044_clear_idc_dontreset(vha);
1835 
1836 	rval = qla8044_set_idc_ver(vha);
1837 	if (rval == QLA_FUNCTION_FAILED)
1838 		qla8044_clear_drv_active(ha);
1839 	qla8044_idc_unlock(ha);
1840 
1841 exit_update_idc_reg:
1842 	return rval;
1843 }
1844 
1845 /**
1846  * qla8044_need_qsnt_handler - Code to start quiescent (qsnt) handling
1847  * @vha: pointer to adapter structure
1848  **/
1849 static void
1850 qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
1851 {
1852 	unsigned long qsnt_timeout;
1853 	uint32_t drv_state, drv_active, dev_state;
1854 	struct qla_hw_data *ha = vha->hw;
1855 
1856 	if (vha->flags.online)
1857 		qla2x00_quiesce_io(vha);
1858 	else
1859 		return;
1860 
1861 	qla8044_set_qsnt_ready(vha);
1862 
1863 	/* Wait for 30 secs for all functions to ack qsnt mode */
1864 	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
1865 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1866 	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1867 
1868 	/* Shift drv_active by 1 to match drv_state. As quiescent ready bit
1869 	   position is at bit 1 and drv active is at bit 0 */
1870 	drv_active = drv_active << 1;
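	/*
	 * e.g. drv_active == 0x3 (functions 0 and 1 loaded) becomes 0x6 after
	 * the shift, which is the value drv_state must reach once both
	 * functions have set their quiescent-ready bits.
	 */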
1871 
1872 	while (drv_state != drv_active) {
1873 		if (time_after_eq(jiffies, qsnt_timeout)) {
1874 			/* Other functions did not ack, changing state to
1875 			 * DEV_READY
1876 			 */
1877 			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1878 			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1879 					    QLA8XXX_DEV_READY);
1880 			qla8044_clear_qsnt_ready(vha);
1881 			ql_log(ql_log_info, vha, 0xb0cc,
1882 			    "Timeout waiting for quiescent ack!!!\n");
1883 			return;
1884 		}
1885 		qla8044_idc_unlock(ha);
1886 		msleep(1000);
1887 		qla8044_idc_lock(ha);
1888 
1889 		drv_state = qla8044_rd_direct(vha,
1890 		    QLA8044_CRB_DRV_STATE_INDEX);
1891 		drv_active = qla8044_rd_direct(vha,
1892 		    QLA8044_CRB_DRV_ACTIVE_INDEX);
1893 		drv_active = drv_active << 1;
1894 	}
1895 
1896 	/* All functions have Acked. Set quiescent state */
1897 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1898 
1899 	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
1900 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1901 		    QLA8XXX_DEV_QUIESCENT);
1902 		ql_log(ql_log_info, vha, 0xb0cd,
1903 		    "%s: HW State: QUIESCENT\n", __func__);
1904 	}
1905 }
1906 
1907 /**
1908  * qla8044_device_state_handler - Adapter state machine
1909  * @vha: pointer to host adapter structure.
1910  *
1911  * Note: IDC lock must be UNLOCKED upon entry
1912  **/
1913 int
1914 qla8044_device_state_handler(struct scsi_qla_host *vha)
1915 {
1916 	uint32_t dev_state;
1917 	int rval = QLA_SUCCESS;
1918 	unsigned long dev_init_timeout;
1919 	struct qla_hw_data *ha = vha->hw;
1920 
1921 	rval = qla8044_update_idc_reg(vha);
1922 	if (rval == QLA_FUNCTION_FAILED)
1923 		goto exit_error;
1924 
1925 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1926 	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
1927 	    "Device state is 0x%x = %s\n",
1928 	    dev_state, dev_state < MAX_STATES ?
1929 	    qdev_state(dev_state) : "Unknown");
1930 
1931 	/* wait for 30 seconds for device to go ready */
1932 	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
1933 
1934 	qla8044_idc_lock(ha);
1935 
1936 	while (1) {
1937 		if (time_after_eq(jiffies, dev_init_timeout)) {
1938 			if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
1939 				ql_log(ql_log_warn, vha, 0xb0cf,
1940 				    "%s: Device Init Failed 0x%x = %s\n",
1941 				    QLA2XXX_DRIVER_NAME, dev_state,
1942 				    dev_state < MAX_STATES ?
1943 				    qdev_state(dev_state) : "Unknown");
1944 				qla8044_wr_direct(vha,
1945 				    QLA8044_CRB_DEV_STATE_INDEX,
1946 				    QLA8XXX_DEV_FAILED);
1947 			}
1948 		}
1949 
1950 		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1951 		ql_log(ql_log_info, vha, 0xb0d0,
1952 		    "Device state is 0x%x = %s\n",
1953 		    dev_state, dev_state < MAX_STATES ?
1954 		    qdev_state(dev_state) : "Unknown");
1955 
1956 		/* NOTE: Make sure idc unlocked upon exit of switch statement */
1957 		switch (dev_state) {
1958 		case QLA8XXX_DEV_READY:
1959 			ha->flags.nic_core_reset_owner = 0;
1960 			goto exit;
1961 		case QLA8XXX_DEV_COLD:
1962 			rval = qla8044_device_bootstrap(vha);
1963 			break;
1964 		case QLA8XXX_DEV_INITIALIZING:
1965 			qla8044_idc_unlock(ha);
1966 			msleep(1000);
1967 			qla8044_idc_lock(ha);
1968 			break;
1969 		case QLA8XXX_DEV_NEED_RESET:
1970 			/* For ISP8044, if NEED_RESET is set by any driver,
1971 			 * it should be honored, irrespective of IDC_CTRL
1972 			 * DONTRESET_BIT0 */
1973 			qla8044_need_reset_handler(vha);
1974 			break;
1975 		case QLA8XXX_DEV_NEED_QUIESCENT:
1976 			/* idc locked/unlocked in handler */
1977 			qla8044_need_qsnt_handler(vha);
1978 
1979 			/* Reset the init timeout after qsnt handler */
1980 			dev_init_timeout = jiffies +
1981 			    (ha->fcoe_reset_timeout * HZ);
1982 			break;
1983 		case QLA8XXX_DEV_QUIESCENT:
1984 			ql_log(ql_log_info, vha, 0xb0d1,
1985 			    "HW State: QUIESCENT\n");
1986 
1987 			qla8044_idc_unlock(ha);
1988 			msleep(1000);
1989 			qla8044_idc_lock(ha);
1990 
1991 			/* Reset the init timeout after qsnt handler */
1992 			dev_init_timeout = jiffies +
1993 			    (ha->fcoe_reset_timeout * HZ);
1994 			break;
1995 		case QLA8XXX_DEV_FAILED:
1996 			ha->flags.nic_core_reset_owner = 0;
1997 			qla8044_idc_unlock(ha);
1998 			qla8xxx_dev_failed_handler(vha);
1999 			rval = QLA_FUNCTION_FAILED;
2000 			qla8044_idc_lock(ha);
2001 			goto exit;
2002 		default:
2003 			qla8044_idc_unlock(ha);
2004 			qla8xxx_dev_failed_handler(vha);
2005 			rval = QLA_FUNCTION_FAILED;
2006 			qla8044_idc_lock(ha);
2007 			goto exit;
2008 		}
2009 	}
2010 exit:
2011 	qla8044_idc_unlock(ha);
2012 
2013 exit_error:
2014 	return rval;
2015 }
2016 
2017 /**
2018  * qla8044_check_temp - Check the ISP8044 temperature.
2019  * @vha: adapter block pointer.
2020  *
2021  * Note: The caller should not hold the idc lock.
2022  **/
2023 static int
2024 qla8044_check_temp(struct scsi_qla_host *vha)
2025 {
2026 	uint32_t temp, temp_state, temp_val;
2027 	int status = QLA_SUCCESS;
2028 
2029 	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2030 	temp_state = qla82xx_get_temp_state(temp);
2031 	temp_val = qla82xx_get_temp_val(temp);
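	/*
	 * The temperature CRB register packs a state code and a reading in
	 * degrees Celsius into a single 32-bit word; qla82xx_get_temp_state()
	 * and qla82xx_get_temp_val() (shared with the ISP82xx code) split the
	 * two fields apart.
	 */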
2032 
2033 	if (temp_state == QLA82XX_TEMP_PANIC) {
2034 		ql_log(ql_log_warn, vha, 0xb0d2,
2035 		    "Device temperature %d degrees C"
2036 		    " exceeds maximum allowed. Hardware has been shut"
2037 		    " down\n", temp_val);
2038 		status = QLA_FUNCTION_FAILED;
2039 		return status;
2040 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2041 		ql_log(ql_log_warn, vha, 0xb0d3,
2042 		    "Device temperature %d"
2043 		    " degrees C exceeds operating range."
2044 		    " Immediate action needed.\n", temp_val);
2045 	}
2046 	return 0;
2047 }
2048 
2049 int qla8044_read_temperature(scsi_qla_host_t *vha)
2050 {
2051 	uint32_t temp;
2052 
2053 	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2054 	return qla82xx_get_temp_val(temp);
2055 }
2056 
2057 /**
2058  * qla8044_check_fw_alive - Check firmware health
2059  * @vha: Pointer to host adapter structure.
2060  *
2061  * Context: Interrupt
2062  **/
2063 int
2064 qla8044_check_fw_alive(struct scsi_qla_host *vha)
2065 {
2066 	uint32_t fw_heartbeat_counter;
2067 	uint32_t halt_status1, halt_status2;
2068 	int status = QLA_SUCCESS;
2069 
2070 	fw_heartbeat_counter = qla8044_rd_direct(vha,
2071 	    QLA8044_PEG_ALIVE_COUNTER_INDEX);
2072 
2073 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2074 	if (fw_heartbeat_counter == 0xffffffff) {
2075 		ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
2076 		    "scsi%ld: %s: Device in frozen "
2077 		    "state, QLA8044_PEG_ALIVE_COUNTER is 0xffffffff\n",
2078 		    vha->host_no, __func__);
2079 		return status;
2080 	}
2081 
2082 	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
2083 		vha->seconds_since_last_heartbeat++;
2084 		/* FW not alive after 2 seconds */
2085 		if (vha->seconds_since_last_heartbeat == 2) {
2086 			vha->seconds_since_last_heartbeat = 0;
2087 			halt_status1 = qla8044_rd_direct(vha,
2088 			    QLA8044_PEG_HALT_STATUS1_INDEX);
2089 			halt_status2 = qla8044_rd_direct(vha,
2090 			    QLA8044_PEG_HALT_STATUS2_INDEX);
2091 
2092 			ql_log(ql_log_info, vha, 0xb0d5,
2093 			    "scsi(%ld): %s, ISP8044 "
2094 			    "Dumping hw/fw registers:\n"
2095 			    " PEG_HALT_STATUS1: 0x%x, "
2096 			    "PEG_HALT_STATUS2: 0x%x,\n",
2097 			    vha->host_no, __func__, halt_status1,
2098 			    halt_status2);
2099 			status = QLA_FUNCTION_FAILED;
2100 		}
2101 	} else
2102 		vha->seconds_since_last_heartbeat = 0;
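	/*
	 * In effect: the counter is sampled on each watchdog pass, and two
	 * consecutive passes with an unchanged PEG alive counter are treated
	 * as a firmware hang, at which point the halt-status registers are
	 * captured for the log message above.
	 */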
2103 
2104 	vha->fw_heartbeat_counter = fw_heartbeat_counter;
2105 	return status;
2106 }
2107 
2108 void
2109 qla8044_watchdog(struct scsi_qla_host *vha)
2110 {
2111 	uint32_t dev_state, halt_status;
2112 	int halt_status_unrecoverable = 0;
2113 	struct qla_hw_data *ha = vha->hw;
2114 
2115 	/* don't poll if reset is going on or FW hang in quiescent state */
2116 	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2117 	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2118 		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2119 
2120 		if (qla8044_check_fw_alive(vha)) {
2121 			ha->flags.isp82xx_fw_hung = 1;
2122 			ql_log(ql_log_warn, vha, 0xb10a,
2123 			    "Firmware hung.\n");
2124 			qla82xx_clear_pending_mbx(vha);
2125 		}
2126 
2127 		if (qla8044_check_temp(vha)) {
2128 			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
2129 			ha->flags.isp82xx_fw_hung = 1;
2130 			qla2xxx_wake_dpc(vha);
2131 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2132 			   !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
2133 			ql_log(ql_log_info, vha, 0xb0d6,
2134 			    "%s: HW State: NEED RESET!\n",
2135 			    __func__);
2136 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2137 			qla2xxx_wake_dpc(vha);
2138 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2139 		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
2140 			ql_log(ql_log_info, vha, 0xb0d7,
2141 			    "%s: HW State: NEED QUIES detected!\n",
2142 			    __func__);
2143 			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
2144 			qla2xxx_wake_dpc(vha);
2145 		} else  {
2146 			/* Check firmware health */
2147 			if (ha->flags.isp82xx_fw_hung) {
2148 				halt_status = qla8044_rd_direct(vha,
2149 					QLA8044_PEG_HALT_STATUS1_INDEX);
2150 				if (halt_status &
2151 				    QLA8044_HALT_STATUS_FW_RESET) {
2152 					ql_log(ql_log_fatal, vha,
2153 					    0xb0d8, "%s: Firmware "
2154 					    "error detected device "
2155 					    "is being reset\n",
2156 					    __func__);
2157 				} else if (halt_status &
2158 					    QLA8044_HALT_STATUS_UNRECOVERABLE) {
2159 						halt_status_unrecoverable = 1;
2160 				}
2161 
2162 				/* Since we cannot change dev_state in interrupt
2163 				 * context, set appropriate DPC flag then wakeup
2164 				 *  DPC */
2165 				if (halt_status_unrecoverable) {
2166 					set_bit(ISP_UNRECOVERABLE,
2167 					    &vha->dpc_flags);
2168 				} else {
2169 					if (dev_state ==
2170 					    QLA8XXX_DEV_QUIESCENT) {
2171 						set_bit(FCOE_CTX_RESET_NEEDED,
2172 						    &vha->dpc_flags);
2173 						ql_log(ql_log_info, vha, 0xb0d9,
2174 						    "%s: FW CONTEXT Reset "
2175 						    "needed!\n", __func__);
2176 					} else {
2177 						ql_log(ql_log_info, vha,
2178 						    0xb0da, "%s: "
2179 						    "detect abort needed\n",
2180 						    __func__);
2181 						set_bit(ISP_ABORT_NEEDED,
2182 						    &vha->dpc_flags);
2183 					}
2184 				}
2185 				qla2xxx_wake_dpc(vha);
2186 			}
2187 		}
2188 
2189 	}
2190 }
2191 
2192 static int
2193 qla8044_minidump_process_control(struct scsi_qla_host *vha,
2194 				 struct qla8044_minidump_entry_hdr *entry_hdr)
2195 {
2196 	struct qla8044_minidump_entry_crb *crb_entry;
2197 	uint32_t read_value, opcode, poll_time, addr, index;
2198 	uint32_t crb_addr, rval = QLA_SUCCESS;
2199 	unsigned long wtime;
2200 	struct qla8044_minidump_template_hdr *tmplt_hdr;
2201 	int i;
2202 	struct qla_hw_data *ha = vha->hw;
2203 
2204 	ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
2205 	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
2206 		ha->md_tmplt_hdr;
2207 	crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
2208 
2209 	crb_addr = crb_entry->addr;
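	/*
	 * Each control entry carries a bitmask of opcodes; every iteration
	 * below applies the requested operations (write, read/modify/write,
	 * AND/OR, poll, save/restore state, modify state) to crb_addr and
	 * then advances crb_addr by the entry's stride.
	 */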
2210 	for (i = 0; i < crb_entry->op_count; i++) {
2211 		opcode = crb_entry->crb_ctrl.opcode;
2212 
2213 		if (opcode & QLA82XX_DBG_OPCODE_WR) {
2214 			qla8044_wr_reg_indirect(vha, crb_addr,
2215 			    crb_entry->value_1);
2216 			opcode &= ~QLA82XX_DBG_OPCODE_WR;
2217 		}
2218 
2219 		if (opcode & QLA82XX_DBG_OPCODE_RW) {
2220 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2221 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2222 			opcode &= ~QLA82XX_DBG_OPCODE_RW;
2223 		}
2224 
2225 		if (opcode & QLA82XX_DBG_OPCODE_AND) {
2226 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2227 			read_value &= crb_entry->value_2;
2228 			opcode &= ~QLA82XX_DBG_OPCODE_AND;
2229 			if (opcode & QLA82XX_DBG_OPCODE_OR) {
2230 				read_value |= crb_entry->value_3;
2231 				opcode &= ~QLA82XX_DBG_OPCODE_OR;
2232 			}
2233 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2234 		}
2235 		if (opcode & QLA82XX_DBG_OPCODE_OR) {
2236 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2237 			read_value |= crb_entry->value_3;
2238 			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2239 			opcode &= ~QLA82XX_DBG_OPCODE_OR;
2240 		}
2241 		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
2242 			poll_time = crb_entry->crb_strd.poll_timeout;
2243 			wtime = jiffies + poll_time;
2244 			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2245 
2246 			do {
2247 				if ((read_value & crb_entry->value_2) ==
2248 				    crb_entry->value_1) {
2249 					break;
2250 				} else if (time_after_eq(jiffies, wtime)) {
2251 					/* capturing dump failed */
2252 					rval = QLA_FUNCTION_FAILED;
2253 					break;
2254 				} else {
2255 					qla8044_rd_reg_indirect(vha,
2256 					    crb_addr, &read_value);
2257 				}
2258 			} while (1);
2259 			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
2260 		}
2261 
2262 		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
2263 			if (crb_entry->crb_strd.state_index_a) {
2264 				index = crb_entry->crb_strd.state_index_a;
2265 				addr = tmplt_hdr->saved_state_array[index];
2266 			} else {
2267 				addr = crb_addr;
2268 			}
2269 
2270 			qla8044_rd_reg_indirect(vha, addr, &read_value);
2271 			index = crb_entry->crb_ctrl.state_index_v;
2272 			tmplt_hdr->saved_state_array[index] = read_value;
2273 			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
2274 		}
2275 
2276 		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
2277 			if (crb_entry->crb_strd.state_index_a) {
2278 				index = crb_entry->crb_strd.state_index_a;
2279 				addr = tmplt_hdr->saved_state_array[index];
2280 			} else {
2281 				addr = crb_addr;
2282 			}
2283 
2284 			if (crb_entry->crb_ctrl.state_index_v) {
2285 				index = crb_entry->crb_ctrl.state_index_v;
2286 				read_value =
2287 				    tmplt_hdr->saved_state_array[index];
2288 			} else {
2289 				read_value = crb_entry->value_1;
2290 			}
2291 
2292 			qla8044_wr_reg_indirect(vha, addr, read_value);
2293 			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
2294 		}
2295 
2296 		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
2297 			index = crb_entry->crb_ctrl.state_index_v;
2298 			read_value = tmplt_hdr->saved_state_array[index];
2299 			read_value <<= crb_entry->crb_ctrl.shl;
2300 			read_value >>= crb_entry->crb_ctrl.shr;
2301 			if (crb_entry->value_2)
2302 				read_value &= crb_entry->value_2;
2303 			read_value |= crb_entry->value_3;
2304 			read_value += crb_entry->value_1;
2305 			tmplt_hdr->saved_state_array[index] = read_value;
2306 			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
2307 		}
2308 		crb_addr += crb_entry->crb_strd.addr_stride;
2309 	}
2310 	return rval;
2311 }
2312 
2313 static void
2314 qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
2315 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2316 {
2317 	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2318 	struct qla8044_minidump_entry_crb *crb_hdr;
2319 	uint32_t *data_ptr = *d_ptr;
2320 
2321 	ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
2322 	crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
2323 	r_addr = crb_hdr->addr;
2324 	r_stride = crb_hdr->crb_strd.addr_stride;
2325 	loop_cnt = crb_hdr->op_count;
2326 
2327 	for (i = 0; i < loop_cnt; i++) {
2328 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2329 		*data_ptr++ = r_addr;
2330 		*data_ptr++ = r_value;
2331 		r_addr += r_stride;
2332 	}
2333 	*d_ptr = data_ptr;
2334 }
2335 
2336 static int
2337 qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
2338 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2339 {
2340 	uint32_t r_addr, r_value, r_data;
2341 	uint32_t i, j, loop_cnt;
2342 	struct qla8044_minidump_entry_rdmem *m_hdr;
2343 	unsigned long flags;
2344 	uint32_t *data_ptr = *d_ptr;
2345 	struct qla_hw_data *ha = vha->hw;
2346 
2347 	ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
2348 	m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
2349 	r_addr = m_hdr->read_addr;
2350 	loop_cnt = m_hdr->read_data_size/16;
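	/*
	 * The MIU test agent moves 16 bytes (four dwords) per transaction, so
	 * e.g. a 0x100-byte rdmem request is served by 16 iterations of the
	 * loop below.
	 */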
2351 
2352 	ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
2353 	    "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2354 	    __func__, r_addr, m_hdr->read_data_size);
2355 
2356 	if (r_addr & 0xf) {
2357 		ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
2358 		    "[%s]: Read addr 0x%x not 16 bytes aligned\n",
2359 		    __func__, r_addr);
2360 		return QLA_FUNCTION_FAILED;
2361 	}
2362 
2363 	if (m_hdr->read_data_size % 16) {
2364 		ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
2365 		    "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2366 		    __func__, m_hdr->read_data_size);
2367 		return QLA_FUNCTION_FAILED;
2368 	}
2369 
2370 	ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
2371 	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2372 	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
2373 
2374 	write_lock_irqsave(&ha->hw_lock, flags);
2375 	for (i = 0; i < loop_cnt; i++) {
2376 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
2377 		r_value = 0;
2378 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
2379 		r_value = MIU_TA_CTL_ENABLE;
2380 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2381 		r_value = MIU_TA_CTL_START_ENABLE;
2382 		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2383 
2384 		for (j = 0; j < MAX_CTL_CHECK; j++) {
2385 			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
2386 			    &r_value);
2387 			if ((r_value & MIU_TA_CTL_BUSY) == 0)
2388 				break;
2389 		}
2390 
2391 		if (j >= MAX_CTL_CHECK) {
2392 			write_unlock_irqrestore(&ha->hw_lock, flags);
2393 			return QLA_SUCCESS;
2394 		}
2395 
2396 		for (j = 0; j < 4; j++) {
2397 			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
2398 			    &r_data);
2399 			*data_ptr++ = r_data;
2400 		}
2401 
2402 		r_addr += 16;
2403 	}
2404 	write_unlock_irqrestore(&ha->hw_lock, flags);
2405 
2406 	ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
2407 	    "Leaving fn: %s datacount: 0x%x\n",
2408 	     __func__, (loop_cnt * 16));
2409 
2410 	*d_ptr = data_ptr;
2411 	return QLA_SUCCESS;
2412 }
2413 
2414 /* ISP83xx flash read for _RDROM _BOARD */
2415 static uint32_t
2416 qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
2417 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2418 {
2419 	uint32_t fl_addr, u32_count, rval;
2420 	struct qla8044_minidump_entry_rdrom *rom_hdr;
2421 	uint32_t *data_ptr = *d_ptr;
2422 
2423 	rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
2424 	fl_addr = rom_hdr->read_addr;
2425 	u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
2426 
2427 	ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2428 	    __func__, fl_addr, u32_count);
2429 
2430 	rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
2431 	    (u8 *)(data_ptr), u32_count);
2432 
2433 	if (rval != QLA_SUCCESS) {
2434 		ql_log(ql_log_fatal, vha, 0xb0f6,
2435 		    "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
2436 		return QLA_FUNCTION_FAILED;
2437 	} else {
2438 		data_ptr += u32_count;
2439 		*d_ptr = data_ptr;
2440 		return QLA_SUCCESS;
2441 	}
2442 }
2443 
2444 static void
2445 qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
2446 	struct qla8044_minidump_entry_hdr *entry_hdr, int index)
2447 {
2448 	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2449 
2450 	ql_log(ql_log_info, vha, 0xb0f7,
2451 	    "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2452 	    vha->host_no, index, entry_hdr->entry_type,
2453 	    entry_hdr->d_ctrl.entry_capture_mask);
2454 }
2455 
2456 static int
2457 qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
2458 	struct qla8044_minidump_entry_hdr *entry_hdr,
2459 				 uint32_t **d_ptr)
2460 {
2461 	uint32_t addr, r_addr, c_addr, t_r_addr;
2462 	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2463 	unsigned long p_wait, w_time, p_mask;
2464 	uint32_t c_value_w, c_value_r;
2465 	struct qla8044_minidump_entry_cache *cache_hdr;
2466 	int rval = QLA_FUNCTION_FAILED;
2467 	uint32_t *data_ptr = *d_ptr;
2468 
2469 	ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
2470 	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2471 
2472 	loop_count = cache_hdr->op_count;
2473 	r_addr = cache_hdr->read_addr;
2474 	c_addr = cache_hdr->control_addr;
2475 	c_value_w = cache_hdr->cache_ctrl.write_value;
2476 
2477 	t_r_addr = cache_hdr->tag_reg_addr;
2478 	t_value = cache_hdr->addr_ctrl.init_tag_value;
2479 	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2480 	p_wait = cache_hdr->cache_ctrl.poll_wait;
2481 	p_mask = cache_hdr->cache_ctrl.poll_mask;
2482 
2483 	for (i = 0; i < loop_count; i++) {
2484 		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2485 		if (c_value_w)
2486 			qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2487 
2488 		if (p_mask) {
2489 			w_time = jiffies + p_wait;
2490 			do {
2491 				qla8044_rd_reg_indirect(vha, c_addr,
2492 				    &c_value_r);
2493 				if ((c_value_r & p_mask) == 0) {
2494 					break;
2495 				} else if (time_after_eq(jiffies, w_time)) {
2496 					/* capturing dump failed */
2497 					return rval;
2498 				}
2499 			} while (1);
2500 		}
2501 
2502 		addr = r_addr;
2503 		for (k = 0; k < r_cnt; k++) {
2504 			qla8044_rd_reg_indirect(vha, addr, &r_value);
2505 			*data_ptr++ = r_value;
2506 			addr += cache_hdr->read_ctrl.read_addr_stride;
2507 		}
2508 		t_value += cache_hdr->addr_ctrl.tag_value_stride;
2509 	}
2510 	*d_ptr = data_ptr;
2511 	return QLA_SUCCESS;
2512 }
2513 
2514 static void
2515 qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
2516 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2517 {
2518 	uint32_t addr, r_addr, c_addr, t_r_addr;
2519 	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2520 	uint32_t c_value_w;
2521 	struct qla8044_minidump_entry_cache *cache_hdr;
2522 	uint32_t *data_ptr = *d_ptr;
2523 
2524 	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2525 	loop_count = cache_hdr->op_count;
2526 	r_addr = cache_hdr->read_addr;
2527 	c_addr = cache_hdr->control_addr;
2528 	c_value_w = cache_hdr->cache_ctrl.write_value;
2529 
2530 	t_r_addr = cache_hdr->tag_reg_addr;
2531 	t_value = cache_hdr->addr_ctrl.init_tag_value;
2532 	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2533 
2534 	for (i = 0; i < loop_count; i++) {
2535 		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2536 		qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2537 		addr = r_addr;
2538 		for (k = 0; k < r_cnt; k++) {
2539 			qla8044_rd_reg_indirect(vha, addr, &r_value);
2540 			*data_ptr++ = r_value;
2541 			addr += cache_hdr->read_ctrl.read_addr_stride;
2542 		}
2543 		t_value += cache_hdr->addr_ctrl.tag_value_stride;
2544 	}
2545 	*d_ptr = data_ptr;
2546 }
2547 
2548 static void
2549 qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
2550 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2551 {
2552 	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2553 	struct qla8044_minidump_entry_rdocm *ocm_hdr;
2554 	uint32_t *data_ptr = *d_ptr;
2555 	struct qla_hw_data *ha = vha->hw;
2556 
2557 	ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
2558 
2559 	ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
2560 	r_addr = ocm_hdr->read_addr;
2561 	r_stride = ocm_hdr->read_addr_stride;
2562 	loop_cnt = ocm_hdr->op_count;
2563 
2564 	ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
2565 	    "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2566 	    __func__, r_addr, r_stride, loop_cnt);
2567 
2568 	for (i = 0; i < loop_cnt; i++) {
2569 		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2570 		*data_ptr++ = r_value;
2571 		r_addr += r_stride;
2572 	}
2573 	ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
2574 	    __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
2575 
2576 	*d_ptr = data_ptr;
2577 }
2578 
2579 static void
2580 qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
2581 	struct qla8044_minidump_entry_hdr *entry_hdr,
2582 	uint32_t **d_ptr)
2583 {
2584 	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
2585 	struct qla8044_minidump_entry_mux *mux_hdr;
2586 	uint32_t *data_ptr = *d_ptr;
2587 
2588 	ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
2589 
2590 	mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
2591 	r_addr = mux_hdr->read_addr;
2592 	s_addr = mux_hdr->select_addr;
2593 	s_stride = mux_hdr->select_value_stride;
2594 	s_value = mux_hdr->select_value;
2595 	loop_cnt = mux_hdr->op_count;
2596 
2597 	for (i = 0; i < loop_cnt; i++) {
2598 		qla8044_wr_reg_indirect(vha, s_addr, s_value);
2599 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2600 		*data_ptr++ = s_value;
2601 		*data_ptr++ = r_value;
2602 		s_value += s_stride;
2603 	}
2604 	*d_ptr = data_ptr;
2605 }
2606 
2607 static void
2608 qla8044_minidump_process_queue(struct scsi_qla_host *vha,
2609 	struct qla8044_minidump_entry_hdr *entry_hdr,
2610 	uint32_t **d_ptr)
2611 {
2612 	uint32_t s_addr, r_addr;
2613 	uint32_t r_stride, r_value, r_cnt, qid = 0;
2614 	uint32_t i, k, loop_cnt;
2615 	struct qla8044_minidump_entry_queue *q_hdr;
2616 	uint32_t *data_ptr = *d_ptr;
2617 
2618 	ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
2619 	q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
2620 	s_addr = q_hdr->select_addr;
2621 	r_cnt = q_hdr->rd_strd.read_addr_cnt;
2622 	r_stride = q_hdr->rd_strd.read_addr_stride;
2623 	loop_cnt = q_hdr->op_count;
2624 
2625 	for (i = 0; i < loop_cnt; i++) {
2626 		qla8044_wr_reg_indirect(vha, s_addr, qid);
2627 		r_addr = q_hdr->read_addr;
2628 		for (k = 0; k < r_cnt; k++) {
2629 			qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2630 			*data_ptr++ = r_value;
2631 			r_addr += r_stride;
2632 		}
2633 		qid += q_hdr->q_strd.queue_id_stride;
2634 	}
2635 	*d_ptr = data_ptr;
2636 }
2637 
2638 /* ISP83xx functions to process new minidump entries... */
2639 static uint32_t
2640 qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
2641 	struct qla8044_minidump_entry_hdr *entry_hdr,
2642 	uint32_t **d_ptr)
2643 {
2644 	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2645 	uint16_t s_stride, i;
2646 	struct qla8044_minidump_entry_pollrd *pollrd_hdr;
2647 	uint32_t *data_ptr = *d_ptr;
2648 
2649 	pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
2650 	s_addr = pollrd_hdr->select_addr;
2651 	r_addr = pollrd_hdr->read_addr;
2652 	s_value = pollrd_hdr->select_value;
2653 	s_stride = pollrd_hdr->select_value_stride;
2654 
2655 	poll_wait = pollrd_hdr->poll_wait;
2656 	poll_mask = pollrd_hdr->poll_mask;
2657 
2658 	for (i = 0; i < pollrd_hdr->op_count; i++) {
2659 		qla8044_wr_reg_indirect(vha, s_addr, s_value);
2660 		poll_wait = pollrd_hdr->poll_wait;
2661 		while (1) {
2662 			qla8044_rd_reg_indirect(vha, s_addr, &r_value);
2663 			if ((r_value & poll_mask) != 0) {
2664 				break;
2665 			} else {
2666 				usleep_range(1000, 1100);
2667 				if (--poll_wait == 0) {
2668 					ql_log(ql_log_fatal, vha, 0xb0fe,
2669 					    "%s: TIMEOUT\n", __func__);
2670 					goto error;
2671 				}
2672 			}
2673 		}
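		/*
		 * poll_wait is decremented once per ~1 ms sleep above, so a
		 * template value of e.g. 100 bounds the wait to roughly
		 * 100 ms before the TIMEOUT path is taken.
		 */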
2674 		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2675 		*data_ptr++ = s_value;
2676 		*data_ptr++ = r_value;
2677 
2678 		s_value += s_stride;
2679 	}
2680 	*d_ptr = data_ptr;
2681 	return QLA_SUCCESS;
2682 
2683 error:
2684 	return QLA_FUNCTION_FAILED;
2685 }
2686 
2687 static void
2688 qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
2689 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2690 {
2691 	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2692 	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2693 	struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
2694 	uint32_t *data_ptr = *d_ptr;
2695 
2696 	rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
2697 	sel_val1 = rdmux2_hdr->select_value_1;
2698 	sel_val2 = rdmux2_hdr->select_value_2;
2699 	sel_addr1 = rdmux2_hdr->select_addr_1;
2700 	sel_addr2 = rdmux2_hdr->select_addr_2;
2701 	sel_val_mask = rdmux2_hdr->select_value_mask;
2702 	read_addr = rdmux2_hdr->read_addr;
2703 
2704 	for (i = 0; i < rdmux2_hdr->op_count; i++) {
2705 		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
2706 		t_sel_val = sel_val1 & sel_val_mask;
2707 		*data_ptr++ = t_sel_val;
2708 
2709 		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2710 		qla8044_rd_reg_indirect(vha, read_addr, &data);
2711 
2712 		*data_ptr++ = data;
2713 
2714 		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
2715 		t_sel_val = sel_val2 & sel_val_mask;
2716 		*data_ptr++ = t_sel_val;
2717 
2718 		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2719 		qla8044_rd_reg_indirect(vha, read_addr, &data);
2720 
2721 		*data_ptr++ = data;
2722 
2723 		sel_val1 += rdmux2_hdr->select_value_stride;
2724 		sel_val2 += rdmux2_hdr->select_value_stride;
2725 	}
2726 
2727 	*d_ptr = data_ptr;
2728 }
2729 
2730 static uint32_t
2731 qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
2732 	struct qla8044_minidump_entry_hdr *entry_hdr,
2733 	uint32_t **d_ptr)
2734 {
2735 	uint32_t poll_wait, poll_mask, r_value, data;
2736 	uint32_t addr_1, addr_2, value_1, value_2;
2737 	struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
2738 	uint32_t *data_ptr = *d_ptr;
2739 
2740 	poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
2741 	addr_1 = poll_hdr->addr_1;
2742 	addr_2 = poll_hdr->addr_2;
2743 	value_1 = poll_hdr->value_1;
2744 	value_2 = poll_hdr->value_2;
2745 	poll_mask = poll_hdr->poll_mask;
2746 
2747 	qla8044_wr_reg_indirect(vha, addr_1, value_1);
2748 
2749 	poll_wait = poll_hdr->poll_wait;
2750 	while (1) {
2751 		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2752 
2753 		if ((r_value & poll_mask) != 0) {
2754 			break;
2755 		} else {
2756 			usleep_range(1000, 1100);
2757 			if (--poll_wait == 0) {
2758 				ql_log(ql_log_fatal, vha, 0xb0ff,
2759 				    "%s: TIMEOUT\n", __func__);
2760 				goto error;
2761 			}
2762 		}
2763 	}
2764 
2765 	qla8044_rd_reg_indirect(vha, addr_2, &data);
2766 	data &= poll_hdr->modify_mask;
2767 	qla8044_wr_reg_indirect(vha, addr_2, data);
2768 	qla8044_wr_reg_indirect(vha, addr_1, value_2);
2769 
2770 	poll_wait = poll_hdr->poll_wait;
2771 	while (1) {
2772 		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2773 
2774 		if ((r_value & poll_mask) != 0) {
2775 			break;
2776 		} else {
2777 			usleep_range(1000, 1100);
2778 			if (--poll_wait == 0) {
2779 				ql_log(ql_log_fatal, vha, 0xb100,
2780 				    "%s: TIMEOUT2\n", __func__);
2781 				goto error;
2782 			}
2783 		}
2784 	}
2785 
2786 	*data_ptr++ = addr_2;
2787 	*data_ptr++ = data;
2788 
2789 	*d_ptr = data_ptr;
2790 
2791 	return QLA_SUCCESS;
2792 
2793 error:
2794 	return QLA_FUNCTION_FAILED;
2795 }
2796 
2797 #define ISP8044_PEX_DMA_ENGINE_INDEX		8
2798 #define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
2799 #define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
2800 #define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
2801 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
2802 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08
2803 
2804 #define ISP8044_PEX_DMA_READ_SIZE	(16 * 1024)
2805 #define ISP8044_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
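/*
 * The DMA engine registers live at ISP8044_PEX_DMA_BASE_ADDRESS plus an
 * ISP8044_PEX_DMA_NUM_OFFSET-sized window per engine; e.g. engine 1 (as
 * selected by the template's saved_state_array entry) is programmed at
 * 0x77330000.
 */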
2806 
2807 static int
2808 qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
2809 {
2810 	struct qla_hw_data *ha = vha->hw;
2811 	int rval = QLA_SUCCESS;
2812 	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2813 	uint64_t dma_base_addr = 0;
2814 	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2815 
2816 	tmplt_hdr = ha->md_tmplt_hdr;
2817 	dma_eng_num =
2818 	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2819 	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2820 		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2821 
2822 	/* Read the pex-dma's command-status-and-control register. */
2823 	rval = qla8044_rd_reg_indirect(vha,
2824 	    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2825 	    &cmd_sts_and_cntrl);
2826 	if (rval)
2827 		return QLA_FUNCTION_FAILED;
2828 
2829 	/* Check if requested pex-dma engine is available. */
2830 	if (cmd_sts_and_cntrl & BIT_31)
2831 		return QLA_SUCCESS;
2832 
2833 	return QLA_FUNCTION_FAILED;
2834 }
2835 
2836 static int
2837 qla8044_start_pex_dma(struct scsi_qla_host *vha,
2838 	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
2839 {
2840 	struct qla_hw_data *ha = vha->hw;
2841 	int rval = QLA_SUCCESS, wait = 0;
2842 	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2843 	uint64_t dma_base_addr = 0;
2844 	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2845 
2846 	tmplt_hdr = ha->md_tmplt_hdr;
2847 	dma_eng_num =
2848 	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2849 	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2850 		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2851 
2852 	rval = qla8044_wr_reg_indirect(vha,
2853 	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
2854 	    m_hdr->desc_card_addr);
2855 	if (rval)
2856 		goto error_exit;
2857 
2858 	rval = qla8044_wr_reg_indirect(vha,
2859 	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
2860 	if (rval)
2861 		goto error_exit;
2862 
2863 	rval = qla8044_wr_reg_indirect(vha,
2864 	    dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
2865 	    m_hdr->start_dma_cmd);
2866 	if (rval)
2867 		goto error_exit;
2868 
2869 	/* Wait for dma operation to complete. */
2870 	for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
2871 		rval = qla8044_rd_reg_indirect(vha,
2872 		    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2873 		    &cmd_sts_and_cntrl);
2874 		if (rval)
2875 			goto error_exit;
2876 
2877 		if ((cmd_sts_and_cntrl & BIT_1) == 0)
2878 			break;
2879 
2880 		udelay(10);
2881 	}
2882 
2883 	/* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
2884 	if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
2885 		rval = QLA_FUNCTION_FAILED;
2886 		goto error_exit;
2887 	}
2888 
2889 error_exit:
2890 	return rval;
2891 }
2892 
2893 static int
2894 qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
2895 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2896 {
2897 	struct qla_hw_data *ha = vha->hw;
2898 	int rval = QLA_SUCCESS;
2899 	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
2900 	uint32_t chunk_size, read_size;
2901 	uint8_t *data_ptr = (uint8_t *)*d_ptr;
2902 	void *rdmem_buffer = NULL;
2903 	dma_addr_t rdmem_dma;
2904 	struct qla8044_pex_dma_descriptor dma_desc;
2905 
2906 	rval = qla8044_check_dma_engine_state(vha);
2907 	if (rval != QLA_SUCCESS) {
2908 		ql_dbg(ql_dbg_p3p, vha, 0xb147,
2909 		    "DMA engine not available. Fallback to rdmem-read.\n");
2910 		return QLA_FUNCTION_FAILED;
2911 	}
2912 
2913 	m_hdr = (void *)entry_hdr;
2914 
2915 	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
2916 	    ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
2917 	if (!rdmem_buffer) {
2918 		ql_dbg(ql_dbg_p3p, vha, 0xb148,
2919 		    "Unable to allocate rdmem dma buffer\n");
2920 		return QLA_FUNCTION_FAILED;
2921 	}
2922 
2923 	/* Prepare pex-dma descriptor to be written to MS memory. */
2924 	/* dma-desc-cmd layout:
2925 	 *		0-3: dma-desc-cmd 0-3
2926 	 *		4-7: pcid function number
2927 	 *		8-15: dma-desc-cmd 8-15
2928 	 * dma_bus_addr: dma buffer address
2929 	 * cmd.read_data_size: amount of data-chunk to be read.
2930 	 */
2931 	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
2932 	dma_desc.cmd.dma_desc_cmd |=
2933 	    ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
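	/*
	 * e.g. a template dma_desc_cmd of 0x0105 (example value) issued by
	 * PCI function 2 becomes 0x0125: bits 7:4 are replaced with the
	 * function number while the rest of the command word is preserved.
	 */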
2934 
2935 	dma_desc.dma_bus_addr = rdmem_dma;
2936 	dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
2937 	read_size = 0;
2938 
2939 	/*
2940 	 * Perform rdmem operation using pex-dma.
2941 	 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
2942 	 */
2943 	while (read_size < m_hdr->read_data_size) {
2944 		if (m_hdr->read_data_size - read_size <
2945 		    ISP8044_PEX_DMA_READ_SIZE) {
2946 			chunk_size = (m_hdr->read_data_size - read_size);
2947 			dma_desc.cmd.read_data_size = chunk_size;
2948 		}
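		/*
		 * e.g. a 0x9000-byte region is read as two full 0x4000-byte
		 * chunks followed by a 0x1000-byte tail chunk.
		 */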
2949 
2950 		dma_desc.src_addr = m_hdr->read_addr + read_size;
2951 
2952 		/* Prepare: Write pex-dma descriptor to MS memory. */
2953 		rval = qla8044_ms_mem_write_128b(vha,
2954 		    m_hdr->desc_card_addr, (void *)&dma_desc,
2955 		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
2956 		if (rval) {
2957 			ql_log(ql_log_warn, vha, 0xb14a,
2958 			    "%s: Error writing rdmem-dma-init to MS !!!\n",
2959 			    __func__);
2960 			goto error_exit;
2961 		}
2962 		ql_dbg(ql_dbg_p3p, vha, 0xb14b,
2963 		    "%s: Dma-descriptor: Instruct for rdmem dma "
2964 		    "(chunk_size 0x%x).\n", __func__, chunk_size);
2965 
2966 		/* Execute: Start pex-dma operation. */
2967 		rval = qla8044_start_pex_dma(vha, m_hdr);
2968 		if (rval)
2969 			goto error_exit;
2970 
2971 		memcpy(data_ptr, rdmem_buffer, chunk_size);
2972 		data_ptr += chunk_size;
2973 		read_size += chunk_size;
2974 	}
2975 
2976 	*d_ptr = (void *)data_ptr;
2977 
2978 error_exit:
2979 	if (rdmem_buffer)
2980 		dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
2981 		    rdmem_buffer, rdmem_dma);
2982 
2983 	return rval;
2984 }
2985 
2986 static uint32_t
2987 qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
2988 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2989 {
2990 	int loop_cnt;
2991 	uint32_t addr1, addr2, value, data, temp, wrVal;
2992 	uint8_t stride, stride2;
2993 	uint16_t count;
2994 	uint32_t poll, mask, modify_mask;
2995 	uint32_t wait_count = 0;
2996 
2997 	uint32_t *data_ptr = *d_ptr;
2998 
2999 	struct qla8044_minidump_entry_rddfe *rddfe;
3000 	rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
3001 
3002 	addr1 = rddfe->addr_1;
3003 	value = rddfe->value;
3004 	stride = rddfe->stride;
3005 	stride2 = rddfe->stride2;
3006 	count = rddfe->count;
3007 
3008 	poll = rddfe->poll;
3009 	mask = rddfe->mask;
3010 	modify_mask = rddfe->modify_mask;
3011 
3012 	addr2 = addr1 + stride;
3013 
3014 	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
3015 		qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
3016 
3017 		wait_count = 0;
3018 		while (wait_count < poll) {
3019 			qla8044_rd_reg_indirect(vha, addr1, &temp);
3020 			if ((temp & mask) != 0)
3021 				break;
3022 			wait_count++;
3023 		}
3024 
3025 		if (wait_count == poll) {
3026 			ql_log(ql_log_warn, vha, 0xb153,
3027 			    "%s: TIMEOUT\n", __func__);
3028 			goto error;
3029 		} else {
3030 			qla8044_rd_reg_indirect(vha, addr2, &temp);
3031 			temp = temp & modify_mask;
3032 			temp = (temp | ((loop_cnt << 16) | loop_cnt));
3033 			wrVal = ((temp << 16) | temp);
3034 
3035 			qla8044_wr_reg_indirect(vha, addr2, wrVal);
3036 			qla8044_wr_reg_indirect(vha, addr1, value);
3037 
3038 			wait_count = 0;
3039 			while (wait_count < poll) {
3040 				qla8044_rd_reg_indirect(vha, addr1, &temp);
3041 				if ((temp & mask) != 0)
3042 					break;
3043 				wait_count++;
3044 			}
3045 			if (wait_count == poll) {
3046 				ql_log(ql_log_warn, vha, 0xb154,
3047 				    "%s: TIMEOUT\n", __func__);
3048 				goto error;
3049 			}
3050 
3051 			qla8044_wr_reg_indirect(vha, addr1,
3052 			    ((0x40000000 | value) + stride2));
3053 			wait_count = 0;
3054 			while (wait_count < poll) {
3055 				qla8044_rd_reg_indirect(vha, addr1, &temp);
3056 				if ((temp & mask) != 0)
3057 					break;
3058 				wait_count++;
3059 			}
3060 
3061 			if (wait_count == poll) {
3062 				ql_log(ql_log_warn, vha, 0xb155,
3063 				    "%s: TIMEOUT\n", __func__);
3064 				goto error;
3065 			}
3066 
3067 			qla8044_rd_reg_indirect(vha, addr2, &data);
3068 
3069 			*data_ptr++ = wrVal;
3070 			*data_ptr++ = data;
3071 		}
3072 
3073 	}
3074 
3075 	*d_ptr = data_ptr;
3076 	return QLA_SUCCESS;
3077 
3078 error:
3079 	return -1;
3080 
3081 }
3082 
3083 static uint32_t
3084 qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
3085 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3086 {
3087 	int ret = 0;
3088 	uint32_t addr1, addr2, value1, value2, data, selVal;
3089 	uint8_t stride1, stride2;
3090 	uint32_t addr3, addr4, addr5, addr6, addr7;
3091 	uint16_t count, loop_cnt;
3092 	uint32_t mask;
3093 	uint32_t *data_ptr = *d_ptr;
3094 
3095 	struct qla8044_minidump_entry_rdmdio *rdmdio;
3096 
3097 	rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
3098 
3099 	addr1 = rdmdio->addr_1;
3100 	addr2 = rdmdio->addr_2;
3101 	value1 = rdmdio->value_1;
3102 	stride1 = rdmdio->stride_1;
3103 	stride2 = rdmdio->stride_2;
3104 	count = rdmdio->count;
3105 
3106 	mask = rdmdio->mask;
3107 	value2 = rdmdio->value_2;
3108 
3109 	addr3 = addr1 + stride1;
3110 
3111 	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
3112 		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3113 		    addr3, mask);
3114 		if (ret == -1)
3115 			goto error;
3116 
3117 		addr4 = addr2 - stride1;
3118 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
3119 		    value2);
3120 		if (ret == -1)
3121 			goto error;
3122 
3123 		addr5 = addr2 - (2 * stride1);
3124 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
3125 		    value1);
3126 		if (ret == -1)
3127 			goto error;
3128 
3129 		addr6 = addr2 - (3 * stride1);
3130 		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
3131 		    addr6, 0x2);
3132 		if (ret == -1)
3133 			goto error;
3134 
3135 		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3136 		    addr3, mask);
3137 		if (ret == -1)
3138 			goto error;
3139 
3140 		addr7 = addr2 - (4 * stride1);
3141 		data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
3142 		if (data == -1)
3143 			goto error;
3144 
3145 		selVal = (value2 << 18) | (value1 << 2) | 2;
3146 
3147 		stride2 = rdmdio->stride_2;
3148 		*data_ptr++ = selVal;
3149 		*data_ptr++ = data;
3150 
3151 		value1 = value1 + stride2;
3152 		*d_ptr = data_ptr;
3153 	}
3154 
3155 	return 0;
3156 
3157 error:
3158 	return -1;
3159 }
3160 
3161 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
3162 		struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3163 {
3164 	uint32_t addr1, addr2, value1, value2, poll, r_value;
3165 	uint32_t wait_count = 0;
3166 	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
3167 
3168 	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
3169 	addr1 = pollwr_hdr->addr_1;
3170 	addr2 = pollwr_hdr->addr_2;
3171 	value1 = pollwr_hdr->value_1;
3172 	value2 = pollwr_hdr->value_2;
3173 
3174 	poll = pollwr_hdr->poll;
3175 
3176 	while (wait_count < poll) {
3177 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
3178 
3179 		if ((r_value & poll) != 0)
3180 			break;
3181 		wait_count++;
3182 	}
3183 
3184 	if (wait_count == poll) {
3185 		ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
3186 		goto error;
3187 	}
3188 
3189 	qla8044_wr_reg_indirect(vha, addr2, value2);
3190 	qla8044_wr_reg_indirect(vha, addr1, value1);
3191 
3192 	wait_count = 0;
3193 	while (wait_count < poll) {
3194 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
3195 
3196 		if ((r_value & poll) != 0)
3197 			break;
3198 		wait_count++;
3199 	}
3200 
3201 	return QLA_SUCCESS;
3202 
3203 error:
3204 	return -1;
3205 }
3206 
3207 /**
3208  *
3209  * qla8044_collect_md_data - Retrieve firmware minidump data.
3210  * @vha: pointer to adapter structure
3211  **/
3212 int
3213 qla8044_collect_md_data(struct scsi_qla_host *vha)
3214 {
3215 	int num_entry_hdr = 0;
3216 	struct qla8044_minidump_entry_hdr *entry_hdr;
3217 	struct qla8044_minidump_template_hdr *tmplt_hdr;
3218 	uint32_t *data_ptr;
3219 	uint32_t data_collected = 0, f_capture_mask;
3220 	int i, rval = QLA_FUNCTION_FAILED;
3221 	uint64_t now;
3222 	uint32_t timestamp, idc_control;
3223 	struct qla_hw_data *ha = vha->hw;
3224 
3225 	if (!ha->md_dump) {
3226 		ql_log(ql_log_info, vha, 0xb101,
3227 		    "%s(%ld) No buffer to dump\n",
3228 		    __func__, vha->host_no);
3229 		return rval;
3230 	}
3231 
3232 	if (ha->fw_dumped) {
3233 		ql_log(ql_log_warn, vha, 0xb10d,
3234 		    "Firmware has been previously dumped (%p) "
3235 		    "-- ignoring request.\n", ha->fw_dump);
3236 		goto md_failed;
3237 	}
3238 
3239 	ha->fw_dumped = 0;
3240 
3241 	if (!ha->md_tmplt_hdr || !ha->md_dump) {
3242 		ql_log(ql_log_warn, vha, 0xb10e,
3243 		    "Memory not allocated for minidump capture\n");
3244 		goto md_failed;
3245 	}
3246 
3247 	qla8044_idc_lock(ha);
3248 	idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3249 	if (idc_control & GRACEFUL_RESET_BIT1) {
3250 		ql_log(ql_log_warn, vha, 0xb112,
3251 		    "Forced reset from application, "
3252 		    "ignore minidump capture\n");
3253 		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
3254 		    (idc_control & ~GRACEFUL_RESET_BIT1));
3255 		qla8044_idc_unlock(ha);
3256 
3257 		goto md_failed;
3258 	}
3259 	qla8044_idc_unlock(ha);
3260 
3261 	if (qla82xx_validate_template_chksum(vha)) {
3262 		ql_log(ql_log_info, vha, 0xb109,
3263 		    "Template checksum validation error\n");
3264 		goto md_failed;
3265 	}
3266 
3267 	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
3268 		ha->md_tmplt_hdr;
3269 	data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
3270 	num_entry_hdr = tmplt_hdr->num_of_entries;
3271 
3272 	ql_dbg(ql_dbg_p3p, vha, 0xb11a,
3273 	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
3274 
3275 	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
3276 
3277 	/* Validate whether required debug level is set */
3278 	if ((f_capture_mask & 0x3) != 0x3) {
3279 		ql_log(ql_log_warn, vha, 0xb10f,
3280 		    "Minimum required capture mask[0x%x] level not set\n",
3281 		    f_capture_mask);
3282 
3283 	}
3284 	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
3285 	ql_log(ql_log_info, vha, 0xb102,
3286 	    "[%s]: starting data ptr: %p\n",
3287 	   __func__, data_ptr);
3288 	ql_log(ql_log_info, vha, 0xb10b,
3289 	   "[%s]: no of entry headers in Template: 0x%x\n",
3290 	   __func__, num_entry_hdr);
3291 	ql_log(ql_log_info, vha, 0xb10c,
3292 	    "[%s]: Total_data_size 0x%x, %d obtained\n",
3293 	   __func__, ha->md_dump_size, ha->md_dump_size);
3294 
3295 	/* Update current timestamp before taking dump */
3296 	now = get_jiffies_64();
3297 	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
3298 	tmplt_hdr->driver_timestamp = timestamp;
3299 
3300 	entry_hdr = (struct qla8044_minidump_entry_hdr *)
3301 		(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
3302 	tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
3303 	    tmplt_hdr->ocm_window_reg[ha->portnum];
3304 
3305 	/* Walk through the entry headers - validate/perform required action */
3306 	for (i = 0; i < num_entry_hdr; i++) {
3307 		if (data_collected > ha->md_dump_size) {
3308 			ql_log(ql_log_info, vha, 0xb103,
3309 			    "Data collected: [0x%x], "
3310 			    "Total Dump size: [0x%x]\n",
3311 			    data_collected, ha->md_dump_size);
3312 			return rval;
3313 		}
3314 
3315 		if (!(entry_hdr->d_ctrl.entry_capture_mask &
3316 		      ql2xmdcapmask)) {
3317 			entry_hdr->d_ctrl.driver_flags |=
3318 			    QLA82XX_DBG_SKIPPED_FLAG;
3319 			goto skip_nxt_entry;
3320 		}
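		/*
		 * e.g. an entry whose entry_capture_mask is 0x10 is skipped
		 * when the ql2xmdcapmask module parameter is set to a value
		 * such as 0x03; it is only marked in the template, not
		 * captured.
		 */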
3321 
3322 		ql_dbg(ql_dbg_p3p, vha, 0xb104,
3323 		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
3324 		    data_collected,
3325 		    (ha->md_dump_size - data_collected));
3326 
3327 		/* Decode the entry type and take required action to capture
3328 		 * debug data
3329 		 */
3330 		switch (entry_hdr->entry_type) {
3331 		case QLA82XX_RDEND:
3332 			qla8044_mark_entry_skipped(vha, entry_hdr, i);
3333 			break;
3334 		case QLA82XX_CNTRL:
3335 			rval = qla8044_minidump_process_control(vha,
3336 			    entry_hdr);
3337 			if (rval != QLA_SUCCESS) {
3338 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3339 				goto md_failed;
3340 			}
3341 			break;
3342 		case QLA82XX_RDCRB:
3343 			qla8044_minidump_process_rdcrb(vha,
3344 			    entry_hdr, &data_ptr);
3345 			break;
3346 		case QLA82XX_RDMEM:
3347 			rval = qla8044_minidump_pex_dma_read(vha,
3348 			    entry_hdr, &data_ptr);
3349 			if (rval != QLA_SUCCESS) {
3350 				rval = qla8044_minidump_process_rdmem(vha,
3351 				    entry_hdr, &data_ptr);
3352 				if (rval != QLA_SUCCESS) {
3353 					qla8044_mark_entry_skipped(vha,
3354 					    entry_hdr, i);
3355 					goto md_failed;
3356 				}
3357 			}
3358 			break;
3359 		case QLA82XX_BOARD:
3360 		case QLA82XX_RDROM:
3361 			rval = qla8044_minidump_process_rdrom(vha,
3362 			    entry_hdr, &data_ptr);
3363 			if (rval != QLA_SUCCESS) {
3364 				qla8044_mark_entry_skipped(vha,
3365 				    entry_hdr, i);
3366 			}
3367 			break;
3368 		case QLA82XX_L2DTG:
3369 		case QLA82XX_L2ITG:
3370 		case QLA82XX_L2DAT:
3371 		case QLA82XX_L2INS:
3372 			rval = qla8044_minidump_process_l2tag(vha,
3373 			    entry_hdr, &data_ptr);
3374 			if (rval != QLA_SUCCESS) {
3375 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3376 				goto md_failed;
3377 			}
3378 			break;
3379 		case QLA8044_L1DTG:
3380 		case QLA8044_L1ITG:
3381 		case QLA82XX_L1DAT:
3382 		case QLA82XX_L1INS:
3383 			qla8044_minidump_process_l1cache(vha,
3384 			    entry_hdr, &data_ptr);
3385 			break;
3386 		case QLA82XX_RDOCM:
3387 			qla8044_minidump_process_rdocm(vha,
3388 			    entry_hdr, &data_ptr);
3389 			break;
3390 		case QLA82XX_RDMUX:
3391 			qla8044_minidump_process_rdmux(vha,
3392 			    entry_hdr, &data_ptr);
3393 			break;
3394 		case QLA82XX_QUEUE:
3395 			qla8044_minidump_process_queue(vha,
3396 			    entry_hdr, &data_ptr);
3397 			break;
3398 		case QLA8044_POLLRD:
3399 			rval = qla8044_minidump_process_pollrd(vha,
3400 			    entry_hdr, &data_ptr);
3401 			if (rval != QLA_SUCCESS)
3402 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3403 			break;
3404 		case QLA8044_RDMUX2:
3405 			qla8044_minidump_process_rdmux2(vha,
3406 			    entry_hdr, &data_ptr);
3407 			break;
3408 		case QLA8044_POLLRDMWR:
3409 			rval = qla8044_minidump_process_pollrdmwr(vha,
3410 			    entry_hdr, &data_ptr);
3411 			if (rval != QLA_SUCCESS)
3412 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3413 			break;
3414 		case QLA8044_RDDFE:
3415 			rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
3416 			    &data_ptr);
3417 			if (rval != QLA_SUCCESS)
3418 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3419 			break;
3420 		case QLA8044_RDMDIO:
3421 			rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
3422 			    &data_ptr);
3423 			if (rval != QLA_SUCCESS)
3424 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3425 			break;
3426 		case QLA8044_POLLWR:
3427 			rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
3428 			    &data_ptr);
3429 			if (rval != QLA_SUCCESS)
3430 				qla8044_mark_entry_skipped(vha, entry_hdr, i);
3431 			break;
3432 		case QLA82XX_RDNOP:
3433 		default:
3434 			qla8044_mark_entry_skipped(vha, entry_hdr, i);
3435 			break;
3436 		}
3437 
3438 		data_collected = (uint8_t *)data_ptr -
3439 		    (uint8_t *)ha->md_dump;
3440 skip_nxt_entry:
3441 		/*
3442 		 * next entry in the template
3443 		 */
3444 		entry_hdr = (struct qla8044_minidump_entry_hdr *)
3445 		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
3446 	}
3447 
3448 	if (data_collected != ha->md_dump_size) {
3449 		ql_log(ql_log_info, vha, 0xb105,
3450 		    "Dump data mismatch: Data collected: "
3451 		    "[0x%x], total_data_size:[0x%x]\n",
3452 		    data_collected, ha->md_dump_size);
3453 		rval = QLA_FUNCTION_FAILED;
3454 		goto md_failed;
3455 	}
3456 
3457 	ql_log(ql_log_info, vha, 0xb110,
3458 	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
3459 	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
3460 	ha->fw_dumped = 1;
3461 	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
3462 
3463 
3464 	ql_log(ql_log_info, vha, 0xb106,
3465 	    "Leaving fn: %s Last entry: 0x%x\n",
3466 	    __func__, i);
3467 md_failed:
3468 	return rval;
3469 }
3470 
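/*
 * qla8044_get_minidump - Trigger minidump collection and record the result.
 *
 * @vha : Pointer to host adapter structure
 *
 * On success, sets ha->fw_dumped and clears ha->prev_minidump_failed;
 * on failure, logs the error and sets ha->prev_minidump_failed.
 */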
3471 void
3472 qla8044_get_minidump(struct scsi_qla_host *vha)
3473 {
3474 	struct qla_hw_data *ha = vha->hw;
3475 
3476 	if (!qla8044_collect_md_data(vha)) {
3477 		ha->fw_dumped = 1;
3478 		ha->prev_minidump_failed = 0;
3479 	} else {
3480 		ql_log(ql_log_fatal, vha, 0xb0db,
3481 		    "%s: Unable to collect minidump\n",
3482 		    __func__);
3483 		ha->prev_minidump_failed = 1;
3484 	}
3485 }
3486 
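/*
 * qla8044_poll_flash_status_reg - Poll the FLASH_STATUS register until the
 * ready bits are set or the retry count is exhausted.
 *
 * @vha : Pointer to host adapter structure
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */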
3487 static int
3488 qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
3489 {
3490 	uint32_t flash_status;
3491 	int retries = QLA8044_FLASH_READ_RETRY_COUNT;
3492 	int ret_val = QLA_SUCCESS;
3493 
3494 	while (retries--) {
3495 		ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
3496 		    &flash_status);
3497 		if (ret_val) {
3498 			ql_log(ql_log_warn, vha, 0xb13c,
3499 			    "%s: Failed to read FLASH_STATUS reg.\n",
3500 			    __func__);
3501 			break;
3502 		}
3503 		if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
3504 		    QLA8044_FLASH_STATUS_READY)
3505 			break;
3506 		msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
3507 	}
3508 
3509 	if (retries < 0)
3510 		ret_val = QLA_FUNCTION_FAILED;
3511 
3512 	return ret_val;
3513 }
3514 
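/*
 * qla8044_write_flash_status_reg - Write a value to the flash status
 * register using the indirect FLASH_ADDR/FLASH_WRDATA/FLASH_CONTROL
 * sequence, then wait for the operation to complete.
 *
 * @vha : Pointer to host adapter structure
 * @data : Value to write to the flash status register
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */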
3515 static int
3516 qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
3517 			       uint32_t data)
3518 {
3519 	int ret_val = QLA_SUCCESS;
3520 	uint32_t cmd;
3521 
3522 	cmd = vha->hw->fdt_wrt_sts_reg_cmd;
3523 
3524 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3525 	    QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
3526 	if (ret_val) {
3527 		ql_log(ql_log_warn, vha, 0xb125,
3528 		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
3529 		goto exit_func;
3530 	}
3531 
3532 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
3533 	if (ret_val) {
3534 		ql_log(ql_log_warn, vha, 0xb126,
3535 		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3536 		goto exit_func;
3537 	}
3538 
3539 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3540 	    QLA8044_FLASH_SECOND_ERASE_MS_VAL);
3541 	if (ret_val) {
3542 		ql_log(ql_log_warn, vha, 0xb127,
3543 		    "%s: Failed to write to FLASH_CONTROL.\n", __func__);
3544 		goto exit_func;
3545 	}
3546 
3547 	ret_val = qla8044_poll_flash_status_reg(vha);
3548 	if (ret_val)
3549 		ql_log(ql_log_warn, vha, 0xb128,
3550 		    "%s: Error polling flash status reg.\n", __func__);
3551 
3552 exit_func:
3553 	return ret_val;
3554 }
3555 
3556 /*
3557  * This function assumes that the flash lock is held.
3558  */
3559 static int
3560 qla8044_unprotect_flash(scsi_qla_host_t *vha)
3561 {
3562 	int ret_val;
3563 	struct qla_hw_data *ha = vha->hw;
3564 
3565 	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
3566 	if (ret_val)
3567 		ql_log(ql_log_warn, vha, 0xb139,
3568 		    "%s: Write flash status failed.\n", __func__);
3569 
3570 	return ret_val;
3571 }
3572 
3573 /*
3574  * This function assumes that the flash lock is held.
3575  */
3576 static int
3577 qla8044_protect_flash(scsi_qla_host_t *vha)
3578 {
3579 	int ret_val;
3580 	struct qla_hw_data *ha = vha->hw;
3581 
3582 	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
3583 	if (ret_val)
3584 		ql_log(ql_log_warn, vha, 0xb13b,
3585 		    "%s: Write flash status failed.\n", __func__);
3586 
3587 	return ret_val;
3588 }
3589 
3590 
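/*
 * qla8044_erase_flash_sector - Erase the flash sector starting at
 * sector_start_addr.
 *
 * @vha : Pointer to host adapter structure
 * @sector_start_addr : Byte address of the sector to erase
 *
 * The 24-bit sector address is byte-swapped into FLASH_WRDATA before the
 * erase command from the flash descriptor table is issued via FLASH_ADDR.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 *
 * NOTE: Called with the flash lock held and the flash unprotected
 * (see qla8044_write_optrom_data).
 */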
3591 static int
3592 qla8044_erase_flash_sector(struct scsi_qla_host *vha,
3593 			   uint32_t sector_start_addr)
3594 {
3595 	uint32_t reversed_addr;
3596 	int ret_val = QLA_SUCCESS;
3597 
3598 	ret_val = qla8044_poll_flash_status_reg(vha);
3599 	if (ret_val) {
3600 		ql_log(ql_log_warn, vha, 0xb12e,
3601 		    "%s: Poll flash status before erase failed.\n", __func__);
3602 	}
3603 
3604 	reversed_addr = (((sector_start_addr & 0xFF) << 16) |
3605 	    (sector_start_addr & 0xFF00) |
3606 	    ((sector_start_addr & 0xFF0000) >> 16));
3607 
3608 	ret_val = qla8044_wr_reg_indirect(vha,
3609 	    QLA8044_FLASH_WRDATA, reversed_addr);
3610 	if (ret_val) {
3611 		ql_log(ql_log_warn, vha, 0xb12f,
3612 		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3613 	}
3614 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3615 	   QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
3616 	if (ret_val) {
3617 		ql_log(ql_log_warn, vha, 0xb130,
3618 		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
3619 	}
3620 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3621 	    QLA8044_FLASH_LAST_ERASE_MS_VAL);
3622 	if (ret_val) {
3623 		ql_log(ql_log_warn, vha, 0xb131,
3624 		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
3625 	}
3626 	ret_val = qla8044_poll_flash_status_reg(vha);
3627 	if (ret_val) {
3628 		ql_log(ql_log_warn, vha, 0xb132,
3629 		    "%s: Poll flash status failed.\n", __func__);
3630 	}
3631 
3632 
3633 	return ret_val;
3634 }
3635 
3636 /*
3637  * qla8044_flash_write_u32 - Write data to flash
3638  *
3639  * @ha : Pointer to adapter structure
3640  * addr : Flash address to write to
3641  * p_data : Data to be written
3642  *
3643  * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
3644  *
3645  * NOTE: Lock should be held on entry
3646  */
3647 static int
3648 qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
3649 			uint32_t *p_data)
3650 {
3651 	int ret_val = QLA_SUCCESS;
3652 
3653 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3654 	    0x00800000 | (addr >> 2));
3655 	if (ret_val) {
3656 		ql_log(ql_log_warn, vha, 0xb134,
3657 		    "%s: Failed write to FLASH_ADDR.\n", __func__);
3658 		goto exit_func;
3659 	}
3660 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
3661 	if (ret_val) {
3662 		ql_log(ql_log_warn, vha, 0xb135,
3663 		    "%s: Failed write to FLASH_WRDATA.\n", __func__);
3664 		goto exit_func;
3665 	}
3666 	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
3667 	if (ret_val) {
3668 		ql_log(ql_log_warn, vha, 0xb136,
3669 		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
3670 		goto exit_func;
3671 	}
3672 	ret_val = qla8044_poll_flash_status_reg(vha);
3673 	if (ret_val) {
3674 		ql_log(ql_log_warn, vha, 0xb137,
3675 		    "%s: Poll flash status failed.\n", __func__);
3676 	}
3677 
3678 exit_func:
3679 	return ret_val;
3680 }
3681 
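/*
 * qla8044_write_flash_buffer_mode - Write a burst of dwords to flash using
 * the SPI buffer mode: the first dword, the intermediate dwords, and a
 * final dword that commits the burst to flash address faddr.
 *
 * @vha : Pointer to host adapter structure
 * @dwptr : Source data buffer
 * @faddr : Flash byte address to write to
 * @dwords : Number of dwords, which must lie between
 *	     QLA8044_MIN_OPTROM_BURST_DWORDS and
 *	     QLA8044_MAX_OPTROM_BURST_DWORDS
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */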
3682 static int
3683 qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3684 				uint32_t faddr, uint32_t dwords)
3685 {
3686 	int ret = QLA_FUNCTION_FAILED;
3687 	uint32_t spi_val;
3688 
3689 	if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
3690 	    dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
3691 		ql_dbg(ql_dbg_user, vha, 0xb123,
3692 		    "Got unsupported dwords = 0x%x.\n",
3693 		    dwords);
3694 		return QLA_FUNCTION_FAILED;
3695 	}
3696 
3697 	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
3698 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3699 	    spi_val | QLA8044_FLASH_SPI_CTL);
3700 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3701 	    QLA8044_FLASH_FIRST_TEMP_VAL);
3702 
3703 	/* First DWORD write to FLASH_WRDATA */
3704 	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
3705 	    *dwptr++);
3706 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3707 	    QLA8044_FLASH_FIRST_MS_PATTERN);
3708 
3709 	ret = qla8044_poll_flash_status_reg(vha);
3710 	if (ret) {
3711 		ql_log(ql_log_warn, vha, 0xb124,
3712 		    "%s: Failed.\n", __func__);
3713 		goto exit_func;
3714 	}
3715 
3716 	dwords--;
3717 
3718 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3719 	    QLA8044_FLASH_SECOND_TEMP_VAL);
3720 
3721 
3722 	/* Second to N-1 DWORDS writes */
3723 	while (dwords != 1) {
3724 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3725 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3726 		    QLA8044_FLASH_SECOND_MS_PATTERN);
3727 		ret = qla8044_poll_flash_status_reg(vha);
3728 		if (ret) {
3729 			ql_log(ql_log_warn, vha, 0xb129,
3730 			    "%s: Failed.\n", __func__);
3731 			goto exit_func;
3732 		}
3733 		dwords--;
3734 	}
3735 
3736 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3737 	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
3738 
3739 	/* Last DWORD write */
3740 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3741 	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3742 	    QLA8044_FLASH_LAST_MS_PATTERN);
3743 	ret = qla8044_poll_flash_status_reg(vha);
3744 	if (ret) {
3745 		ql_log(ql_log_warn, vha, 0xb12a,
3746 		    "%s: Failed.\n", __func__);
3747 		goto exit_func;
3748 	}
3749 	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
3750 
3751 	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
3752 		ql_log(ql_log_warn, vha, 0xb12b,
3753 		    "%s: Failed.\n", __func__);
3754 		spi_val = 0;
3755 		/* Operation failed, clear error bit. */
3756 		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3757 		    &spi_val);
3758 		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3759 		    spi_val | QLA8044_FLASH_SPI_CTL);
3760 	}
3761 exit_func:
3762 	return ret;
3763 }
3764 
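/*
 * qla8044_write_flash_dword_mode - Write data to flash one dword at a time;
 * used as the slow-path fallback when buffer mode fails.
 *
 * @vha : Pointer to host adapter structure
 * @dwptr : Source data buffer
 * @faddr : Flash byte address to write to
 * @dwords : Number of dwords to write
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */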
3765 static int
3766 qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3767 			       uint32_t faddr, uint32_t dwords)
3768 {
3769 	int ret = QLA_FUNCTION_FAILED;
3770 	uint32_t liter;
3771 
3772 	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3773 		ret = qla8044_flash_write_u32(vha, faddr, dwptr);
3774 		if (ret) {
3775 			ql_dbg(ql_dbg_p3p, vha, 0xb141,
3776 			    "%s: flash address=%x data=%x.\n", __func__,
3777 			     faddr, *dwptr);
3778 			break;
3779 		}
3780 	}
3781 
3782 	return ret;
3783 }
3784 
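/*
 * qla8044_write_optrom_data - Write option-ROM data to flash.
 *
 * @vha : Pointer to host adapter structure
 * @buf : Source buffer
 * @offset : Flash byte offset to write to (sector aligned)
 * @length : Number of bytes to write (sector aligned)
 *
 * Blocks SCSI requests, takes the flash lock, unprotects the flash, erases
 * the affected sectors and writes the data in buffer mode (falling back to
 * dword mode on failure) before restoring protection and unblocking
 * requests.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */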
3785 int
3786 qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3787 			  uint32_t offset, uint32_t length)
3788 {
3789 	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
3790 	int dword_count, erase_sec_count;
3791 	uint32_t erase_offset;
3792 	uint8_t *p_cache, *p_src;
3793 
3794 	erase_offset = offset;
3795 
3796 	p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
3797 	if (!p_cache)
3798 		return QLA_FUNCTION_FAILED;
3799 
3800 	memcpy(p_cache, buf, length);
3801 	p_src = p_cache;
3802 	dword_count = length / sizeof(uint32_t);
3803 	/* Since the offset and length are sector aligned, dword_count will
3804 	 * always be a multiple of QLA8044_MAX_OPTROM_BURST_DWORDS (64).
3805 	 */
3806 	burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
3807 	erase_sec_count = length / QLA8044_SECTOR_SIZE;
3808 
3809 	/* Suspend HBA. */
3810 	scsi_block_requests(vha->host);
3811 	/* Lock and enable write for whole operation. */
3812 	qla8044_flash_lock(vha);
3813 	qla8044_unprotect_flash(vha);
3814 
3815 	/* Erasing the sectors */
3816 	for (i = 0; i < erase_sec_count; i++) {
3817 		rval = qla8044_erase_flash_sector(vha, erase_offset);
3818 		ql_dbg(ql_dbg_user, vha, 0xb138,
3819 		    "Done erase of sector=0x%x.\n",
3820 		    erase_offset);
3821 		if (rval) {
3822 			ql_log(ql_log_warn, vha, 0xb121,
3823 			    "Failed to erase sector at address 0x%x.\n",
3824 			    erase_offset);
3825 			goto out;
3826 		}
3827 		erase_offset += QLA8044_SECTOR_SIZE;
3828 	}
3829 	ql_dbg(ql_dbg_user, vha, 0xb13f,
3830 	    "Got write for addr = 0x%x length=0x%x.\n",
3831 	    offset, length);
3832 
3833 	for (i = 0; i < burst_iter_count; i++) {
3834 
3835 		/* Go with write. */
3836 		rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
3837 		    offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
3838 		if (rval) {
3839 			/* Buffer mode failed; fall back to dword mode. */
3840 			ql_log(ql_log_warn, vha, 0xb122,
3841 			    "Failed to write flash in buffer mode, "
3842 			    "reverting to slow-write.\n");
3843 			rval = qla8044_write_flash_dword_mode(vha,
3844 			    (uint32_t *)p_src, offset,
3845 			    QLA8044_MAX_OPTROM_BURST_DWORDS);
3846 		}
3847 		p_src +=  sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3848 		offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3849 	}
3850 	ql_dbg(ql_dbg_user, vha, 0xb133,
3851 	    "Done writing.\n");
3852 
3853 out:
3854 	qla8044_protect_flash(vha);
3855 	qla8044_flash_unlock(vha);
3856 	scsi_unblock_requests(vha->host);
3857 	kfree(p_cache);
3858 
3859 	return rval;
3860 }
3861 
3862 #define LEG_INT_PTR_B31		(1 << 31)
3863 #define LEG_INT_PTR_B30		(1 << 30)
3864 #define PF_BITS_MASK		(0xF << 16)
3865 /**
3866  * qla8044_intr_handler() - Process interrupts for the ISP8044
3867  * @irq: interrupt number
3868  * @dev_id: SCSI driver HA context
3869  *
3870  * Called by system whenever the host adapter generates an interrupt.
3871  *
3872  * Returns handled flag.
3873  */
3874 irqreturn_t
3875 qla8044_intr_handler(int irq, void *dev_id)
3876 {
3877 	scsi_qla_host_t	*vha;
3878 	struct qla_hw_data *ha;
3879 	struct rsp_que *rsp;
3880 	struct device_reg_82xx __iomem *reg;
3881 	int		status = 0;
3882 	unsigned long	flags;
3883 	unsigned long	iter;
3884 	uint32_t	stat;
3885 	uint16_t	mb[4];
3886 	uint32_t leg_int_ptr = 0, pf_bit;
3887 
3888 	rsp = (struct rsp_que *) dev_id;
3889 	if (!rsp) {
3890 		ql_log(ql_log_info, NULL, 0xb143,
3891 		    "%s(): NULL response queue pointer\n", __func__);
3892 		return IRQ_NONE;
3893 	}
3894 	ha = rsp->hw;
3895 	vha = pci_get_drvdata(ha->pdev);
3896 
3897 	if (unlikely(pci_channel_offline(ha->pdev)))
3898 		return IRQ_HANDLED;
3899 
3900 	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3901 
3902 	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
3903 	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
3904 		ql_dbg(ql_dbg_p3p, vha, 0xb144,
3905 		    "%s: Legacy Interrupt Bit 31 not set, "
3906 		    "spurious interrupt!\n", __func__);
3907 		return IRQ_NONE;
3908 	}
3909 
3910 	pf_bit = ha->portnum << 16;
3911 	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
3912 	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
3913 		ql_dbg(ql_dbg_p3p, vha, 0xb145,
3914 		    "%s: Incorrect function ID 0x%x in "
3915 		    "legacy interrupt register, "
3916 		    "ha->pf_bit = 0x%x\n", __func__,
3917 		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
3918 		return IRQ_NONE;
3919 	}
3920 
3921 	/* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
3922 	 * Trigger Control register and poll until bit 30 of the Legacy
3923 	 * Interrupt Pointer register is cleared.
3924 	 */
3925 	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
3926 	do {
3927 		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3928 		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
3929 			break;
3930 	} while (leg_int_ptr & (LEG_INT_PTR_B30));
3931 
3932 	reg = &ha->iobase->isp82;
3933 	spin_lock_irqsave(&ha->hardware_lock, flags);
3934 	for (iter = 1; iter--; ) {
3935 
3936 		if (RD_REG_DWORD(&reg->host_int)) {
3937 			stat = RD_REG_DWORD(&reg->host_status);
3938 			if ((stat & HSRX_RISC_INT) == 0)
3939 				break;
3940 
3941 			switch (stat & 0xff) {
3942 			case 0x1:
3943 			case 0x2:
3944 			case 0x10:
3945 			case 0x11:
3946 				qla82xx_mbx_completion(vha, MSW(stat));
3947 				status |= MBX_INTERRUPT;
3948 				break;
3949 			case 0x12:
3950 				mb[0] = MSW(stat);
3951 				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
3952 				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
3953 				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
3954 				qla2x00_async_event(vha, rsp, mb);
3955 				break;
3956 			case 0x13:
3957 				qla24xx_process_response_queue(vha, rsp);
3958 				break;
3959 			default:
3960 				ql_dbg(ql_dbg_p3p, vha, 0xb146,
3961 				    "Unrecognized interrupt type "
3962 				    "(%d).\n", stat & 0xff);
3963 				break;
3964 			}
3965 		}
3966 		WRT_REG_DWORD(&reg->host_int, 0);
3967 	}
3968 
3969 	qla2x00_handle_mbx_completion(ha, status);
3970 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3971 
3972 	return IRQ_HANDLED;
3973 }
3974 
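/*
 * qla8044_idc_dontreset - Report whether the "don't reset HBA" bit
 * (DONTRESET_BIT0) is set in the IDC_DRV_CTRL register.
 *
 * @ha : Pointer to adapter structure
 */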
3975 static int
3976 qla8044_idc_dontreset(struct qla_hw_data *ha)
3977 {
3978 	uint32_t idc_ctrl;
3979 
3980 	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3981 	return idc_ctrl & DONTRESET_BIT0;
3982 }
3983 
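/*
 * qla8044_clear_rst_ready - Clear this function's reset-ready bit in the
 * DRV_STATE register.
 *
 * @vha : Pointer to host adapter structure
 */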
3984 static void
3985 qla8044_clear_rst_ready(scsi_qla_host_t *vha)
3986 {
3987 	uint32_t drv_state;
3988 
3989 	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
3990 
3991 	/*
3992 	 * For ISP8044, the drv_state register has 1 bit per function;
3993 	 * shift 1 by the port number to clear this function's bit.
3994 	 * (For ISP82xx, drv_state has 4 bits per function.)
3995 	 */
3996 	drv_state &= ~(1 << vha->hw->portnum);
3997 
3998 	ql_dbg(ql_dbg_p3p, vha, 0xb13d,
3999 	    "drv_state: 0x%08x\n", drv_state);
4000 	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
4001 }
4002 
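/*
 * qla8044_abort_isp - Perform ISP reset/recovery for the ISP8044.
 *
 * @vha : Pointer to host adapter structure
 *
 * Moves the device state to NEED_RESET (unless reset recovery is disabled
 * via IDC_CTRL), determines reset ownership, runs the device state handler
 * and restarts the ISP on success.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */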
4003 int
4004 qla8044_abort_isp(scsi_qla_host_t *vha)
4005 {
4006 	int rval;
4007 	uint32_t dev_state;
4008 	struct qla_hw_data *ha = vha->hw;
4009 
4010 	qla8044_idc_lock(ha);
4011 	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
4012 
4013 	if (ql2xdontresethba)
4014 		qla8044_set_idc_dontreset(vha);
4015 
4016 	/* If device_state is NEED_RESET, go ahead with the reset,
4017 	 * irrespective of ql2xdontresethba. This allows a non-reset-owner
4018 	 * to force a reset: the non-reset-owner sets IDC_CTRL BIT0 to
4019 	 * prevent the reset-owner from doing a reset, and then forces a
4020 	 * reset by setting device_state to NEED_RESET.
4021 	 */
4022 	if (dev_state == QLA8XXX_DEV_READY) {
4023 		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
4024 		 * recovery. */
4025 		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
4026 			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
4027 			    "Reset recovery disabled\n");
4028 			rval = QLA_FUNCTION_FAILED;
4029 			goto exit_isp_reset;
4030 		}
4031 
4032 		ql_dbg(ql_dbg_p3p, vha, 0xb140,
4033 		    "HW State: NEED RESET\n");
4034 		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
4035 		    QLA8XXX_DEV_NEED_RESET);
4036 	}
4037 
4038 	/* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
4039 	 * and which drivers are present. Unlike ISP82XX, the function setting
4040 	 * NEED_RESET, may not be the Reset owner. */
4041 	qla83xx_reset_ownership(vha);
4042 
4043 	qla8044_idc_unlock(ha);
4044 	rval = qla8044_device_state_handler(vha);
4045 	qla8044_idc_lock(ha);
4046 	qla8044_clear_rst_ready(vha);
4047 
4048 exit_isp_reset:
4049 	qla8044_idc_unlock(ha);
4050 	if (rval == QLA_SUCCESS) {
4051 		ha->flags.isp82xx_fw_hung = 0;
4052 		ha->flags.nic_core_reset_hdlr_active = 0;
4053 		rval = qla82xx_restart_isp(vha);
4054 	}
4055 
4056 	return rval;
4057 }
4058 
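/*
 * qla8044_fw_dump - Force a firmware dump by taking reset ownership and
 * waiting for the chip reset to complete; SCSI requests are blocked for
 * the duration.
 *
 * @vha : Pointer to host adapter structure
 * @hardware_locked : Not used by this function
 */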
4059 void
4060 qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
4061 {
4062 	struct qla_hw_data *ha = vha->hw;
4063 
4064 	if (!ha->allow_cna_fw_dump)
4065 		return;
4066 
4067 	scsi_block_requests(vha->host);
4068 	ha->flags.isp82xx_no_md_cap = 1;
4069 	qla8044_idc_lock(ha);
4070 	qla82xx_set_reset_owner(vha);
4071 	qla8044_idc_unlock(ha);
4072 	qla2x00_wait_for_chip_reset(vha);
4073 	scsi_unblock_requests(vha->host);
4074 }
4075