#include "qlge.h"

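/* Un-pause the MPI RISC.  The RISC must currently report paused
 * (CSR_RP set); otherwise return -EIO without touching the CSR.
 */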
int ql_unpause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;

	/* Un-pause the RISC */
	tmp = ql_read32(qdev, CSR);
	if (!(tmp & CSR_RP))
		return -EIO;

	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
	return 0;
}

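/* Pause the MPI RISC and poll the CSR until the pause bit is
 * reflected.  Returns -ETIMEDOUT if the RISC never reports paused.
 */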
int ql_pause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Pause the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RP)
			break;
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

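/* Hard-reset the MPI RISC via the CSR, poll for the reset bit to be
 * reflected, then clear the reset.  Returns -ETIMEDOUT if the reset
 * bit never appears.
 */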
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Reset the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RR) {
			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
			break;
		}
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

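/* Read a 32-bit MPI processor register through the indirect
 * PROC_ADDR/PROC_DATA interface.
 */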
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, PROC_DATA);
exit:
	return status;
}

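/* Write a 32-bit MPI processor register through the indirect
 * PROC_ADDR/PROC_DATA interface.
 */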
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* write the data to the data reg */
	ql_write32(qdev, PROC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, PROC_ADDR, reg);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
exit:
	return status;
}

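/* Soft-reset the MPI RISC by writing MPI processor register 0x00001010. */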
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
	int status;
	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
	return status;
}

/* Determine if we are in charge of the firmware. We are if
 * we are the lower of the 2 NIC PCIe functions, or if
 * we are the higher function and the lower function
 * is not enabled.
 */
int ql_own_firmware(struct ql_adapter *qdev)
{
	u32 temp;

	/* If we are the lower of the 2 NIC functions
	 * on the chip then we are responsible for
	 * core dump and firmware reset after an error.
	 */
	if (qdev->func < qdev->alt_func)
		return 1;

	/* If we are the higher of the 2 NIC functions
	 * on the chip and the lower function is not
	 * enabled, then we are responsible for
	 * core dump and firmware reset after an error.
	 */
	temp = ql_read32(qdev, STS);
	if (!(temp & (1 << (8 + qdev->alt_func))))
		return 1;

	return 0;
}

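/* Copy the firmware's outbound mailbox registers into
 * mbcp->mbox_out[].  The PROC_REG semaphore is held around
 * the reads.
 */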
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return -EBUSY;
	for (i = 0; i < mbcp->out_count; i++) {
		status =
		    ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
				    &mbcp->mbox_out[i]);
		if (status) {
			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
	return status;
}

/* Wait for a single mailbox command to complete.
 * Returns zero on success.
 */
static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
{
	int count = 100;
	u32 value;

	do {
		value = ql_read32(qdev, STS);
		if (value & STS_PI)
			return 0;
		mdelay(UDELAY_DELAY); /* 100ms */
	} while (--count);
	return -ETIMEDOUT;
}

/* Execute a single mailbox command.
 * Caller must hold PROC_ADDR semaphore.
 */
static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int i, status;

	/*
	 * Make sure there's nothing pending.
	 * This shouldn't happen.
	 */
	if (ql_read32(qdev, CSR) & CSR_HRI)
		return -EIO;

	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
	if (status)
		return status;

	/*
	 * Fill the outbound mailboxes.
	 */
	for (i = 0; i < mbcp->in_count; i++) {
		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
					  mbcp->mbox_in[i]);
		if (status)
			goto end;
	}
	/*
	 * Wake up the MPI firmware.
	 */
	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
end:
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
	return status;
}

/* We are being asked by firmware to accept
 * a change to the port.  This is only
 * a change to max frame sizes (Tx/Rx), pause
 * parameters, or loopback mode. We wake up a worker
 * to handle processing this since a mailbox command
 * will need to be sent to ACK the request.
 */
static int ql_idc_req_aen(struct ql_adapter *qdev)
{
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	netif_err(qdev, drv, qdev->ndev, "Enter!\n");
	/* Get the status data and start up a thread to
	 * handle the request.
	 */
	mbcp = &qdev->idc_mbc;
	mbcp->out_count = 4;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		ql_queue_asic_error(qdev);
	} else {
		/* Begin polled mode early so
		 * we don't get another interrupt
		 * when we leave mpi_worker.
		 */
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
	}
	return status;
}

/* Process an inter-device event completion.
 * If good, signal the caller's completion.
 */
static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
{
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	mbcp->out_count = 4;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting RISC!\n");
		ql_queue_fw_error(qdev);
	} else
		/* Wake up the sleeping mpi_idc_work thread that is
		 * waiting for this event.
		 */
		complete(&qdev->ide_completion);

	return status;
}

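/* Handle a link up AEN.  Record the new link status, restore the
 * CAM/routing tables if we are coming back from an IDC event, and
 * queue the port config worker if the frame size needs checking.
 */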
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 2;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "%s: Could not get mailbox status.\n", __func__);
		return;
	}

	qdev->link_status = mbcp->mbox_out[1];
	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");

	/* If we're coming back from an IDC event
	 * then set up the CAM and frame routing.
	 */
	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
		status = ql_cam_route_initialize(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init CAM/Routing tables.\n");
			return;
		} else
			clear_bit(QL_CAM_RT_SET, &qdev->flags);
	}

	/* Queue up a worker to check the frame
	 * size information, and fix it if it's not
	 * to our liking.
	 */
	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
		set_bit(QL_PORT_CFG, &qdev->flags);
		/* Begin polled mode early so
		 * we don't get another interrupt
		 * when we leave mpi_worker dpc.
		 */
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_port_cfg_work, 0);
	}

	ql_link_on(qdev);
}

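/* Handle a link down AEN and take the link off line. */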
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 3;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");

	ql_link_off(qdev);
}

static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 5;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");

	return status;
}

static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 1;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
	else
		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");

	return status;
}

static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 6;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status)
		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
	else {
		int i;

		netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
		for (i = 0; i < mbcp->out_count; i++)
			netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
				  i, mbcp->mbox_out[i]);
	}

	return status;
}

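/* Handle the firmware-initialization-done AEN.  Record the firmware
 * revision and initialize the CAM/routing tables.
 */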
static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;

	mbcp->out_count = 2;

	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
	} else {
		netif_err(qdev, drv, qdev->ndev, "Firmware Revision  = 0x%.08x.\n",
			  mbcp->mbox_out[1]);
		qdev->fw_rev_id = mbcp->mbox_out[1];
		status = ql_cam_route_initialize(qdev);
		if (status)
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init CAM/Routing tables.\n");
	}
}

/* Process an async event and clear it unless it's an
 * error condition.
 * This can get called iteratively from the mpi_work thread
 * when events arrive via an interrupt.
 * It also gets called when a mailbox command is polling for
 * its completion.
 */
static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	int orig_count = mbcp->out_count;

	/* Just get mailbox zero for now. */
	mbcp->out_count = 1;
	status = ql_get_mb_sts(qdev, mbcp);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Could not read MPI, resetting ASIC!\n");
		ql_queue_asic_error(qdev);
		goto end;
	}

	switch (mbcp->mbox_out[0]) {

	/* This case is only active when we arrive here
	 * as a result of issuing a mailbox command to
	 * the firmware.
	 */
	case MB_CMD_STS_INTRMDT:
	case MB_CMD_STS_GOOD:
	case MB_CMD_STS_INVLD_CMD:
	case MB_CMD_STS_XFC_ERR:
	case MB_CMD_STS_CSUM_ERR:
	case MB_CMD_STS_ERR:
	case MB_CMD_STS_PARAM_ERR:
		/* We can only get mailbox status if we're polling from an
		 * unfinished command.  Get the rest of the status data and
		 * return back to the caller.
		 * We only end up here when we're polling for a mailbox
		 * command completion.
		 */
		mbcp->out_count = orig_count;
		status = ql_get_mb_sts(qdev, mbcp);
		return status;

	/* We are being asked by firmware to accept
	 * a change to the port.  This is only
	 * a change to max frame sizes (Tx/Rx), pause
	 * parameters, or loopback mode.
	 */
	case AEN_IDC_REQ:
		status = ql_idc_req_aen(qdev);
		break;

	/* Process an inbound IDC event.
	 * This will happen when we're trying to
	 * change tx/rx max frame size, change pause
	 * parameters or loopback mode.
	 */
	case AEN_IDC_CMPLT:
	case AEN_IDC_EXT:
		status = ql_idc_cmplt_aen(qdev);
		break;

	case AEN_LINK_UP:
		ql_link_up(qdev, mbcp);
		break;

	case AEN_LINK_DOWN:
		ql_link_down(qdev, mbcp);
		break;

	case AEN_FW_INIT_DONE:
		/* If we're in the process of executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = ql_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
			return status;
		}
		ql_init_fw_done(qdev, mbcp);
		break;

	case AEN_AEN_SFP_IN:
		ql_sfp_in(qdev, mbcp);
		break;

	case AEN_AEN_SFP_OUT:
		ql_sfp_out(qdev, mbcp);
		break;

	/* This event can arrive at boot time or after an
	 * MPI reset if the firmware failed to initialize.
	 */
	case AEN_FW_INIT_FAIL:
		/* If we're in the process of executing the firmware,
		 * then convert the status to normal mailbox status.
		 */
		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
			mbcp->out_count = orig_count;
			status = ql_get_mb_sts(qdev, mbcp);
			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
			return status;
		}
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware initialization failed.\n");
		status = -EIO;
		ql_queue_fw_error(qdev);
		break;

	case AEN_SYS_ERR:
		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
		ql_queue_fw_error(qdev);
		status = -EIO;
		break;

	case AEN_AEN_LOST:
		ql_aen_lost(qdev, mbcp);
		break;

	case AEN_DCBX_CHG:
		/* Need to support AEN 8110 */
		break;
	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
		/* Clear the MPI firmware status. */
	}
end:
	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
	/* Restore the original mailbox count to
	 * what the caller asked for.  This can get
	 * changed when a mailbox command is waiting
	 * for a response and an AEN arrives and
	 * is handled.
	 */
	mbcp->out_count = orig_count;
	return status;
}

/* Execute a single mailbox command.
 * mbcp is a pointer to an array of u32.  Each
 * element in the array contains the value for its
 * respective mailbox register.
 */
static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
	int status;
	unsigned long count;

	mutex_lock(&qdev->mpi_mutex);

	/* Begin polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

	/* Load the mailbox registers and wake up MPI RISC. */
	status = ql_exec_mb_cmd(qdev, mbcp);
	if (status)
		goto end;

	/* If we're generating a system error, then there's nothing
	 * to wait for.
	 */
	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
		goto end;

	/* Wait for the command to complete. We loop
	 * here because some AEN might arrive while
	 * we're waiting for the mailbox command to
	 * complete. If more than 5 seconds expire we can
	 * assume something is wrong.
	 */
	count = jiffies + HZ * MAILBOX_TIMEOUT;
	do {
		/* Wait for the interrupt to come in. */
		status = ql_wait_mbx_cmd_cmplt(qdev);
		if (status)
			continue;

		/* Process the event.  If it's an AEN, it
		 * will be handled in-line or a worker
		 * will be spawned. If it's our completion
		 * we will catch it below.
		 */
		status = ql_mpi_handler(qdev, mbcp);
		if (status)
			goto end;

		/* It's either the completion of our mailbox
		 * command or an AEN.  If it's our completion
		 * then get out.
		 */
		if (((mbcp->mbox_out[0] & 0x0000f000) ==
					MB_CMD_STS_GOOD) ||
			((mbcp->mbox_out[0] & 0x0000f000) ==
					MB_CMD_STS_INTRMDT))
			goto done;
	} while (time_before(jiffies, count));

	netif_err(qdev, drv, qdev->ndev,
		  "Timed out waiting for mailbox complete.\n");
	status = -ETIMEDOUT;
	goto end;

done:
	/* Now we can clear the interrupt condition
	 * and look at our status.
	 */
	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);

	if (((mbcp->mbox_out[0] & 0x0000f000) !=
					MB_CMD_STS_GOOD) &&
		((mbcp->mbox_out[0] & 0x0000f000) !=
					MB_CMD_STS_INTRMDT)) {
		status = -EIO;
	}
end:
	/* End polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
	return status;
}

/* Get MPI firmware version. This will be used for
 * driver banner and for ethtool info.
 * Returns zero on success.
 */
int ql_mb_about_fw(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 3;

	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed about firmware command\n");
		status = -EIO;
	}

	/* Store the firmware version */
	qdev->fw_rev_id = mbcp->mbox_out[1];

	return status;
}

/* Get functional state for MPI firmware.
 * Returns zero on success.
 */
int ql_mb_get_fw_state(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Firmware State.\n");
		status = -EIO;
	}

	/* If bit zero is set in mbx 1 then the firmware is
	 * running, but not initialized.  This should never
	 * happen.
	 */
	if (mbcp->mbox_out[1] & 1) {
		netif_err(qdev, drv, qdev->ndev,
			  "Firmware waiting for initialization.\n");
		status = -EIO;
	}

	return status;
}

/* Send an ACK mailbox command to the firmware to
 * let it continue with the change.
 */
static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 5;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
		status = -EIO;
	}
	return status;
}

/* Set link settings and maximum frame size settings
 * for the current port.
 * Most likely will block.
 */
int ql_mb_set_port_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 3;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
	mbcp->mbox_in[1] = qdev->link_config;
	mbcp->mbox_in[2] = qdev->max_frame_size;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
		netif_err(qdev, drv, qdev->ndev,
			  "Port Config sent, wait for IDC.\n");
	} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Set Port Configuration.\n");
		status = -EIO;
	}
	return status;
}

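/* Ask the firmware to DMA a region of RISC RAM, starting at addr and
 * size 32-bit words long, into the host buffer at req_dma.
 */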
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
	u32 size)
{
	int status = 0;
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 9;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
	mbcp->mbox_in[1] = LSW(addr);
	mbcp->mbox_in[2] = MSW(req_dma);
	mbcp->mbox_in[3] = LSW(req_dma);
	mbcp->mbox_in[4] = MSW(size);
	mbcp->mbox_in[5] = LSW(size);
	mbcp->mbox_in[6] = MSW(MSD(req_dma));
	mbcp->mbox_in[7] = LSW(MSD(req_dma));
	mbcp->mbox_in[8] = MSW(addr);

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
		status = -EIO;
	}
	return status;
}

/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
		u32 ram_addr, int word_count)
{
	int status;
	char *my_buf;
	dma_addr_t buf_dma;

	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
				      &buf_dma);
	if (!my_buf)
		return -EIO;

	status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
	if (!status)
		memcpy(buf, my_buf, word_count * sizeof(u32));

	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
			    buf_dma);
	return status;
}

/* Get link settings and maximum frame size settings
 * for the current port.
 * Most likely will block.
 */
int ql_mb_get_port_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status = 0;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 3;

	mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed Get Port Configuration.\n");
		status = -EIO;
	} else {
		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
			     "Passed Get Port Configuration.\n");
		qdev->link_config = mbcp->mbox_out[1];
		qdev->max_frame_size = mbcp->mbox_out[2];
	}
	return status;
}

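/* Pass the requested Wake On LAN mode bits to the firmware. */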
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 2;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
	mbcp->mbox_in[1] = wol;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
		status = -EIO;
	}
	return status;
}

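/* Enable or disable magic packet Wake On LAN.  When enabling, the
 * port's current MAC address is passed to the firmware as the match.
 */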
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;
	u8 *addr = qdev->ndev->dev_addr;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 8;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
	if (enable_wol) {
		mbcp->mbox_in[1] = (u32)addr[0];
		mbcp->mbox_in[2] = (u32)addr[1];
		mbcp->mbox_in[3] = (u32)addr[2];
		mbcp->mbox_in[4] = (u32)addr[3];
		mbcp->mbox_in[5] = (u32)addr[4];
		mbcp->mbox_in[6] = (u32)addr[5];
		mbcp->mbox_in[7] = 0;
	} else {
		mbcp->mbox_in[1] = 0;
		mbcp->mbox_in[2] = 1;
		mbcp->mbox_in[3] = 1;
		mbcp->mbox_in[4] = 1;
		mbcp->mbox_in[5] = 1;
		mbcp->mbox_in[6] = 1;
		mbcp->mbox_in[7] = 0;
	}

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to set WOL magic packet mode.\n");
		status = -EIO;
	}
	return status;
}

/* IDC - Inter Device Communication...
 * Some firmware commands require consent of the adjacent FCoE
 * function.  This function waits for the OK, or a
 * counter-request for a little more time.
 * The firmware will complete the request if the other
 * function doesn't respond.
 */
static int ql_idc_wait(struct ql_adapter *qdev)
{
	int status = -ETIMEDOUT;
	long wait_time = 1 * HZ;
	struct mbox_params *mbcp = &qdev->idc_mbc;

	do {
		/* Wait here for the command to complete
		 * via the IDC process.
		 */
		wait_time =
			wait_for_completion_timeout(&qdev->ide_completion,
						    wait_time);
		if (!wait_time) {
			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
			break;
		}
		/* Now examine the response from the IDC process.
		 * We might have a good completion or a request for
		 * more wait time.
		 */
		if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC Time Extension from function.\n");
			wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
		} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
			netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
			status = 0;
			break;
		} else {
			netif_err(qdev, drv, qdev->ndev,
				  "IDC: Invalid State 0x%.04x.\n",
				  mbcp->mbox_out[0]);
			status = -EIO;
			break;
		}
	} while (wait_time);

	return status;
}

int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 2;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
	mbcp->mbox_in[1] = led_config;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to set LED Configuration.\n");
		status = -EIO;
	}

	return status;
}

int ql_mb_get_led_cfg(struct ql_adapter *qdev)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get LED Configuration.\n");
		status = -EIO;
	} else
		qdev->led_config = mbcp->mbox_out[1];

	return status;
}

int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));

	mbcp->in_count = 1;
	mbcp->out_count = 2;

	mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
	mbcp->mbox_in[1] = control;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		status = -EINVAL;
	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
		/* This indicates that the firmware is
		 * already in the state we are trying to
		 * change it to.
		 */
		netif_err(qdev, drv, qdev->ndev,
			  "Command parameters make no change.\n");
	}
	return status;
}

/* Returns a negative error code or the mailbox command status. */
static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
{
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int status;

	memset(mbcp, 0, sizeof(struct mbox_params));
	*control = 0;

	mbcp->in_count = 1;
	mbcp->out_count = 1;

	mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;

	status = ql_mailbox_command(qdev, mbcp);
	if (status)
		return status;

	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
		*control = mbcp->mbox_in[1];
		return status;
	}

	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
		netif_err(qdev, drv, qdev->ndev,
			  "Command not supported by firmware.\n");
		status = -EINVAL;
	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
		netif_err(qdev, drv, qdev->ndev,
			  "Failed to get MPI traffic control.\n");
		status = -EIO;
	}
	return status;
}

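/* Wait for both the NIC and MPI management FIFOs to drain, polling
 * roughly every 100ms for up to about half a second.
 */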
int ql_wait_fifo_empty(struct ql_adapter *qdev)
{
	int count = 5;
	u32 mgmnt_fifo_empty;
	u32 nic_fifo_empty;

	do {
		nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
		ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
		mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
		if (nic_fifo_empty && mgmnt_fifo_empty)
			return 0;
		msleep(100);
	} while (count-- > 0);
	return -ETIMEDOUT;
}

/* API called in work thread context to set new TX/RX
 * maximum frame size values to match MTU.
 */
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
	int status;

	status = ql_mb_set_port_cfg(qdev);
	if (status)
		return status;
	status = ql_idc_wait(qdev);
	return status;
}

/* The following routines are worker threads that process
 * events that may sleep waiting for completion.
 */

/* This thread gets the maximum TX and RX frame size values
 * from the firmware and, if necessary, changes them to match
 * the MTU setting.
 */
void ql_mpi_port_cfg_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
	int status;

	status = ql_mb_get_port_cfg(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Failed to get port config data.\n");
		goto err;
	}

	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
			qdev->max_frame_size ==
			CFG_DEFAULT_MAX_FRAME_SIZE)
		goto end;

	qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
	status = ql_set_port_cfg(qdev);
	if (status) {
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Failed to set port config data.\n");
		goto err;
	}
end:
	clear_bit(QL_PORT_CFG, &qdev->flags);
	return;
err:
	ql_queue_fw_error(qdev);
	goto end;
}

/* Process an inter-device request.  This is issued by
 * the firmware in response to another function requesting
 * a change to the port. We set a flag to indicate a change
 * has been made and then send a mailbox command ACKing
 * the change request.
 */
void ql_mpi_idc_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_idc_work.work);
	int status;
	struct mbox_params *mbcp = &qdev->idc_mbc;
	u32 aen;
	int timeout;

	aen = mbcp->mbox_out[1] >> 16;
	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;

	switch (aen) {
	default:
		netif_err(qdev, drv, qdev->ndev,
			  "Bug: Unhandled IDC action.\n");
		break;
	case MB_CMD_PORT_RESET:
	case MB_CMD_STOP_FW:
		ql_link_off(qdev);
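		/* Fall through. */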
	case MB_CMD_SET_PORT_CFG:
		/* Signal the resulting link up AEN
		 * that the frame routing and mac addr
		 * needs to be set.
		 */
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		/* Do ACK if required */
		if (timeout) {
			status = ql_mb_idc_ack(qdev);
			if (status)
				netif_err(qdev, drv, qdev->ndev,
					  "Bug: No pending IDC!\n");
		} else {
			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
				     "IDC ACK not required\n");
			status = 0; /* success */
		}
		break;

	/* These sub-commands issued by another (FCoE)
	 * function are requesting to do an operation
	 * on the shared resource (MPI environment).
	 * We currently don't issue these so we just
	 * ACK the request.
	 */
	case MB_CMD_IOP_RESTART_MPI:
	case MB_CMD_IOP_PREP_LINK_DOWN:
		/* Drop the link, reload the routing
		 * table when link comes up.
		 */
		ql_link_off(qdev);
		set_bit(QL_CAM_RT_SET, &qdev->flags);
		/* Fall through. */
	case MB_CMD_IOP_DVR_START:
	case MB_CMD_IOP_FLASH_ACC:
	case MB_CMD_IOP_CORE_DUMP_MPI:
	case MB_CMD_IOP_PREP_UPDATE_MPI:
	case MB_CMD_IOP_COMP_UPDATE_MPI:
	case MB_CMD_IOP_NONE:	/* an IDC without params */
		/* Do ACK if required */
		if (timeout) {
			status = ql_mb_idc_ack(qdev);
			if (status)
				netif_err(qdev, drv, qdev->ndev,
					  "Bug: No pending IDC!\n");
		} else {
			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
				     "IDC ACK not required\n");
			status = 0; /* success */
		}
		break;
	}
}

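/* Worker that drains pending MPI events.  While the STS_PI bit is
 * set, each event is pulled and dispatched through ql_mpi_handler().
 */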
void ql_mpi_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_work.work);
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	int err = 0;

	mutex_lock(&qdev->mpi_mutex);
	/* Begin polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

	while (ql_read32(qdev, STS) & STS_PI) {
		memset(mbcp, 0, sizeof(struct mbox_params));
		mbcp->out_count = 1;
		/* Don't continue if an async event
		 * did not complete properly.
		 */
		err = ql_mpi_handler(qdev, mbcp);
		if (err)
			break;
	}

	/* End polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);
	ql_enable_completion_interrupt(qdev, 0);
}

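/* Worker that handles an MPI/firmware reset request.  Outstanding MPI
 * work is cancelled, a core dump is captured if this function owns the
 * firmware, and the RISC is then soft-reset.
 */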
void ql_mpi_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_reset_work.work);

	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	/* If we're not the dominant NIC function,
	 * then there is nothing to do.
	 */
	if (!ql_own_firmware(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
		return;
	}

	if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
		qdev->core_is_dumped = 1;
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_core_to_log, 5 * HZ);
	}
	ql_soft_reset_mpi_risc(qdev);
}