1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_gbl.h"
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12
13 #include "qla_devtbl.h"
14
15 #ifdef CONFIG_SPARC
16 #include <asm/prom.h>
17 #endif
18
19 #include "qla_target.h"
20
21 /*
22 * QLogic ISP2x00 Hardware Support Function Prototypes.
23 */
24 static int qla2x00_isp_firmware(scsi_qla_host_t *);
25 static int qla2x00_setup_chip(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
32 static int qla2x00_restart_isp(scsi_qla_host_t *);
33
34 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
35 static int qla84xx_init_chip(scsi_qla_host_t *);
36 static int qla25xx_init_queues(struct qla_hw_data *);
37 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
38 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
39 struct event_arg *ea);
40 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
41 struct event_arg *);
42 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
43
44 /* SRB Extensions ---------------------------------------------------------- */
45
46 void
qla2x00_sp_timeout(struct timer_list * t)47 qla2x00_sp_timeout(struct timer_list *t)
48 {
49 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
50 struct srb_iocb *iocb;
51
52 WARN_ON(irqs_disabled());
53 iocb = &sp->u.iocb_cmd;
54 iocb->timeout(sp);
55 }
56
/* Release an SRB: stop its timer, then hand it back to the pool. */
void qla2x00_sp_free(srb_t *sp)
{
	del_timer(&sp->u.iocb_cmd.timer);
	qla2x00_rel_sp(sp);
}
64
/* Poisoned done() installed on freed SRBs to catch use-after-free callers. */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}
69
/* Poisoned free() installed on freed SRBs to catch double-free callers. */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}
74
75 /* Asynchronous Login/Logout Routines -------------------------------------- */
76
77 unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host * vha)78 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
79 {
80 unsigned long tmo;
81 struct qla_hw_data *ha = vha->hw;
82
83 /* Firmware should use switch negotiated r_a_tov for timeout. */
84 tmo = ha->r_a_tov / 10 * 2;
85 if (IS_QLAFX00(ha)) {
86 tmo = FX00_DEF_RATOV * 2;
87 } else if (!IS_FWI2_CAPABLE(ha)) {
88 /*
89 * Except for earlier ISPs where the timeout is seeded from the
90 * initialization control block.
91 */
92 tmo = ha->login_timeout;
93 }
94 return tmo;
95 }
96
/*
 * Timeout handler for an SRB_ABT_CMD SRB.  Detaches both the abort IOCB
 * and (if still tracked) the command it was aborting from the request
 * queue's outstanding array, then completes both with
 * QLA_OS_TIMER_EXPIRED so neither can be matched by a late ISR response.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	/*
	 * Scan under the qpair lock: clear the aborted command wherever it
	 * sits, and stop once the abort SRB itself has been removed.
	 */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp))
			qpair->req->outstanding_cmds[handle] = NULL;

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* Complete the original command first, then the abort itself. */
	if (sp->cmd_sp)
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);

	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
	sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
135
/*
 * Completion callback for an abort SRB.  A synchronous issuer waits on
 * the embedded completion and frees the SRB itself; otherwise nobody is
 * waiting and the SRB is released here.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);

	if (!(sp->flags & SRB_WAKEUP_ON_COMP))
		sp->free(sp);
	else
		complete(&iocb->u.abt.comp);
}
146
/*
 * Issue an asynchronous ABTS for @cmd_sp on the same qpair.
 *
 * @cmd_sp: the in-flight SRB to abort.
 * @wait:   when true, block until the firmware answers and translate the
 *          completion status into QLA_SUCCESS/QLA_FUNCTION_FAILED; when
 *          false, fire-and-forget (the done callback frees the abort SRB).
 *
 * Returns a qla2x00 status code; QLA_FUNCTION_FAILED if no SRB could be
 * allocated or qla2x00_start_sp() failed.
 */
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* GFP_ATOMIC: may be called from timer/IRQ-ish context. */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return rval;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_timer(sp, 42);

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	sp->done = qla24xx_abort_sp_done;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return rval;
	}

	if (wait) {
		/* Done callback signals this instead of freeing the SRB. */
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_FUNCTION_FAILED;
		sp->free(sp);
	}

	return rval;
}
196
197 void
qla2x00_async_iocb_timeout(void * data)198 qla2x00_async_iocb_timeout(void *data)
199 {
200 srb_t *sp = data;
201 fc_port_t *fcport = sp->fcport;
202 struct srb_iocb *lio = &sp->u.iocb_cmd;
203 int rc, h;
204 unsigned long flags;
205
206 if (fcport) {
207 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
208 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
209 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
210
211 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
212 } else {
213 pr_info("Async-%s timeout - hdl=%x.\n",
214 sp->name, sp->handle);
215 }
216
217 switch (sp->type) {
218 case SRB_LOGIN_CMD:
219 rc = qla24xx_async_abort_cmd(sp, false);
220 if (rc) {
221 /* Retry as needed. */
222 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
223 lio->u.logio.data[1] =
224 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
225 QLA_LOGIO_LOGIN_RETRIED : 0;
226 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
227 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
228 h++) {
229 if (sp->qpair->req->outstanding_cmds[h] ==
230 sp) {
231 sp->qpair->req->outstanding_cmds[h] =
232 NULL;
233 break;
234 }
235 }
236 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
237 sp->done(sp, QLA_FUNCTION_TIMEOUT);
238 }
239 break;
240 case SRB_LOGOUT_CMD:
241 case SRB_CT_PTHRU_CMD:
242 case SRB_MB_IOCB:
243 case SRB_NACK_PLOGI:
244 case SRB_NACK_PRLI:
245 case SRB_NACK_LOGO:
246 case SRB_CTRL_VP:
247 default:
248 rc = qla24xx_async_abort_cmd(sp, false);
249 if (rc) {
250 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
251 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
252 h++) {
253 if (sp->qpair->req->outstanding_cmds[h] ==
254 sp) {
255 sp->qpair->req->outstanding_cmds[h] =
256 NULL;
257 break;
258 }
259 }
260 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
261 sp->done(sp, QLA_FUNCTION_TIMEOUT);
262 }
263 break;
264 }
265 }
266
/*
 * Completion callback for an async login SRB: clear the async flags on
 * the port, forward the logio result to the PLOGI-done event handler
 * (unless the driver is unloading), and free the SRB.
 */
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		struct event_arg ea = {
			.fcport = sp->fcport,
			.data[0] = lio->u.logio.data[0],
			.data[1] = lio->u.logio.data[1],
			.iop[0] = lio->u.logio.iop[0],
			.iop[1] = lio->u.logio.iop[1],
			.sp = sp,
		};

		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	sp->free(sp);
}
291
292 static inline bool
fcport_is_smaller(fc_port_t * fcport)293 fcport_is_smaller(fc_port_t *fcport)
294 {
295 if (wwn_to_u64(fcport->port_name) <
296 wwn_to_u64(fcport->vha->port_name))
297 return true;
298 else
299 return false;
300 }
301
302 static inline bool
fcport_is_bigger(fc_port_t * fcport)303 fcport_is_bigger(fc_port_t *fcport)
304 {
305 return !fcport_is_smaller(fcport);
306 }
307
/*
 * Kick off an asynchronous PLOGI/PRLI to @fcport.
 *
 * Refuses to send if the adapter is offline, a previous async command is
 * still pending on the port, or the port has no loop id yet.  Returns a
 * qla2x00 status code; on start failure, flags RELOGIN_NEEDED so the DPC
 * thread retries later.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generation counters so stale completions are detectable. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	/* In N2N, the port with the bigger WWPN skips PLOGI and goes to PRLI. */
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	else
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/* NOTE(review): FCF_ASYNC_SENT is only cleared on the free path,
	 * not when sp allocation fails — presumably it was never set then. */
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
372
/* Completion callback for an async logout SRB. */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	fc_port_t *fcport = sp->fcport;

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Bump the login generation so stale login completions are ignored. */
	fcport->login_gen++;
	qlt_logo_completion_handler(fcport, res);
	sp->free(sp);
}
380
/*
 * Kick off an asynchronous LOGO to @fcport.  Returns a qla2x00 status
 * code; on any failure path the async flags are cleared again.
 */
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	/* Set before allocation; cleared at 'done' if anything fails. */
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
		fcport->d_id.b.area, fcport->d_id.b.al_pa,
		fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}
419
/*
 * Work-item half of async PRLO completion (queued by the sp_done
 * callback): mark the device lost for initiator-mode sessions and run
 * the target-mode logout completion handler with the logio status.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1);
	qlt_logo_completion_handler(fcport, data[0]);
}
430
/*
 * Completion callback for an async PRLO SRB: defer the real completion
 * handling to a work item (unless the driver is unloading), then free.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	fc_port_t *fcport = sp->fcport;

	fcport->flags &= ~FCF_ASYNC_ACTIVE;

	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(fcport->vha, fcport,
		    iocb->u.logio.data);

	sp->free(sp);
}
442
/*
 * Kick off an asynchronous PRLO (process logout) to @fcport.  Returns a
 * qla2x00 status code; clears FCF_ASYNC_ACTIVE on failure paths.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prlo_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
481
/*
 * Handle completion of an async ADISC for @ea->fcport: on failure the
 * session is scheduled for deletion (forcing firmware cleanup); on
 * success, stale generations abort the flow, otherwise the port proceeds
 * through the common GPDB-done path.
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Bail if the port's state moved on while the ADISC was in flight. */
	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
524
/* Queue a QLA_EVT_ELS_PLOGI work item for @fcport on the DPC thread. */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);

	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	/* Mark the port busy before the work item runs. */
	fcport->flags |= FCF_ASYNC_ACTIVE;
	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	return qla2x00_post_work(vha, e);
}
538
/*
 * Completion callback for an async ADISC SRB: clear the async flags,
 * package the logio result into an event_arg and hand it to the ADISC
 * event handler, then free the SRB.
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct event_arg ea = {
		.rc = res,
		.data[0] = iocb->u.logio.data[0],
		.data[1] = iocb->u.logio.data[1],
		.iop[0] = iocb->u.logio.iop[0],
		.iop[1] = iocb->u.logio.iop[1],
		.fcport = sp->fcport,
		.sp = sp,
	};

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	qla24xx_handle_adisc_event(vha, &ea);

	sp->free(sp);
}
564
/*
 * Kick off an asynchronous ADISC to @fcport.  Returns a qla2x00 status
 * code; on failure the completion is still delivered by re-queuing the
 * adisc work item so the state machine keeps moving.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generation counters so stale completions are detectable. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Re-queue so the adisc-done path still runs despite the failure. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}
611
qla2x00_is_reserved_id(scsi_qla_host_t * vha,uint16_t loop_id)612 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
613 {
614 struct qla_hw_data *ha = vha->hw;
615
616 if (IS_FWI2_CAPABLE(ha))
617 return loop_id > NPH_LAST_HANDLE;
618
619 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
620 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
621 }
622
623 /**
624 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
625 * @vha: adapter state pointer.
626 * @dev: port structure pointer.
627 *
628 * Returns:
629 * qla2x00 local function return status code.
630 *
631 * Context:
632 * Kernel context.
633 */
qla2x00_find_new_loop_id(scsi_qla_host_t * vha,fc_port_t * dev)634 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
635 {
636 int rval;
637 struct qla_hw_data *ha = vha->hw;
638 unsigned long flags = 0;
639
640 rval = QLA_SUCCESS;
641
642 spin_lock_irqsave(&ha->vport_slock, flags);
643
644 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
645 if (dev->loop_id >= LOOPID_MAP_SIZE ||
646 qla2x00_is_reserved_id(vha, dev->loop_id)) {
647 dev->loop_id = FC_NO_LOOP_ID;
648 rval = QLA_FUNCTION_FAILED;
649 } else {
650 set_bit(dev->loop_id, ha->loop_id_map);
651 }
652 spin_unlock_irqrestore(&ha->vport_slock, flags);
653
654 if (rval == QLA_SUCCESS)
655 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
656 "Assigning new loopid=%x, portid=%x.\n",
657 dev->loop_id, dev->d_id.b24);
658 else
659 ql_log(ql_log_warn, dev->vha, 0x2087,
660 "No loop_id's available, portid=%x.\n",
661 dev->d_id.b24);
662
663 return rval;
664 }
665
/* Return @fcport's loop id to the shared bitmap and mark it unassigned. */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	/* Nothing to release for unassigned or reserved handles. */
	if (fcport->loop_id == FC_NO_LOOP_ID)
		return;
	if (qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}
677
/*
 * Process the Get Name List (GNL) result for @ea->fcport: match the
 * port's WWPN against the firmware's name list, resolve loop-id/port-id
 * conflicts, and drive the port to its next login step.  Behavior
 * branches on the adapter topology (fabric vs. N2N).
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* The port's state moved on while the GNL was in flight: restart. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	/* Number of list entries the firmware actually transferred. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	/* Look for this port's WWPN in the firmware name list. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		/* Mask out the reserve bit from the firmware handle. */
		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* High nibble carries the NVMe login state, low the FCP one. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		/*
		 * If firmware reports a different port id or loop id than we
		 * have recorded, the old session must be torn down first.
		 */
		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
				fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
			id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* Word 3 BIT_4 distinguishes target/initiator. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				qla2x00_set_fcport_disc_state(fcport,
				    DSC_DELETED);
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				fallthrough;
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	/* WWPN not in the firmware list: topology-specific recovery. */
	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): port_id byte order here
				 * ([0]=domain) is the reverse of the loop
				 * above ([2]=domain) — confirm intended.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			break;
		}
	}
} /* gnl_event */
961
/*
 * Completion callback for the Get Name List mailbox SRB.  Marks all
 * firmware-reported loop ids used, delivers a gnl-done event to every
 * port queued on vha->gnl.fcports, creates fcports for sessions the
 * firmware knows about but the driver does not, and re-triggers GNL if
 * new waiters queued while this one ran.
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	/*
	 * NOTE(review): on timeout we return without sp->free() —
	 * presumably the timeout path owns the SRB then; confirm.
	 */
	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* mb[1] holds the number of bytes the firmware transferred. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Reserve every loop id the firmware reports as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	/* Detach the waiter list under the lock, then process unlocked. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
1075
/*
 * Issue an asynchronous Get Name List mailbox command for @fcport.
 *
 * Multiple ports may be waiting on one GNL: the port is queued on
 * vha->gnl.fcports and, if a GNL is already in flight (vha->gnl.sent),
 * this returns QLA_SUCCESS immediately — the in-flight completion will
 * service the whole queue.  Returns a qla2x00 status code.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	/* Record generations so the completion can detect staleness. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* An in-flight GNL will pick this port up on completion. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* MBC_PORT_NODE_NAME_LIST with the gnl DMA buffer address/size. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
	return rval;
}
1145
/*
 * Queue a QLA_EVT_GNL work item so the DPC thread issues a Get Node
 * List for @fcport.  Marks the fcport async-active on successful
 * allocation of the work event.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GNL);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_ACTIVE;
	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1158
/*
 * Completion callback for the async Get Port Database mailbox command.
 * Forwards the result to qla24xx_handle_gpdb_event() unless the command
 * timed out, then releases the DMA'd port-database buffer and the srb.
 */
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	/* On timeout the port-database contents are not trustworthy;
	 * skip event processing and just release resources. */
	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

done:
	/* Return the buffer allocated by qla24xx_async_gpdb(). */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}
1188
/*
 * Queue a QLA_EVT_PRLI work item so the DPC thread sends a PRLI to
 * @fcport.  Refused in pure target mode, where the driver never
 * initiates process login.
 */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	evt = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1204
/*
 * Completion callback for an async PRLI srb.  Packages the logio
 * results into an event_arg and hands it to the PRLI-done state
 * machine, unless the driver is unloading.  Always frees the srb.
 */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		struct event_arg ea = {
			.fcport = sp->fcport,
			.data = { iocb->u.logio.data[0],
				  iocb->u.logio.data[1] },
			.iop = { iocb->u.logio.iop[0],
				 iocb->u.logio.iop[1] },
			.sp = sp,
		};

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}
1231
/*
 * qla24xx_async_prli - issue an asynchronous PRLI (process login) to
 * @fcport.
 *
 * Skipped when the adapter is offline, or in dual mode when the remote
 * port already has a PLOGI/PRLI pending from the firmware side (the
 * remote initiated first).  For NVMe targets the NVMe PRLI flavor is
 * requested via SRB_LOGIN_NVME_PRLI.
 *
 * Returns QLA_SUCCESS when the srb was started, QLA_FUNCTION_FAILED
 * otherwise; on start failure RELOGIN_NEEDED is set so the DPC thread
 * retries later.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* In dual mode, don't collide with a login the remote side is
	 * already driving. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	/* Request the NVMe variant of PRLI for NVMe-capable targets. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not start: ask the DPC thread to relogin later. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
1293
/*
 * Queue a QLA_EVT_GPDB work item so the DPC thread fetches the port
 * database for @fcport with mailbox option byte @opt.  Marks the fcport
 * async-active on successful allocation of the work event.
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GPDB);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	fcport->flags |= FCF_ASYNC_ACTIVE;
	evt->u.fcport.fcport = fcport;
	evt->u.fcport.opt = opt;

	return qla2x00_post_work(vha, evt);
}
1307
/*
 * qla24xx_async_gpdb - issue an asynchronous Get Port Database mailbox
 * command for @fcport with option byte @opt.
 *
 * Allocates a port_database_24xx response buffer from the s_dma_pool;
 * the buffer is freed by qla24xx_async_gpdb_sp_done() on completion, or
 * here on the failure path.
 *
 * On any failure the request is re-queued via qla24xx_post_gpdb_work()
 * so the DPC thread retries it.
 *
 * Returns QLA_SUCCESS when the srb was started, QLA_FUNCTION_FAILED
 * otherwise.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	/* Need the adapter online, no GPDB already in flight for this
	 * fcport, and a valid loop id to query. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Snapshot generations so the completion handler can detect
	 * RSCN/login-state changes that raced with this command. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* Mailbox registers: loop id, DMA address of the response
	 * buffer, VP index, and caller-supplied option byte. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx->u.mbx.in = pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Re-queue the request so the DPC thread retries the GPDB. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
1384
/*
 * Finalize a successful GPDB for ea->fcport: bump the login generation,
 * clear the deleted flag, and either register a first-time login with
 * the upper layers or note that an existing session revalidated.
 * Takes tgt.sess_lock internally.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		/* First successful login for this fcport. */
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Lock dropped around qla24xx_sched_upd_fcport() —
		 * presumably it must not run under tgt.sess_lock; confirm
		 * against its definition. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
1415
/*
 * Process a completed Get Port Database command: extract the firmware's
 * current login state for ea->fcport from the returned port database and
 * drive the discovery state machine accordingly — finish the login on
 * PRLI-complete, schedule a relogin on in-progress states, or tear the
 * session down on LOGO/unavailable.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	/* Session is being torn down; don't fight the delete path. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* current_login_state packs two nibbles: high = NVMe login state,
	 * low = FCP login state — pick the one that applies. */
	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived while the command was in flight: replay
		 * it and re-run discovery for this port. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	/* PRLI complete: finish registering the session. */
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
1479
/*
 * Decide whether this adapter should initiate login to @fcport and, if
 * so, kick off an async login.  In N2N (point-to-point) dual mode only
 * the side with the larger WWPN initiates, unless the remote completed
 * PLOGI and its NACK grace period has expired.  Pure target mode never
 * initiates.  Consumes one login retry per attempt; if no loop id can
 * be obtained the session is scheduled for deletion instead.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	bool do_login = true;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha) && N2N_TOPO(vha->hw)) {
		u64 my_wwpn = wwn_to_u64(vha->port_name);
		u64 remote_wwpn = wwn_to_u64(fcport->port_name);

		/* Larger WWPN wins the right to initiate; otherwise wait
		 * out the remote's PLOGI-NACK deadline. */
		do_login = my_wwpn > remote_wwpn ||
		    (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
		     time_after_eq(jiffies,
			 fcport->plogi_nack_done_deadline));
	}

	if (!do_login || !fcport->login_retry)
		return;

	fcport->login_retry--;

	if (fcport->loop_id == FC_NO_LOOP_ID) {
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		rc = qla2x00_find_new_loop_id(vha, fcport);
		if (rc) {
			ql_dbg(ql_dbg_disc, vha, 0x20e6,
			    "%s %d %8phC post del sess - out of loopid\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->scan_state = 0;
			qlt_schedule_sess_for_deletion(fcport);
			return;
		}
	}

	ql_dbg(ql_dbg_disc, vha, 0x20bf,
	    "%s %d %8phC post login\n",
	    __func__, __LINE__, fcport->port_name);
	qla2x00_post_async_login_work(vha, fcport, NULL);
}
1528
/*
 * qla24xx_fcport_handle_login - discovery state machine driver for one
 * fcport.  Called from the relogin/DPC path; inspects the current
 * discovery state and firmware login state and posts the next async
 * step (GNNID, GNL, PLOGI, PRLI, GPDB, ADISC) as appropriate.
 *
 * Always returns 0; progress is made via posted work items and dpc
 * flags, not the return value.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	/* Only act on ports the last scan actually found. */
	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* In dual mode, don't collide with a login the remote side is
	 * already driving through firmware. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	/* After a PLOGI NACK, wait out the grace period before retrying
	 * (non-N2N only); re-arm the relogin flag instead. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	/* An async command is already outstanding; try again later. */
	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* N2N: the bigger-WWPN side drives ELS PLOGI;
			 * the smaller side waits and fetches GNL. */
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* NOTE(review): 0x6 appears to be the firmware's
			 * PRLI-complete login state nibble — confirm
			 * against PDS_* definitions. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			}  else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* A conflicting session must clean up
				 * first; defer to a later relogin pass. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Registration with the upper layers is in progress;
		 * warn once a minute if it is taking unusually long. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
1686
/*
 * Queue a QLA_EVT_NEW_SESS work item describing a newly discovered
 * remote port (nport id, WWPN, optional WWNN, pending-login handle and
 * FC-4 type) for the DPC thread to create a session from.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	evt->u.new_sess.id = *id;
	evt->u.new_sess.pla = pla;
	evt->u.new_sess.fc4_type = fc4_type;
	memcpy(evt->u.new_sess.port_name, port_name, WWN_SIZE);

	/* Node name is optional. */
	if (node_name != NULL)
		memcpy(evt->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, evt);
}
1705
/*
 * Handle an RSCN for the nport id in @ea: mark the matching fcport (if
 * any) as needing a rescan, bump its RSCN generation, and schedule the
 * deferred fabric scan unless one is already queued.
 */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;
	fc_port_t *fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);

	if (fcport) {
		fcport->rscn_gen++;
		fcport->scan_needed = 1;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if (!vha->scan.scan_flags) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
1725
/*
 * Relogin event handler: if an RSCN arrived since the last pass, fetch
 * a fresh node list first; otherwise run the login state machine for
 * the fcport.  No-op while the driver is unloading.
 */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
				  struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen == fcport->rscn_gen) {
		qla24xx_fcport_handle_login(vha, fcport);
		return;
	}

	/* An unconsumed RSCN invalidates our view; re-query the list. */
	ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
	    __func__, __LINE__, fcport->port_name);
	qla24xx_post_gnl_work(vha, fcport);
}
1752
/*
 * ELS PLOGI completion: follow up with a PRLI to the remote port.
 * Skipped in pure target mode, where the driver does not initiate
 * process login.
 */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
			       struct event_arg *ea)
{
	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}
1765
1766 /*
1767 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1768 * to be consumed by the fcport
1769 */
qla_rscn_replay(fc_port_t * fcport)1770 void qla_rscn_replay(fc_port_t *fcport)
1771 {
1772 struct event_arg ea;
1773
1774 switch (fcport->disc_state) {
1775 case DSC_DELETE_PEND:
1776 return;
1777 default:
1778 break;
1779 }
1780
1781 if (fcport->scan_needed) {
1782 memset(&ea, 0, sizeof(ea));
1783 ea.id = fcport->d_id;
1784 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1785 qla2x00_handle_rscn(fcport->vha, &ea);
1786 }
1787 }
1788
/*
 * Timeout handler for an outstanding task-management IOCB.  First tries
 * to abort the command in firmware; if the abort itself fails, manually
 * removes the srb from the request queue's outstanding-command table
 * and completes the TMF with CS_TIMEOUT so the waiter in
 * qla2x00_async_tm_cmd() wakes up.
 */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	rc = qla24xx_async_abort_cmd(sp, false);
	if (rc) {
		/* Abort failed: clear our slot in outstanding_cmds[]
		 * (search starts at 1; handle 0 appears reserved). */
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		/* Fake a timeout completion for the waiting caller. */
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}
1812
/* TMF srb completion: wake the waiter in qla2x00_async_tm_cmd(). */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	complete(&sp->u.iocb_cmd.u.tmf.comp);
}
1819
/*
 * qla2x00_async_tm_cmd - issue a task-management IOCB (e.g. LUN reset,
 * per @flags) to @fcport and wait synchronously for its completion.
 *
 * Despite the "async" name this function blocks on a completion that is
 * signalled by qla2x00_tmf_sp_done() or the timeout handler.  On
 * success (and unless unloading / QLAFX00) a marker IOCB is issued to
 * resynchronize the firmware for the affected target or LUN.
 *
 * Returns the TMF completion status, or QLA_FUNCTION_FAILED if the srb
 * could not be allocated or started.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Block until the firmware completes or the IOCB times out. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
1879
/*
 * qla24xx_async_abort_command - abort an outstanding srb.
 *
 * Verifies that @sp is still present in its queue pair's
 * outstanding-command table (handle search under qp_lock, starting at
 * 1), then dispatches to the ISPFx00-specific abort for FX disc
 * commands or the generic async abort (waiting for completion).
 *
 * Returns QLA_FUNCTION_FAILED when the command is no longer
 * outstanding, otherwise the result of the underlying abort.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}
1908
/*
 * Process a PRLI completion for ea->fcport.
 *
 * On MBS_COMMAND_COMPLETE: record NVMe PRLI service parameters (first
 * burst size from iop[1] in 512-byte units) and fetch the port database.
 *
 * On failure: an ELS reject with reason "busy" just re-arms relogin.
 * Otherwise, in N2N topology the fc4 type is flipped between FCP and
 * NVMe (opposite of the configured priority) and the link is reset up
 * to 3 times to restart the state machine; on a switched fabric the
 * session is torn down so relogin can retrigger, dropping the failed
 * fc4 type for dual NVMe/FCP targets.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			/* First-burst size is reported in 512-byte blocks. */
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			/* Remote is busy; just retry the login later. */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		    "FCP" : "NVMe", ea->fcport->fc4_type);

		if (N2N_TOPO(vha->hw)) {
			/* Try the other FC-4 protocol on the next attempt. */
			if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
			} else {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
				ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
			}

			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * his state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			if (NVME_FCP_TARGET(ea->fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s prli\n",
				    __func__, __LINE__,
				    ea->fcport->port_name,
				    (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
				    ? "NVMe" : "FCP");
				if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
					ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				else
					ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			}

			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
1994
/*
 * Process a PLOGI completion for ea->fcport.
 *
 * Bails out when the remote side is mid-login, the session is being
 * deleted, or the login/RSCN generations changed while the command was
 * in flight (the RSCN case replays the RSCN and tears the session
 * down).
 *
 * Mailbox status handling:
 *   MBS_COMMAND_COMPLETE - follow with PRLI (NVMe) or GPDB (FCP);
 *   MBS_COMMAND_ERROR    - mark login failed, retry or mark lost;
 *   MBS_LOOP_ID_USED     - our loop id collides, pick a new one via GNL;
 *   MBS_PORT_ID_USED     - our nport id collides with another session;
 *     pause this login until the conflicting fcport cleans up, or adopt
 *     the reported loop id and recycle the session.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN raced with the PLOGI: replay it and restart
		 * discovery for this port. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (NVME_TARGET(vha->hw, ea->fcport)) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
				"%s %d %8phC post prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Reserve the colliding loop id and get a fresh one. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* Adopt the firmware-reported loop id and recycle
			 * the session so login restarts cleanly. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
2136
2137 /****************************************************************************/
2138 /* QLogic ISP2x00 Hardware Support Functions. */
2139 /****************************************************************************/
2140
/*
 * qla83xx_nic_core_fw_load() - Bring up the NIC Core f/w under the IDC lock.
 * @vha: HA context
 *
 * Registers this function's presence with the Inter-Driver Communication
 * (IDC) framework, negotiates the IDC major/minor versions, and — when this
 * function owns the nic-core reset — moves the device state to READY before
 * running the IDC state handler.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	/* All IDC register traffic below happens under the IDC lock. */
	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	/* Two bits per port: OR in our minor version at this port's slot. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		/* Reset owner advances the device state to READY once the
		 * port configuration can be read back successfully. */
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
2213
2214 /*
2215 * qla2x00_initialize_adapter
2216 * Initialize board.
2217 *
2218 * Input:
2219 * ha = adapter block pointer.
2220 *
2221 * Returns:
2222 * 0 = success
2223 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Start this (re)initialization with clean statistics. */
	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* Queue 0 (the default request/response pair) is always in use. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		/* BIT_0 of mailbox 12 advertises a secure adapter. */
		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
	}


	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	/* Let priority default to FCP, can be overridden by nvram_config */
	ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ha->isp_ops->nvram_config(vha);

	/* Sanitize: anything other than FCP/NVME falls back to FCP. */
	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
		ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
	    ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* If smartsan enabled then require fdmi and rdp enabled */
	if (ql2xsmartsan) {
		ql2xfdmienable = 1;
		ql2xrdpenable = 1;
	}

	/* Load RISC firmware only when no usable image is already running. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	/* Report the driver version to the firmware. */
	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
2379
2380 /**
2381 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2382 * @vha: HA context
2383 *
2384 * Returns 0 on success.
2385 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity-error and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2410
2411 /**
2412 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2413 * @vha: HA context
2414 *
2415 * Returns 0 on success.
2416 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity-error and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC (poll up to 30000 x 10us for the pause bit). */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* A true 2300 cannot use memory-write-invalidate. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x0);
		rd_reg_word(&reg->ctrl_status);

		/* Release RISC module. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2492
2493 /**
2494 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2495 * @vha: HA context
2496 *
2497 * Returns 0 on success.
2498 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity-error and SERR reporting; keep INTx enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2536
2537 /**
2538 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2539 * @vha: HA context
2540 *
2541 * Returns 0 on success.
2542 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity-error and SERR reporting; keep INTx enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
2567
2568 /**
2569 * qla2x00_isp_firmware() - Choose firmware image.
2570 * @vha: HA context
2571 *
2572 * Returns 0 on success.
2573 */
2574 static int
qla2x00_isp_firmware(scsi_qla_host_t * vha)2575 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2576 {
2577 int rval;
2578 uint16_t loop_id, topo, sw_cap;
2579 uint8_t domain, area, al_pa;
2580 struct qla_hw_data *ha = vha->hw;
2581
2582 /* Assume loading risc code */
2583 rval = QLA_FUNCTION_FAILED;
2584
2585 if (ha->flags.disable_risc_code_load) {
2586 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2587
2588 /* Verify checksum of loaded RISC code. */
2589 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2590 if (rval == QLA_SUCCESS) {
2591 /* And, verify we are not in ROM code. */
2592 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2593 &area, &domain, &topo, &sw_cap);
2594 }
2595 }
2596
2597 if (rval)
2598 ql_dbg(ql_dbg_init, vha, 0x007a,
2599 "**** Load RISC code ****.\n");
2600
2601 return (rval);
2602 }
2603
2604 /**
2605 * qla2x00_reset_chip() - Reset ISP chip.
2606 * @vha: HA context
2607 *
2608 * Returns 0 on success.
2609 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	/* Nothing to do if the PCI channel is already gone. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to 30000 x 100us for the pause to latch. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for the RISC to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2750
2751 /**
2752 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2753 * @vha: HA context
2754 *
2755 * Returns 0 on success.
2756 */
2757 static int
qla81xx_reset_mpi(scsi_qla_host_t * vha)2758 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2759 {
2760 uint16_t mb[4] = {0x1010, 0, 1, 0};
2761
2762 if (!IS_QLA81XX(vha->hw))
2763 return QLA_SUCCESS;
2764
2765 return qla81xx_write_mpi_register(vha, mb);
2766 }
2767
2768 /**
2769 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2770 * @vha: HA context
2771 *
2772 * Returns 0 on success.
2773 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	/* ISP abort retry counts.
	 * NOTE: static -- this counter persists across calls (and adapters). */
	static int abts_cnt;
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC: shut down DMA and wait for it to go idle. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	/* Record DMA-shutdown completion for firmware-dump diagnostics. */
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	/* Issue the ISP soft reset. */
	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* Retry via a full ISP abort up to 5 times. */
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Reset the RISC, release its pause, then clear the reset. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	/* Wait for mailbox 0 to clear, signalling RISC readiness. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
2902
/* Read the RISC semaphore register through the iobase window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register bank, then read via the window. */
	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
2911
/* Write the RISC semaphore register through the iobase window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register bank, then write via the window. */
	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
2920
/*
 * qla25xx_manipulate_risc_semaphore() - Acquire the RISC semaphore prior to
 * chip reset, forcing it free if a previous holder left it set.
 * @vha: HA context
 *
 * Only applies to subsystem devices 0x0175 and 0x0240; a no-op otherwise.
 * Pauses the RISC, then retries set/clear of the semaphore under the
 * TIMEOUT_SEMAPHORE / TIMEOUT_SEMAPHORE_FORCE budgets; after
 * TIMEOUT_TOTAL_ELAPSED it gives up and force-sets the semaphore.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* Workaround is limited to these two subsystem devices. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Try to set the semaphore and see it latch. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	/* Latched and not force-held by another party: we own it. */
	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Someone force-set it; release our claim and wait for the
	 * force bit to clear before retrying. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Timed out negotiating: force-set the semaphore and move on. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2981
2982 /**
2983 * qla24xx_reset_chip() - Reset ISP24xx chip.
2984 * @vha: HA context
2985 *
2986 * Returns 0 on success.
2987 */
2988 int
qla24xx_reset_chip(scsi_qla_host_t * vha)2989 qla24xx_reset_chip(scsi_qla_host_t *vha)
2990 {
2991 struct qla_hw_data *ha = vha->hw;
2992 int rval = QLA_FUNCTION_FAILED;
2993
2994 if (pci_channel_offline(ha->pdev) &&
2995 ha->flags.pci_channel_io_perm_failure) {
2996 return rval;
2997 }
2998
2999 ha->isp_ops->disable_intrs(ha);
3000
3001 qla25xx_manipulate_risc_semaphore(vha);
3002
3003 /* Perform RISC reset. */
3004 rval = qla24xx_reset_risc(vha);
3005
3006 return rval;
3007 }
3008
3009 /**
3010 * qla2x00_chip_diag() - Test chip for proper operation.
3011 * @vha: HA context
3012 *
3013 * Returns 0 on success.
3014 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	/* Poll (bounded) until the soft-reset bit self-clears. */
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* Drop the lock: the mailbox test issues a real mailbox command. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
3132
3133 /**
3134 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3135 * @vha: HA context
3136 *
3137 * Returns 0 on success.
3138 */
3139 int
qla24xx_chip_diag(scsi_qla_host_t * vha)3140 qla24xx_chip_diag(scsi_qla_host_t *vha)
3141 {
3142 int rval;
3143 struct qla_hw_data *ha = vha->hw;
3144 struct req_que *req = ha->req_q_map[0];
3145
3146 if (IS_P3P_TYPE(ha))
3147 return QLA_SUCCESS;
3148
3149 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3150
3151 rval = qla2x00_mbx_reg_test(vha);
3152 if (rval) {
3153 ql_log(ql_log_warn, vha, 0x0082,
3154 "Failed mailbox send register test.\n");
3155 } else {
3156 /* Flag a successful rval */
3157 rval = QLA_SUCCESS;
3158 }
3159
3160 return rval;
3161 }
3162
3163 static void
qla2x00_init_fce_trace(scsi_qla_host_t * vha)3164 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3165 {
3166 int rval;
3167 dma_addr_t tc_dma;
3168 void *tc;
3169 struct qla_hw_data *ha = vha->hw;
3170
3171 if (!IS_FWI2_CAPABLE(ha))
3172 return;
3173
3174 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3175 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3176 return;
3177
3178 if (ha->fce) {
3179 ql_dbg(ql_dbg_init, vha, 0x00bd,
3180 "%s: FCE Mem is already allocated.\n",
3181 __func__);
3182 return;
3183 }
3184
3185 /* Allocate memory for Fibre Channel Event Buffer. */
3186 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3187 GFP_KERNEL);
3188 if (!tc) {
3189 ql_log(ql_log_warn, vha, 0x00be,
3190 "Unable to allocate (%d KB) for FCE.\n",
3191 FCE_SIZE / 1024);
3192 return;
3193 }
3194
3195 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3196 ha->fce_mb, &ha->fce_bufs);
3197 if (rval) {
3198 ql_log(ql_log_warn, vha, 0x00bf,
3199 "Unable to initialize FCE (%d).\n", rval);
3200 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3201 return;
3202 }
3203
3204 ql_dbg(ql_dbg_init, vha, 0x00c0,
3205 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3206
3207 ha->flags.fce_enabled = 1;
3208 ha->fce_dma = tc_dma;
3209 ha->fce = tc;
3210 }
3211
3212 static void
qla2x00_init_eft_trace(scsi_qla_host_t * vha)3213 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3214 {
3215 int rval;
3216 dma_addr_t tc_dma;
3217 void *tc;
3218 struct qla_hw_data *ha = vha->hw;
3219
3220 if (!IS_FWI2_CAPABLE(ha))
3221 return;
3222
3223 if (ha->eft) {
3224 ql_dbg(ql_dbg_init, vha, 0x00bd,
3225 "%s: EFT Mem is already allocated.\n",
3226 __func__);
3227 return;
3228 }
3229
3230 /* Allocate memory for Extended Trace Buffer. */
3231 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3232 GFP_KERNEL);
3233 if (!tc) {
3234 ql_log(ql_log_warn, vha, 0x00c1,
3235 "Unable to allocate (%d KB) for EFT.\n",
3236 EFT_SIZE / 1024);
3237 return;
3238 }
3239
3240 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3241 if (rval) {
3242 ql_log(ql_log_warn, vha, 0x00c2,
3243 "Unable to initialize EFT (%d).\n", rval);
3244 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3245 return;
3246 }
3247
3248 ql_dbg(ql_dbg_init, vha, 0x00c3,
3249 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3250
3251 ha->eft_dma = tc_dma;
3252 ha->eft = tc;
3253 }
3254
/*
 * Best-effort allocation of firmware trace buffers (FCE and EFT).
 * Each helper is a no-op on unsupported ISPs or when its buffer already
 * exists, and logs-and-continues on failure, so this never fails hard.
 */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}
3261
3262 void
qla2x00_alloc_fw_dump(scsi_qla_host_t * vha)3263 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3264 {
3265 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3266 eft_size, fce_size, mq_size;
3267 struct qla_hw_data *ha = vha->hw;
3268 struct req_que *req = ha->req_q_map[0];
3269 struct rsp_que *rsp = ha->rsp_q_map[0];
3270 struct qla2xxx_fw_dump *fw_dump;
3271
3272 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3273 req_q_size = rsp_q_size = 0;
3274
3275 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3276 fixed_size = sizeof(struct qla2100_fw_dump);
3277 } else if (IS_QLA23XX(ha)) {
3278 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3279 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3280 sizeof(uint16_t);
3281 } else if (IS_FWI2_CAPABLE(ha)) {
3282 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
3283 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3284 else if (IS_QLA81XX(ha))
3285 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3286 else if (IS_QLA25XX(ha))
3287 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3288 else
3289 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3290
3291 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3292 sizeof(uint32_t);
3293 if (ha->mqenable) {
3294 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
3295 !IS_QLA28XX(ha))
3296 mq_size = sizeof(struct qla2xxx_mq_chain);
3297 /*
3298 * Allocate maximum buffer size for all queues - Q0.
3299 * Resizing must be done at end-of-dump processing.
3300 */
3301 mq_size += (ha->max_req_queues - 1) *
3302 (req->length * sizeof(request_t));
3303 mq_size += (ha->max_rsp_queues - 1) *
3304 (rsp->length * sizeof(response_t));
3305 }
3306 if (ha->tgt.atio_ring)
3307 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3308
3309 qla2x00_init_fce_trace(vha);
3310 if (ha->fce)
3311 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3312 qla2x00_init_eft_trace(vha);
3313 if (ha->eft)
3314 eft_size = EFT_SIZE;
3315 }
3316
3317 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3318 struct fwdt *fwdt = ha->fwdt;
3319 uint j;
3320
3321 for (j = 0; j < 2; j++, fwdt++) {
3322 if (!fwdt->template) {
3323 ql_dbg(ql_dbg_init, vha, 0x00ba,
3324 "-> fwdt%u no template\n", j);
3325 continue;
3326 }
3327 ql_dbg(ql_dbg_init, vha, 0x00fa,
3328 "-> fwdt%u calculating fwdump size...\n", j);
3329 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3330 vha, fwdt->template);
3331 ql_dbg(ql_dbg_init, vha, 0x00fa,
3332 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3333 j, fwdt->dump_size);
3334 dump_size += fwdt->dump_size;
3335 }
3336 /* Add space for spare MPI fw dump. */
3337 dump_size += ha->fwdt[1].dump_size;
3338 } else {
3339 req_q_size = req->length * sizeof(request_t);
3340 rsp_q_size = rsp->length * sizeof(response_t);
3341 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3342 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3343 + eft_size;
3344 ha->chain_offset = dump_size;
3345 dump_size += mq_size + fce_size;
3346 if (ha->exchoffld_buf)
3347 dump_size += sizeof(struct qla2xxx_offld_chain) +
3348 ha->exchoffld_size;
3349 if (ha->exlogin_buf)
3350 dump_size += sizeof(struct qla2xxx_offld_chain) +
3351 ha->exlogin_size;
3352 }
3353
3354 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3355
3356 ql_dbg(ql_dbg_init, vha, 0x00c5,
3357 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3358 __func__, dump_size, ha->fw_dump_len,
3359 ha->fw_dump_alloc_len);
3360
3361 fw_dump = vmalloc(dump_size);
3362 if (!fw_dump) {
3363 ql_log(ql_log_warn, vha, 0x00c4,
3364 "Unable to allocate (%d KB) for firmware dump.\n",
3365 dump_size / 1024);
3366 } else {
3367 mutex_lock(&ha->optrom_mutex);
3368 if (ha->fw_dumped) {
3369 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3370 vfree(ha->fw_dump);
3371 ha->fw_dump = fw_dump;
3372 ha->fw_dump_alloc_len = dump_size;
3373 ql_dbg(ql_dbg_init, vha, 0x00c5,
3374 "Re-Allocated (%d KB) and save firmware dump.\n",
3375 dump_size / 1024);
3376 } else {
3377 if (ha->fw_dump)
3378 vfree(ha->fw_dump);
3379 ha->fw_dump = fw_dump;
3380
3381 ha->fw_dump_len = ha->fw_dump_alloc_len =
3382 dump_size;
3383 ql_dbg(ql_dbg_init, vha, 0x00c5,
3384 "Allocated (%d KB) for firmware dump.\n",
3385 dump_size / 1024);
3386
3387 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3388 ha->mpi_fw_dump = (char *)fw_dump +
3389 ha->fwdt[1].dump_size;
3390 mutex_unlock(&ha->optrom_mutex);
3391 return;
3392 }
3393
3394 ha->fw_dump->signature[0] = 'Q';
3395 ha->fw_dump->signature[1] = 'L';
3396 ha->fw_dump->signature[2] = 'G';
3397 ha->fw_dump->signature[3] = 'C';
3398 ha->fw_dump->version = htonl(1);
3399
3400 ha->fw_dump->fixed_size = htonl(fixed_size);
3401 ha->fw_dump->mem_size = htonl(mem_size);
3402 ha->fw_dump->req_q_size = htonl(req_q_size);
3403 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3404
3405 ha->fw_dump->eft_size = htonl(eft_size);
3406 ha->fw_dump->eft_addr_l =
3407 htonl(LSD(ha->eft_dma));
3408 ha->fw_dump->eft_addr_h =
3409 htonl(MSD(ha->eft_dma));
3410
3411 ha->fw_dump->header_size =
3412 htonl(offsetof
3413 (struct qla2xxx_fw_dump, isp));
3414 }
3415 mutex_unlock(&ha->optrom_mutex);
3416 }
3417 }
3418 }
3419
/*
 * ISP81xx only: make the MPS bits (MPS_MASK) of RISC RAM word 0x7a15
 * agree with the value read from PCI config space offset 0x54, writing
 * the RAM word back if they differ.  The whole exchange is guarded by a
 * semaphore taken/released by writing 1/0 to RISC RAM word 0x7c00.
 * Returns the status of the last mailbox operation performed.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Take the semaphore before touching the shared MPS word. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	/* Already consistent -- nothing to write back. */
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the PCI-side MPS bits into the RAM word. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
3466
3467 int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data * ha,struct req_que * req)3468 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3469 {
3470 /* Don't try to reallocate the array */
3471 if (req->outstanding_cmds)
3472 return QLA_SUCCESS;
3473
3474 if (!IS_FWI2_CAPABLE(ha))
3475 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3476 else {
3477 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3478 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3479 else
3480 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3481 }
3482
3483 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3484 sizeof(srb_t *),
3485 GFP_KERNEL);
3486
3487 if (!req->outstanding_cmds) {
3488 /*
3489 * Try to allocate a minimal size just so we can get through
3490 * initialization.
3491 */
3492 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3493 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3494 sizeof(srb_t *),
3495 GFP_KERNEL);
3496
3497 if (!req->outstanding_cmds) {
3498 ql_log(ql_log_fatal, NULL, 0x0126,
3499 "Failed to allocate memory for "
3500 "outstanding_cmds for req_que %p.\n", req);
3501 req->num_outstanding_cmds = 0;
3502 return QLA_FUNCTION_FAILED;
3503 }
3504 }
3505
3506 return QLA_SUCCESS;
3507 }
3508
/*
 * Append _str to the output buffer when bit _flag is set in a0->_field,
 * "|"-separating consecutive matches.  Relies on the caller's locals:
 * a0 (SFP page), ptr (write cursor), p (printed-anything flag),
 * leftover (bytes remaining) and len.  NOTE(review): leftover is not
 * re-checked against 0 before the strcat -- callers size the buffer
 * (STR_LEN) generously; confirm if the field strings ever grow.
 */
#define PRINT_FIELD(_field, _flag, _str) { \
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
3522
qla2xxx_print_sfp_info(struct scsi_qla_host * vha)3523 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3524 {
3525 #define STR_LEN 64
3526 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3527 u8 str[STR_LEN], *ptr, p;
3528 int leftover, len;
3529
3530 memset(str, 0, STR_LEN);
3531 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3532 ql_dbg(ql_dbg_init, vha, 0x015a,
3533 "SFP MFG Name: %s\n", str);
3534
3535 memset(str, 0, STR_LEN);
3536 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3537 ql_dbg(ql_dbg_init, vha, 0x015c,
3538 "SFP Part Name: %s\n", str);
3539
3540 /* media */
3541 memset(str, 0, STR_LEN);
3542 ptr = str;
3543 leftover = STR_LEN;
3544 p = len = 0;
3545 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3546 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3547 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3548 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3549 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3550 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3551 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3552 ql_dbg(ql_dbg_init, vha, 0x0160,
3553 "SFP Media: %s\n", str);
3554
3555 /* link length */
3556 memset(str, 0, STR_LEN);
3557 ptr = str;
3558 leftover = STR_LEN;
3559 p = len = 0;
3560 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3561 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3562 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3563 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3564 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3565 ql_dbg(ql_dbg_init, vha, 0x0196,
3566 "SFP Link Length: %s\n", str);
3567
3568 memset(str, 0, STR_LEN);
3569 ptr = str;
3570 leftover = STR_LEN;
3571 p = len = 0;
3572 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3573 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3574 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3575 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3576 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3577 ql_dbg(ql_dbg_init, vha, 0x016e,
3578 "SFP FC Link Tech: %s\n", str);
3579
3580 if (a0->length_km)
3581 ql_dbg(ql_dbg_init, vha, 0x016f,
3582 "SFP Distant: %d km\n", a0->length_km);
3583 if (a0->length_100m)
3584 ql_dbg(ql_dbg_init, vha, 0x0170,
3585 "SFP Distant: %d m\n", a0->length_100m*100);
3586 if (a0->length_50um_10m)
3587 ql_dbg(ql_dbg_init, vha, 0x0189,
3588 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3589 if (a0->length_62um_10m)
3590 ql_dbg(ql_dbg_init, vha, 0x018a,
3591 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3592 if (a0->length_om4_10m)
3593 ql_dbg(ql_dbg_init, vha, 0x0194,
3594 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3595 if (a0->length_om3_10m)
3596 ql_dbg(ql_dbg_init, vha, 0x0195,
3597 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3598 }
3599
3600
/**
 * qla24xx_detect_sfp()
 *
 * @vha: adapter state pointer.
 *
 * Return:
 * 0 -- Configure firmware to use short-range settings -- normal
 * buffer-to-buffer credits.
 *
 * 1 -- Configure firmware to use long-range settings -- extra
 * buffer-to-buffer credits should be allocated with
 * ha->lr_distance containing distance settings from NVRAM or SFP
 * (if supported).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN 2
	static const char * const types[] = { "Short", "Long" };
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		/* Distance code is packed into the NVRAM feature word. */
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		    & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		/* SFP read failed -- keep the NVRAM-derived settings. */
		goto out;

	/* SFP data wins over the NVRAM seed from here on. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		/* >5 km or >5000 m advertised => 10 km class. */
		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	    lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}
3671
qla_init_iocb_limit(scsi_qla_host_t * vha)3672 void qla_init_iocb_limit(scsi_qla_host_t *vha)
3673 {
3674 u16 i, num_qps;
3675 u32 limit;
3676 struct qla_hw_data *ha = vha->hw;
3677
3678 num_qps = ha->num_qpairs + 1;
3679 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
3680
3681 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
3682 ha->base_qpair->fwres.iocbs_limit = limit;
3683 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
3684 ha->base_qpair->fwres.iocbs_used = 0;
3685 for (i = 0; i < ha->max_qpairs; i++) {
3686 if (ha->queue_pair_map[i]) {
3687 ha->queue_pair_map[i]->fwres.iocbs_total =
3688 ha->orig_fw_iocb_count;
3689 ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
3690 ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
3691 limit / num_qps;
3692 ha->queue_pair_map[i]->fwres.iocbs_used = 0;
3693 }
3694 }
3695 }
3696
3697 /**
3698 * qla2x00_setup_chip() - Load and start RISC firmware.
3699 * @vha: HA context
3700 *
3701 * Returns 0 on success.
3702 */
3703 static int
qla2x00_setup_chip(scsi_qla_host_t * vha)3704 qla2x00_setup_chip(scsi_qla_host_t *vha)
3705 {
3706 int rval;
3707 uint32_t srisc_address = 0;
3708 struct qla_hw_data *ha = vha->hw;
3709 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3710 unsigned long flags;
3711 uint16_t fw_major_version;
3712 int done_once = 0;
3713
3714 if (IS_P3P_TYPE(ha)) {
3715 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3716 if (rval == QLA_SUCCESS) {
3717 qla2x00_stop_firmware(vha);
3718 goto enable_82xx_npiv;
3719 } else
3720 goto failed;
3721 }
3722
3723 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3724 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3725 spin_lock_irqsave(&ha->hardware_lock, flags);
3726 wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0));
3727 rd_reg_word(®->hccr);
3728 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3729 }
3730
3731 qla81xx_mpi_sync(vha);
3732
3733 execute_fw_with_lr:
3734 /* Load firmware sequences */
3735 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3736 if (rval == QLA_SUCCESS) {
3737 ql_dbg(ql_dbg_init, vha, 0x00c9,
3738 "Verifying Checksum of loaded RISC code.\n");
3739
3740 rval = qla2x00_verify_checksum(vha, srisc_address);
3741 if (rval == QLA_SUCCESS) {
3742 /* Start firmware execution. */
3743 ql_dbg(ql_dbg_init, vha, 0x00ca,
3744 "Starting firmware.\n");
3745
3746 if (ql2xexlogins)
3747 ha->flags.exlogins_enabled = 1;
3748
3749 if (qla_is_exch_offld_enabled(vha))
3750 ha->flags.exchoffld_enabled = 1;
3751
3752 rval = qla2x00_execute_fw(vha, srisc_address);
3753 /* Retrieve firmware information. */
3754 if (rval == QLA_SUCCESS) {
3755 /* Enable BPM support? */
3756 if (!done_once++ && qla24xx_detect_sfp(vha)) {
3757 ql_dbg(ql_dbg_init, vha, 0x00ca,
3758 "Re-starting firmware -- BPM.\n");
3759 /* Best-effort - re-init. */
3760 ha->isp_ops->reset_chip(vha);
3761 ha->isp_ops->chip_diag(vha);
3762 goto execute_fw_with_lr;
3763 }
3764
3765 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
3766 qla27xx_set_zio_threshold(vha,
3767 ha->last_zio_threshold);
3768
3769 rval = qla2x00_set_exlogins_buffer(vha);
3770 if (rval != QLA_SUCCESS)
3771 goto failed;
3772
3773 rval = qla2x00_set_exchoffld_buffer(vha);
3774 if (rval != QLA_SUCCESS)
3775 goto failed;
3776
3777 enable_82xx_npiv:
3778 fw_major_version = ha->fw_major_version;
3779 if (IS_P3P_TYPE(ha))
3780 qla82xx_check_md_needed(vha);
3781 else
3782 rval = qla2x00_get_fw_version(vha);
3783 if (rval != QLA_SUCCESS)
3784 goto failed;
3785 ha->flags.npiv_supported = 0;
3786 if (IS_QLA2XXX_MIDTYPE(ha) &&
3787 (ha->fw_attributes & BIT_2)) {
3788 ha->flags.npiv_supported = 1;
3789 if ((!ha->max_npiv_vports) ||
3790 ((ha->max_npiv_vports + 1) %
3791 MIN_MULTI_ID_FABRIC))
3792 ha->max_npiv_vports =
3793 MIN_MULTI_ID_FABRIC - 1;
3794 }
3795 qla2x00_get_resource_cnts(vha);
3796 qla_init_iocb_limit(vha);
3797
3798 /*
3799 * Allocate the array of outstanding commands
3800 * now that we know the firmware resources.
3801 */
3802 rval = qla2x00_alloc_outstanding_cmds(ha,
3803 vha->req);
3804 if (rval != QLA_SUCCESS)
3805 goto failed;
3806
3807 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
3808 qla2x00_alloc_offload_mem(vha);
3809
3810 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
3811 qla2x00_alloc_fw_dump(vha);
3812
3813 } else {
3814 goto failed;
3815 }
3816 } else {
3817 ql_log(ql_log_fatal, vha, 0x00cd,
3818 "ISP Firmware failed checksum.\n");
3819 goto failed;
3820 }
3821
3822 /* Enable PUREX PASSTHRU */
3823 if (ql2xrdpenable || ha->flags.scm_supported_f)
3824 qla25xx_set_els_cmds_supported(vha);
3825 } else
3826 goto failed;
3827
3828 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3829 /* Enable proper parity. */
3830 spin_lock_irqsave(&ha->hardware_lock, flags);
3831 if (IS_QLA2300(ha))
3832 /* SRAM parity */
3833 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1);
3834 else
3835 /* SRAM, Instruction RAM and GP RAM parity */
3836 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7);
3837 rd_reg_word(®->hccr);
3838 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3839 }
3840
3841 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
3842 ha->flags.fac_supported = 1;
3843 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
3844 uint32_t size;
3845
3846 rval = qla81xx_fac_get_sector_size(vha, &size);
3847 if (rval == QLA_SUCCESS) {
3848 ha->flags.fac_supported = 1;
3849 ha->fdt_block_size = size << 2;
3850 } else {
3851 ql_log(ql_log_warn, vha, 0x00ce,
3852 "Unsupported FAC firmware (%d.%02d.%02d).\n",
3853 ha->fw_major_version, ha->fw_minor_version,
3854 ha->fw_subminor_version);
3855
3856 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3857 IS_QLA28XX(ha)) {
3858 ha->flags.fac_supported = 0;
3859 rval = QLA_SUCCESS;
3860 }
3861 }
3862 }
3863 failed:
3864 if (rval) {
3865 ql_log(ql_log_fatal, vha, 0x00cf,
3866 "Setup chip ****FAILED****.\n");
3867 }
3868
3869 return (rval);
3870 }
3871
3872 /**
3873 * qla2x00_init_response_q_entries() - Initializes response queue entries.
3874 * @rsp: response queue
3875 *
3876 * Beginning of request ring has initialization control block already built
3877 * by nvram config routine.
3878 *
3879 * Returns 0 on success.
3880 */
3881 void
qla2x00_init_response_q_entries(struct rsp_que * rsp)3882 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3883 {
3884 uint16_t cnt;
3885 response_t *pkt;
3886
3887 rsp->ring_ptr = rsp->ring;
3888 rsp->ring_index = 0;
3889 rsp->status_srb = NULL;
3890 pkt = rsp->ring_ptr;
3891 for (cnt = 0; cnt < rsp->length; cnt++) {
3892 pkt->signature = RESPONSE_PROCESSED;
3893 pkt++;
3894 }
3895 }
3896
3897 /**
3898 * qla2x00_update_fw_options() - Read and process firmware options.
3899 * @vha: HA context
3900 *
3901 * Returns 0 on success.
3902 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/2200 have no serial-link or extended options below. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* BIT_2 in options[3] = NVRAM provides swing/emphasis overrides. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* fw_options[10] packs the 1G serial-link tuning word. */
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* 0 is not a usable rx sensitivity on these parts. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* fw_options[11] packs the 2G serial-link tuning word. */
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3985
/*
 * FWI2 (ISP24xx+) variant of update_fw_options: adjust fw_options[]
 * bits from module parameters and operating mode, push them to the
 * firmware, then program serdes parameters if NVRAM enables them.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	/* P3P (82xx) parts do not take these options. */
	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only bother firmware when some option bit is actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
4064
/*
 * Program the ISP2x00 request/response ring geometry and DMA addresses
 * into the init control block, then zero the ring in/out registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Start both rings at index 0. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
4087
4088 void
qla24xx_config_rings(struct scsi_qla_host * vha)4089 qla24xx_config_rings(struct scsi_qla_host *vha)
4090 {
4091 struct qla_hw_data *ha = vha->hw;
4092 device_reg_t *reg = ISP_QUE_REG(ha, 0);
4093 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4094 struct qla_msix_entry *msix;
4095 struct init_cb_24xx *icb;
4096 uint16_t rid = 0;
4097 struct req_que *req = ha->req_q_map[0];
4098 struct rsp_que *rsp = ha->rsp_q_map[0];
4099
4100 /* Setup ring parameters in initialization control block. */
4101 icb = (struct init_cb_24xx *)ha->init_cb;
4102 icb->request_q_outpointer = cpu_to_le16(0);
4103 icb->response_q_inpointer = cpu_to_le16(0);
4104 icb->request_q_length = cpu_to_le16(req->length);
4105 icb->response_q_length = cpu_to_le16(rsp->length);
4106 put_unaligned_le64(req->dma, &icb->request_q_address);
4107 put_unaligned_le64(rsp->dma, &icb->response_q_address);
4108
4109 /* Setup ATIO queue dma pointers for target mode */
4110 icb->atio_q_inpointer = cpu_to_le16(0);
4111 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4112 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
4113
4114 if (IS_SHADOW_REG_CAPABLE(ha))
4115 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
4116
4117 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4118 IS_QLA28XX(ha)) {
4119 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4120 icb->rid = cpu_to_le16(rid);
4121 if (ha->flags.msix_enabled) {
4122 msix = &ha->msix_entries[1];
4123 ql_dbg(ql_dbg_init, vha, 0x0019,
4124 "Registering vector 0x%x for base que.\n",
4125 msix->entry);
4126 icb->msix = cpu_to_le16(msix->entry);
4127 }
4128 /* Use alternate PCI bus number */
4129 if (MSB(rid))
4130 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
4131 /* Use alternate PCI devfn */
4132 if (LSB(rid))
4133 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
4134
4135 /* Use Disable MSIX Handshake mode for capable adapters */
4136 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4137 (ha->flags.msix_enabled)) {
4138 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4139 ha->flags.disable_msix_handshake = 1;
4140 ql_dbg(ql_dbg_init, vha, 0x00fe,
4141 "MSIX Handshake Disable Mode turned on.\n");
4142 } else {
4143 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4144 }
4145 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
4146
4147 wrt_reg_dword(®->isp25mq.req_q_in, 0);
4148 wrt_reg_dword(®->isp25mq.req_q_out, 0);
4149 wrt_reg_dword(®->isp25mq.rsp_q_in, 0);
4150 wrt_reg_dword(®->isp25mq.rsp_q_out, 0);
4151 } else {
4152 wrt_reg_dword(®->isp24.req_q_in, 0);
4153 wrt_reg_dword(®->isp24.req_q_out, 0);
4154 wrt_reg_dword(®->isp24.rsp_q_in, 0);
4155 wrt_reg_dword(®->isp24.rsp_q_out, 0);
4156 }
4157
4158 qlt_24xx_config_rings(vha);
4159
4160 /* If the user has configured the speed, set it here */
4161 if (ha->set_data_rate) {
4162 ql_dbg(ql_dbg_init, vha, 0x00fd,
4163 "Speed set by user : %s Gbps \n",
4164 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4165 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
4166 }
4167
4168 /* PCI posting */
4169 rd_reg_word(&ioreg->hccr);
4170 }
4171
4172 /**
4173 * qla2x00_init_rings() - Initializes firmware.
4174 * @vha: HA context
4175 *
4176 * Beginning of request ring has initialization control block already built
4177 * by nvram config routine.
4178 *
4179 * Returns 0 on success.
4180 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-index lives just past the last ring entry. */
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is skipped -- presumably reserved; confirm. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-index lives just past the last ring entry. */
		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	/* ISPFx00 uses its own init-firmware mailbox path. */
	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-Port is reported by firmware via options_1 BIT_7. */
		ha->flags.dport_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}
4282
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state (via the GET_FW_STATE mailbox command) every
 * 500 ms until FSTATE_READY, a cable-unplugged determination, or timeout.
 * Two deadlines run concurrently: @mtime bounds the wait while the loop is
 * down; @wtime bounds the overall wait for firmware ready.
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		/* Pre-fill with 0xffff so stale entries are recognizable. */
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh login/RATOV values from firmware. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
4414
/*
 * qla2x00_configure_hba
 *	Setup adapter context.
 *
 *	Queries the firmware for the adapter's loop ID, AL_PA/area/domain
 *	address and topology, then records the operating mode, topology and
 *	host port ID in the HA/VHA state.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int       rval;
	uint16_t      loop_id;
	uint16_t      topo;
	uint16_t      sw_cap;
	uint8_t       al_pa;
	uint8_t       area;
	uint8_t       domain;
	char		connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/* loop_id 0x7 from a COMMAND_ERROR means the loop is still
		 * transitioning -- treat as transient, not fatal. */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo == 4 means topology not yet determined; caller retries. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* In N2N with the bigger-WWPN peer, the host map is set elsewhere. */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
4536
/*
 * qla2x00_set_model_info
 *	Populate ha->model_number / ha->model_desc from the NVRAM-supplied
 *	@model bytes, falling back to the qla2x00_model_name device table
 *	(keyed by PCI subsystem device ID) or the caller-supplied @def string.
 *	On FWI2-capable parts the description is finally refreshed from the
 *	VPD "\x82" (product identification) field.
 */
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
	const char *def)
{
	char *st, *en;
	uint16_t index;
	uint64_t zero[2] = { 0 };
	struct qla_hw_data *ha = vha->hw;
	/* The lookup table only applies to older, non-CNA parts. */
	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);

	if (len > sizeof(zero))
		len = sizeof(zero);
	if (memcmp(model, &zero, len) != 0) {
		/* NVRAM supplied a model string: copy and right-trim it. */
		memcpy(ha->model_number, model, len);
		st = en = ha->model_number;
		en += len - 1;
		while (en > st) {
			if (*en != 0x20 && *en != 0x00)
				break;
			*en-- = '\0';
		}

		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES)
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
	} else {
		/* No NVRAM model: look up number+description by subsystem ID,
		 * or fall back to the caller-provided default number. */
		index = (ha->pdev->subsystem_device & 0xff);
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
				qla2x00_model_name[index * 2],
				sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			strlcpy(ha->model_number, def,
				sizeof(ha->model_number));
		}
	}
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}
4587
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 *
 * Reads the "port-wwn" / "node-wwn" OpenFirmware properties of the PCI
 * device and copies them into @nv; a no-op on non-SPARC builds.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
4609
/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha                = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Reads and checksums the NVRAM, substituting safe defaults when the
 * contents are invalid, copies the RISC parameter block into the
 * initialization control block, and derives driver-side timing/retry
 * parameters (login timeout, RATOV, link-down handling, ZIO/RIO mode).
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int             rval;
	uint8_t         chksum = 0;
	uint16_t        cnt;
	uint8_t         *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t       *icb = ha->init_cb;
	nvram_t         *nv = ha->nvram;
	uint8_t         *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		/* Function 1 of a dual-function part uses the upper half. */
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Placeholder (invalid) WWPN: 21:00:00:e0:8b:... prefix. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Non-zero rval signals the "invalid NVRAM" fallback path. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
4920
4921 static void
qla2x00_rport_del(void * data)4922 qla2x00_rport_del(void *data)
4923 {
4924 fc_port_t *fcport = data;
4925 struct fc_rport *rport;
4926 unsigned long flags;
4927
4928 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4929 rport = fcport->drport ? fcport->drport : fcport->rport;
4930 fcport->drport = NULL;
4931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4932 if (rport) {
4933 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4934 "%s %8phN. rport %p roles %x\n",
4935 __func__, fcport->port_name, rport,
4936 rport->roles);
4937
4938 fc_remote_port_delete(rport);
4939 }
4940 }
4941
/*
 * qla2x00_set_fcport_state
 *	Atomically set @fcport's discovery state to @state, logging the
 *	transition unless this is the initial assignment (old state 0) or
 *	the state is unchanged.
 */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int prev = atomic_read(&fcport->state);

	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (!prev || prev == state)
		return;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
	    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
	    fcport->port_name, port_state_str[prev],
	    port_state_str[state], fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
}
4958
4959 /**
4960 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4961 * @vha: HA context
4962 * @flags: allocation flags
4963 *
4964 * Returns a pointer to the allocated fcport, or NULL, if none available.
4965 */
4966 fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t * vha,gfp_t flags)4967 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4968 {
4969 fc_port_t *fcport;
4970
4971 fcport = kzalloc(sizeof(fc_port_t), flags);
4972 if (!fcport)
4973 return NULL;
4974
4975 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4976 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4977 flags);
4978 if (!fcport->ct_desc.ct_sns) {
4979 ql_log(ql_log_warn, vha, 0xd049,
4980 "Failed to allocate ct_sns request.\n");
4981 kfree(fcport);
4982 return NULL;
4983 }
4984
4985 /* Setup fcport template structure. */
4986 fcport->vha = vha;
4987 fcport->port_type = FCT_UNKNOWN;
4988 fcport->loop_id = FC_NO_LOOP_ID;
4989 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4990 fcport->supported_classes = FC_COS_UNSPECIFIED;
4991 fcport->fp_speed = PORT_SPEED_UNKNOWN;
4992
4993 fcport->disc_state = DSC_DELETED;
4994 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4995 fcport->deleted = QLA_SESS_DELETED;
4996 fcport->login_retry = vha->hw->login_retry_count;
4997 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
4998 fcport->logout_on_delete = 1;
4999
5000 if (!fcport->ct_desc.ct_sns) {
5001 ql_log(ql_log_warn, vha, 0xd049,
5002 "Failed to allocate ct_sns request.\n");
5003 kfree(fcport);
5004 return NULL;
5005 }
5006
5007 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5008 INIT_WORK(&fcport->free_work, qlt_free_session_done);
5009 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5010 INIT_LIST_HEAD(&fcport->gnl_entry);
5011 INIT_LIST_HEAD(&fcport->list);
5012
5013 return fcport;
5014 }
5015
5016 void
qla2x00_free_fcport(fc_port_t * fcport)5017 qla2x00_free_fcport(fc_port_t *fcport)
5018 {
5019 if (fcport->ct_desc.ct_sns) {
5020 dma_free_coherent(&fcport->vha->hw->pdev->dev,
5021 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
5022 fcport->ct_desc.ct_sns_dma);
5023
5024 fcport->ct_desc.ct_sns = NULL;
5025 }
5026 list_del(&fcport->list);
5027 qla2x00_clear_loop_id(fcport);
5028 kfree(fcport);
5029 }
5030
/*
 * qla_get_login_template
 *	Fetch the firmware's PLOGI ELS payload template into
 *	ha->plogi_els_payld (starting at the common-service-parameters
 *	field), using ha->init_cb as a scratch DMA buffer, and mark the
 *	template valid on success. Template data is byte-swapped to
 *	big-endian wire order during the copy.
 */
static void qla_get_login_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	u32 *bp, sz;
	__be32 *q;

	memset(ha->init_cb, 0, ha->init_cb_size);
	/* Only read as much as fits in both the FLOGI payload and init_cb. */
	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
					    ha->init_cb, sz);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		       "PLOGI ELS param read fail.\n");
		return;
	}
	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	/* Swap CPU-order template words into big-endian wire order. */
	bp = (uint32_t *)ha->init_cb;
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
}
5053
/*
 * qla2x00_configure_loop
 *      Updates Fibre Channel Device Database with what is actually on loop.
 *
 *	Decides, based on topology and the pending LOCAL_LOOP_UPDATE /
 *	RSCN_UPDATE dpc flags, whether to rescan the local loop, the fabric,
 *	or both; marks the loop READY on success.  If a resync event fires
 *	mid-scan, the consumed flags are restored so dpc retries.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 *      1 = error.
 *      2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int  rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Snapshot dpc flags; work on the local copy from here on. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Fabric topologies scan via RSCN path, not local loop. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown state: do both scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
5179
/*
 * qla2x00_configure_n2n_loop
 *	N2N (point-to-point) discovery: if a peer port with n2n_flag set
 *	already exists, drive its login state machine; otherwise bump the
 *	scan retry counter and, while retries remain, reschedule a resync.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	unsigned long iflags;
	fc_port_t *fcport;

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	/* Kick the login state machine on the N2N peer, if we have one. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->n2n_flag)
			continue;
		qla24xx_fcport_handle_login(vha, fcport);
		return QLA_SUCCESS;
	}

	/* No peer discovered yet -- count a retry. */
	spin_lock_irqsave(&vha->work_lock, iflags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, iflags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}
	return QLA_FUNCTION_FAILED;
}
5205
5206 /*
5207 * qla2x00_configure_local_loop
5208 * Updates Fibre Channel Device Database with local loop devices.
5209 *
5210 * Input:
5211 * ha = adapter block pointer.
5212 *
5213 * Returns:
5214 * 0 = success.
5215 */
5216 static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	/*
	 * Refresh the driver's view of the local loop: read the firmware's
	 * logged-in ID list, match each entry against the existing fcport
	 * list (adding new ports as needed), then schedule login work for
	 * found ports and deletion for ports that disappeared.
	 * Returns a QLA_* status code.
	 */
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;
	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login; point-to-point topology is handled apart. */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	if (entries == 0) {
		/*
		 * Empty list: account a scan retry (under work_lock) and,
		 * if retries remain, request another loop resync.
		 */
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Assume every known port is gone until this scan sees it again. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		/* 2100/2200 parts use an 8-bit loop ID field. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		/* Entry stride is chip-dependent; advance by its size. */
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;


		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		/* sess_lock protects the fcport list while matching. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Same WWPN: refresh the existing entry in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			/* GFP_KERNEL may sleep: drop the spinlock first. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Second pass: act on the scan result for every known port. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			/* Port was not reported by this scan. */
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	/* Release the spare fcport left over from the discovery loop. */
	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}
5416
5417 static void
qla2x00_iidma_fcport(scsi_qla_host_t * vha,fc_port_t * fcport)5418 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5419 {
5420 int rval;
5421 uint16_t mb[MAILBOX_REGISTER_COUNT];
5422 struct qla_hw_data *ha = vha->hw;
5423
5424 if (!IS_IIDMA_CAPABLE(ha))
5425 return;
5426
5427 if (atomic_read(&fcport->state) != FCS_ONLINE)
5428 return;
5429
5430 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5431 fcport->fp_speed > ha->link_data_rate ||
5432 !ha->flags.gpsc_supported)
5433 return;
5434
5435 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5436 mb);
5437 if (rval != QLA_SUCCESS) {
5438 ql_dbg(ql_dbg_disc, vha, 0x2004,
5439 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5440 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5441 } else {
5442 ql_dbg(ql_dbg_disc, vha, 0x2005,
5443 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
5444 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5445 fcport->fp_speed, fcport->port_name);
5446 }
5447 }
5448
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	/* Apply the iIDMA speed setting, then refresh the FCP priority. */
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
5454
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt;

	/* Queue deferred iIDMA work for this fcport. */
	evt = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!evt)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
5466
5467 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Nothing to do if the port is already marked ONLINE. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	/* Identify the remote port to the FC transport by WWNN/WWPN/ID. */
	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/*
	 * Stash our fcport in the rport's driver-private area under
	 * host_lock so transport callbacks see a consistent pointer.
	 */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/* Translate the driver's port type into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));

	/* Announce the resolved roles to the transport. */
	fc_remote_port_rolechg(rport, rport_ids.roles);
}
5516
5517 /*
5518 * qla2x00_update_fcport
5519 * Updates device on list.
5520 *
5521 * Input:
5522 * ha = adapter block pointer.
5523 * fcport = port structure pointer.
5524 *
5525 * Return:
5526 * 0 - Success
5527 * BIT_0 - error
5528 *
5529 * Context:
5530 * Kernel context.
5531 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Reserved switch addresses never become regular fcports. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	/* Flag the update as in progress and reset login bookkeeping. */
	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	/* No explicit logout on delete when on a private loop (NL). */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	/* Keep the firmware N-port handle across relogins on N2N/NL. */
	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	/* NVMe targets register with the NVMe-FC remote side and finish. */
	if (NVME_TARGET(vha->hw, fcport)) {
		qla_nvme_register_remote(vha, fcport);
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the FC transport and/or the target core per mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			/* Port ID changed: queue a GFPN_ID query. */
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			/* Otherwise queue a GPSC query for the port speed. */
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
5613
qla_register_fcport_fn(struct work_struct * work)5614 void qla_register_fcport_fn(struct work_struct *work)
5615 {
5616 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5617 u32 rscn_gen = fcport->rscn_gen;
5618 u16 data[2];
5619
5620 if (IS_SW_RESV_ADDR(fcport->d_id))
5621 return;
5622
5623 qla2x00_update_fcport(fcport->vha, fcport);
5624
5625 if (rscn_gen != fcport->rscn_gen) {
5626 /* RSCN(s) came in while registration */
5627 switch (fcport->next_disc_state) {
5628 case DSC_DELETE_PEND:
5629 qlt_schedule_sess_for_deletion(fcport);
5630 break;
5631 case DSC_ADISC:
5632 data[0] = data[1] = 0;
5633 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5634 data);
5635 break;
5636 default:
5637 break;
5638 }
5639 }
5640 }
5641
5642 /*
5643 * qla2x00_configure_fabric
5644 * Setup SNS devices with loop ID's.
5645 *
5646 * Input:
5647 * ha = adapter block pointer.
5648 *
5649 * Returns:
5650 * 0 = success.
5651 * BIT_0 = error
5652 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* Not fatal: no switch means nothing to configure. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	/* Request RSCN delivery when target/dual mode is enabled. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* Single-pass block: a "break" below skips the remaining steps. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Register FC-4 types, features, and names with the SNS. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			/* Asynchronous GPN_FT-based scan. */
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			/* Synchronous scan: mark all ports, then rediscover. */
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
5784
5785 /*
5786 * qla2x00_find_all_fabric_devs
5787 *
5788 * Input:
5789 * ha = adapter block pointer.
5790 * dev = database device entry pointer.
5791 *
5792 * Returns:
5793 * 0 = success.
5794 *
5795 * Context:
5796 * Kernel context.
5797 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		/* Any failed name-server query clears swl -> GA_NXT path. */
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/* Bail out if the loop went down or is transitioning. */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				/* End of cached list: trigger wrap check. */
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				/* Next entry from cached GID_PT results. */
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 set marks the last list entry. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		/* sess_lock guards the fcport list during match/insert. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found && NVME_TARGET(vha->hw, fcport)) {
			/*
			 * NOTE(review): appears to roll back login
			 * accounting for an NVMe target whose deletion is
			 * pending — confirm against the login path.
			 */
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* Release the spare fcport left over from the scan loop. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			/* Port was not reported by this scan. */
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
6091
6092 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
6093 int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t * vha)6094 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6095 {
6096 int loop_id = FC_NO_LOOP_ID;
6097 int lid = NPH_MGMT_SERVER - vha->vp_idx;
6098 unsigned long flags;
6099 struct qla_hw_data *ha = vha->hw;
6100
6101 if (vha->vp_idx == 0) {
6102 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6103 return NPH_MGMT_SERVER;
6104 }
6105
6106 /* pick id from high and work down to low */
6107 spin_lock_irqsave(&ha->vport_slock, flags);
6108 for (; lid > 0; lid--) {
6109 if (!test_bit(lid, vha->hw->loop_id_map)) {
6110 set_bit(lid, vha->hw->loop_id_map);
6111 loop_id = lid;
6112 break;
6113 }
6114 }
6115 spin_unlock_irqrestore(&ha->vport_slock, flags);
6116
6117 return loop_id;
6118 }
6119
6120 /*
6121 * qla2x00_fabric_login
6122 * Issue fabric login command.
6123 *
6124 * Input:
6125 * ha = adapter block pointer.
6126 * device = pointer to FC device type structure.
6127 *
6128 * Returns:
6129 * 0 - Login successfully
6130 * 1 - Login failed
6131 * 2 - Initiator device
6132 * 3 - Fatal error
6133 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int rval;
	int retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry the login until the firmware gives a final disposition. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID. The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] bit 0 set -> remote port is an initiator. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					/* Bit 1 -> FCP-2 capable device. */
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] carries the supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
6264
6265 /*
6266 * qla2x00_local_device_login
6267 * Issue local device login command.
6268 *
6269 * Input:
6270 * ha = adapter block pointer.
6271 * loop_id = loop id of device to login to.
6272 *
6273 * Returns (Where's the #define!!!!):
6274 * 0 - Login successfully
6275 * 1 - Login failed
6276 * 3 - Fatal error
6277 */
6278 int
qla2x00_local_device_login(scsi_qla_host_t * vha,fc_port_t * fcport)6279 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6280 {
6281 int rval;
6282 uint16_t mb[MAILBOX_REGISTER_COUNT];
6283
6284 memset(mb, 0, sizeof(mb));
6285 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6286 if (rval == QLA_SUCCESS) {
6287 /* Interrogate mailbox registers for any errors */
6288 if (mb[0] == MBS_COMMAND_ERROR)
6289 rval = 1;
6290 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6291 /* device not in PCB table */
6292 rval = 3;
6293 }
6294
6295 return (rval);
6296 }
6297
6298 /*
6299 * qla2x00_loop_resync
6300 * Resync with fibre channel devices.
6301 *
6302 * Input:
6303 * ha = adapter block pointer.
6304 *
6305 * Returns:
6306 * 0 = success
6307 */
6308 int
qla2x00_loop_resync(scsi_qla_host_t * vha)6309 qla2x00_loop_resync(scsi_qla_host_t *vha)
6310 {
6311 int rval = QLA_SUCCESS;
6312 uint32_t wait_time;
6313
6314 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6315 if (vha->flags.online) {
6316 if (!(rval = qla2x00_fw_ready(vha))) {
6317 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6318 wait_time = 256;
6319 do {
6320 if (!IS_QLAFX00(vha->hw)) {
6321 /*
6322 * Issue a marker after FW becomes
6323 * ready.
6324 */
6325 qla2x00_marker(vha, vha->hw->base_qpair,
6326 0, 0, MK_SYNC_ALL);
6327 vha->marker_needed = 0;
6328 }
6329
6330 /* Remap devices on Loop. */
6331 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6332
6333 if (IS_QLAFX00(vha->hw))
6334 qlafx00_configure_devices(vha);
6335 else
6336 qla2x00_configure_loop(vha);
6337
6338 wait_time--;
6339 } while (!atomic_read(&vha->loop_down_timer) &&
6340 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6341 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6342 &vha->dpc_flags)));
6343 }
6344 }
6345
6346 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6347 return (QLA_FUNCTION_FAILED);
6348
6349 if (rval)
6350 ql_dbg(ql_dbg_disc, vha, 0x206c,
6351 "%s *** FAILED ***.\n", __func__);
6352
6353 return (rval);
6354 }
6355
6356 /*
6357 * qla2x00_perform_loop_resync
6358 * Description: This function will set the appropriate flags and call
6359 * qla2x00_loop_resync. If successful loop will be resynced
6360 * Arguments : scsi_qla_host_t pointer
6361 * returm : Success or Failure
6362 */
6363
int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* Only one resync at a time; bail out if one is already active. */
	if (test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags))
		return rval;

	/* Configure the flags so that resync happens properly. */
	atomic_set(&ha->loop_down_timer, 0);
	if (ha->device_flags & DFLG_NO_CABLE) {
		atomic_set(&ha->loop_state, LOOP_DEAD);
	} else {
		atomic_set(&ha->loop_state, LOOP_UP);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

		rval = qla2x00_loop_resync(ha);
	}

	clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);

	return rval;
}
6386
/*
 * qla2x00_update_fcports
 *	Walk every vport on the HBA and complete the deferred removal of
 *	fc_port rport references (ports whose ->drport was set earlier).
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport so it cannot go away while unlocked below. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/*
				 * Drop the spinlock around the rport removal
				 * (presumably it can sleep -- TODO confirm),
				 * then reacquire before continuing the walk.
				 */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		/* Wake anyone waiting for the vref count to drop. */
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
6413
6414 /* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* Read driver-presence and device-partition-info registers. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}
	/*
	 * Each partition-info register encodes a class type in 4-bit
	 * nibbles: info1 covers functions 0-7, info2 covers 8-15.
	 * Find the first FCoE function other than our own.
	 */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *  - No other protocol drivers present.
	 *  - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}
6473
6474 static int
__qla83xx_set_drv_ack(scsi_qla_host_t * vha)6475 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6476 {
6477 int rval = QLA_SUCCESS;
6478 struct qla_hw_data *ha = vha->hw;
6479 uint32_t drv_ack;
6480
6481 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6482 if (rval == QLA_SUCCESS) {
6483 drv_ack |= (1 << ha->portnum);
6484 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6485 }
6486
6487 return rval;
6488 }
6489
6490 static int
__qla83xx_clear_drv_ack(scsi_qla_host_t * vha)6491 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6492 {
6493 int rval = QLA_SUCCESS;
6494 struct qla_hw_data *ha = vha->hw;
6495 uint32_t drv_ack;
6496
6497 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6498 if (rval == QLA_SUCCESS) {
6499 drv_ack &= ~(1 << ha->portnum);
6500 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6501 }
6502
6503 return rval;
6504 }
6505
6506 static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)6507 qla83xx_dev_state_to_string(uint32_t dev_state)
6508 {
6509 switch (dev_state) {
6510 case QLA8XXX_DEV_COLD:
6511 return "COLD/RE-INIT";
6512 case QLA8XXX_DEV_INITIALIZING:
6513 return "INITIALIZING";
6514 case QLA8XXX_DEV_READY:
6515 return "READY";
6516 case QLA8XXX_DEV_NEED_RESET:
6517 return "NEED RESET";
6518 case QLA8XXX_DEV_NEED_QUIESCENT:
6519 return "NEED QUIESCENT";
6520 case QLA8XXX_DEV_FAILED:
6521 return "FAILED";
6522 case QLA8XXX_DEV_QUIESCENT:
6523 return "QUIESCENT";
6524 default:
6525 return "Unknown";
6526 }
6527 }
6528
6529 /* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	/*
	 * Audit register layout (as encoded below): port number in the
	 * low bits, audit type at bit 7, and either a timestamp or the
	 * elapsed duration (both in seconds) starting at bit 8.
	 */
	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Record when the reset sequence started. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/* Record how long the reset sequence took, in seconds. */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}
6558
6559 /* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	/* Reset may have been administratively disabled via idc-control. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);

		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/*
			 * Drop the IDC lock while polling so the reset
			 * owner can advance the state machine; no upper
			 * bound on this wait (see XXX above).
			 */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
6601
/* Write the IDC control register (caller holds the idc lock). */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6607
/* Read the IDC control register (caller holds the idc lock). */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6613
6614 static int
qla83xx_check_driver_presence(scsi_qla_host_t * vha)6615 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6616 {
6617 uint32_t drv_presence = 0;
6618 struct qla_hw_data *ha = vha->hw;
6619
6620 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6621 if (drv_presence & (1 << ha->portnum))
6622 return QLA_SUCCESS;
6623 else
6624 return QLA_TEST_FAILED;
6625 }
6626
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	/* All IDC register traffic below happens under the idc lock. */
	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Determine whether we are the reset owner before initiating. */
	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		/* Clear the ack bit set by qla83xx_initiating_reset(). */
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
6675
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Lazily allocate the DMA buffer on first use; reused afterwards. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only function 0 restarts the NIC firmware, and only once at a time. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		/*
		 * NOTE(review): rval from the dump capture is overwritten
		 * here by the firmware-restart status -- confirm this is
		 * the intended return semantics.
		 */
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
6729
6730 /*
6731 * qla2x00_quiesce_io
6732 * Description: This function will block the new I/Os
6733 * Its not aborting any I/Os as context
6734 * is not destroyed during quiescence
6735 * Arguments: scsi_qla_host_t
6736 * return : void
6737 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		/* Take the loop down and mark every device lost so new
		 * I/O is blocked on this host and all its vports. */
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp);
	} else {
		/* Loop already down; just (re)arm the loop-down timer. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
				LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
	    != QLA_SUCCESS);
}
6762
/*
 * qla2x00_abort_isp_cleanup
 *	Quiesce the adapter in preparation for an ISP abort: take the
 *	chip/loop down, flush pending mailbox commands, mark all devices
 *	lost on the host and every vport, and abort outstanding commands.
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* Tell the mailbox path to drain rather than issue new commands. */
	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
	/* Bump the chip-reset generation on every queue pair so stale
	 * completions from before the reset can be detected. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
			    ha->base_qpair->chip_reset;
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Wait up to ~1s (50 * 20ms) for pending mailbox stages to drain. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* Pin the vport, drop the lock around the
			 * mark-lost call, then reacquire and unpin. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}
6884
6885 /*
6886 * qla2x00_abort_isp
6887 * Resets ISP and aborts all outstanding commands.
6888 *
6889 * Input:
6890 * ha = adapter block pointer.
6891 *
6892 * Returns:
6893 * 0 = success
6894 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;	/* 0 = recovery succeeded, 1 = retry scheduled */
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* Abort-to-ROM: leave the chip in ROM state, no restart. */
		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
			ha->flags.chip_reset_done = 1;
			vha->flags.online = 1;
			status = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return status;
		}

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently gone; nothing more to recover. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		/* Skip the restart when no enabled mode needs it. */
		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha) &&
			    !qla_ini_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			/* Re-arm the FCE trace buffer if one was allocated. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			/* Re-arm the EFT trace buffer if one was allocated. */
			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					/* Retries exhausted. */
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: arm the retry counter. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every vport (vp_idx != 0). */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}
7061
7062 /*
7063 * qla2x00_restart_isp
7064 * restarts the ISP after a reset
7065 *
7066 * Input:
7067 * ha = adapter block pointer.
7068 *
7069 * Returns:
7070 * 0 = success
7071 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status;
	struct qla_hw_data *ha = vha->hw;

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (status)
			return status;
		status = qla2x00_setup_chip(vha);
		if (status)
			return status;
	}

	status = qla2x00_init_rings(vha);
	if (status)
		return status;

	clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
	ha->flags.chip_reset_done = 1;

	/* Initialize the queues in use */
	qla25xx_init_queues(ha);

	status = qla2x00_fw_ready(vha);
	if (status) {
		/* if no cable then assume it's good */
		return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
	}

	/* Issue a marker after FW becomes ready. */
	qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	/* Schedule a loop resync now that the chip is back up. */
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return 0;
}
7111
7112 static int
qla25xx_init_queues(struct qla_hw_data * ha)7113 qla25xx_init_queues(struct qla_hw_data *ha)
7114 {
7115 struct rsp_que *rsp = NULL;
7116 struct req_que *req = NULL;
7117 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7118 int ret = -1;
7119 int i;
7120
7121 for (i = 1; i < ha->max_rsp_queues; i++) {
7122 rsp = ha->rsp_q_map[i];
7123 if (rsp && test_bit(i, ha->rsp_qid_map)) {
7124 rsp->options &= ~BIT_0;
7125 ret = qla25xx_init_rsp_que(base_vha, rsp);
7126 if (ret != QLA_SUCCESS)
7127 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7128 "%s Rsp que: %d init failed.\n",
7129 __func__, rsp->id);
7130 else
7131 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7132 "%s Rsp que: %d inited.\n",
7133 __func__, rsp->id);
7134 }
7135 }
7136 for (i = 1; i < ha->max_req_queues; i++) {
7137 req = ha->req_q_map[i];
7138 if (req && test_bit(i, ha->req_qid_map)) {
7139 /* Clear outstanding commands array. */
7140 req->options &= ~BIT_0;
7141 ret = qla25xx_init_req_que(base_vha, req);
7142 if (ret != QLA_SUCCESS)
7143 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7144 "%s Req que: %d init failed.\n",
7145 __func__, req->id);
7146 else
7147 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7148 "%s Req que: %d inited.\n",
7149 __func__, req->id);
7150 }
7151 }
7152 return ret;
7153 }
7154
7155 /*
7156 * qla2x00_reset_adapter
7157 * Reset adapter.
7158 *
7159 * Input:
7160 * ha = adapter block pointer.
7161 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Pulse RISC reset then release, under the hardware lock.
	 * Each write is read back to flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	rd_reg_word(&reg->hccr);		/* PCI Posting. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);		/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
7181
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* P3P parts are reset through a different path; nothing to do. */
	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Pulse RISC reset then release/pause, under the hardware lock.
	 * Each write is read back to flush PCI posting. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);
	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return QLA_SUCCESS;
}
7207
7208 /* On sparc systems, obtain port and node WWN from firmware
7209 * properties.
7210 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
				       struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Override NVRAM WWNs with the OpenFirmware properties, when
	 * present and at least WWN_SIZE bytes long. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
	/* No-op on non-SPARC builds. */
}
7230
7231 int
qla24xx_nvram_config(scsi_qla_host_t * vha)7232 qla24xx_nvram_config(scsi_qla_host_t *vha)
7233 {
7234 int rval;
7235 struct init_cb_24xx *icb;
7236 struct nvram_24xx *nv;
7237 __le32 *dptr;
7238 uint8_t *dptr1, *dptr2;
7239 uint32_t chksum;
7240 uint16_t cnt;
7241 struct qla_hw_data *ha = vha->hw;
7242
7243 rval = QLA_SUCCESS;
7244 icb = (struct init_cb_24xx *)ha->init_cb;
7245 nv = ha->nvram;
7246
7247 /* Determine NVRAM starting address. */
7248 if (ha->port_no == 0) {
7249 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7250 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7251 } else {
7252 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7253 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7254 }
7255
7256 ha->nvram_size = sizeof(*nv);
7257 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7258
7259 /* Get VPD data into cache */
7260 ha->vpd = ha->nvram + VPD_OFFSET;
7261 ha->isp_ops->read_nvram(vha, ha->vpd,
7262 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7263
7264 /* Get NVRAM data into cache and calculate checksum. */
7265 dptr = (__force __le32 *)nv;
7266 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7267 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7268 chksum += le32_to_cpu(*dptr);
7269
7270 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7271 "Contents of NVRAM\n");
7272 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7273 nv, ha->nvram_size);
7274
7275 /* Bad NVRAM data, set defaults parameters. */
7276 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7277 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7278 /* Reset NVRAM data. */
7279 ql_log(ql_log_warn, vha, 0x006b,
7280 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7281 chksum, nv->id, nv->nvram_version);
7282 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7283 ql_log(ql_log_warn, vha, 0x006c,
7284 "Falling back to functioning (yet invalid -- WWPN) "
7285 "defaults.\n");
7286
7287 /*
7288 * Set default initialization control block.
7289 */
7290 memset(nv, 0, ha->nvram_size);
7291 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7292 nv->version = cpu_to_le16(ICB_VERSION);
7293 nv->frame_payload_size = cpu_to_le16(2048);
7294 nv->execution_throttle = cpu_to_le16(0xFFFF);
7295 nv->exchange_count = cpu_to_le16(0);
7296 nv->hard_address = cpu_to_le16(124);
7297 nv->port_name[0] = 0x21;
7298 nv->port_name[1] = 0x00 + ha->port_no + 1;
7299 nv->port_name[2] = 0x00;
7300 nv->port_name[3] = 0xe0;
7301 nv->port_name[4] = 0x8b;
7302 nv->port_name[5] = 0x1c;
7303 nv->port_name[6] = 0x55;
7304 nv->port_name[7] = 0x86;
7305 nv->node_name[0] = 0x20;
7306 nv->node_name[1] = 0x00;
7307 nv->node_name[2] = 0x00;
7308 nv->node_name[3] = 0xe0;
7309 nv->node_name[4] = 0x8b;
7310 nv->node_name[5] = 0x1c;
7311 nv->node_name[6] = 0x55;
7312 nv->node_name[7] = 0x86;
7313 qla24xx_nvram_wwn_from_ofw(vha, nv);
7314 nv->login_retry_count = cpu_to_le16(8);
7315 nv->interrupt_delay_timer = cpu_to_le16(0);
7316 nv->login_timeout = cpu_to_le16(0);
7317 nv->firmware_options_1 =
7318 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7319 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7320 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7321 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7322 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7323 nv->efi_parameters = cpu_to_le32(0);
7324 nv->reset_delay = 5;
7325 nv->max_luns_per_target = cpu_to_le16(128);
7326 nv->port_down_retry_count = cpu_to_le16(30);
7327 nv->link_down_timeout = cpu_to_le16(30);
7328
7329 rval = 1;
7330 }
7331
7332 if (qla_tgt_mode_enabled(vha)) {
7333 /* Don't enable full login after initial LIP */
7334 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7335 /* Don't enable LIP full login for initiator */
7336 nv->host_p &= cpu_to_le32(~BIT_10);
7337 }
7338
7339 qlt_24xx_config_nvram_stage1(vha, nv);
7340
7341 /* Reset Initialization control block */
7342 memset(icb, 0, ha->init_cb_size);
7343
7344 /* Copy 1st segment. */
7345 dptr1 = (uint8_t *)icb;
7346 dptr2 = (uint8_t *)&nv->version;
7347 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7348 while (cnt--)
7349 *dptr1++ = *dptr2++;
7350
7351 icb->login_retry_count = nv->login_retry_count;
7352 icb->link_down_on_nos = nv->link_down_on_nos;
7353
7354 /* Copy 2nd segment. */
7355 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7356 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7357 cnt = (uint8_t *)&icb->reserved_3 -
7358 (uint8_t *)&icb->interrupt_delay_timer;
7359 while (cnt--)
7360 *dptr1++ = *dptr2++;
7361 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7362 /*
7363 * Setup driver NVRAM options.
7364 */
7365 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7366 "QLA2462");
7367
7368 qlt_24xx_config_nvram_stage2(vha, icb);
7369
7370 if (nv->host_p & cpu_to_le32(BIT_15)) {
7371 /* Use alternate WWN? */
7372 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7373 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7374 }
7375
7376 /* Prepare nodename */
7377 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7378 /*
7379 * Firmware will apply the following mask if the nodename was
7380 * not provided.
7381 */
7382 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7383 icb->node_name[0] &= 0xF0;
7384 }
7385
7386 /* Set host adapter parameters. */
7387 ha->flags.disable_risc_code_load = 0;
7388 ha->flags.enable_lip_reset = 0;
7389 ha->flags.enable_lip_full_login =
7390 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7391 ha->flags.enable_target_reset =
7392 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7393 ha->flags.enable_led_scheme = 0;
7394 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7395
7396 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7397 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7398
7399 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7400 sizeof(ha->fw_seriallink_options24));
7401
7402 /* save HBA serial number */
7403 ha->serial0 = icb->port_name[5];
7404 ha->serial1 = icb->port_name[6];
7405 ha->serial2 = icb->port_name[7];
7406 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7407 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7408
7409 icb->execution_throttle = cpu_to_le16(0xFFFF);
7410
7411 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7412
7413 /* Set minimum login_timeout to 4 seconds. */
7414 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7415 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7416 if (le16_to_cpu(nv->login_timeout) < 4)
7417 nv->login_timeout = cpu_to_le16(4);
7418 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7419
7420 /* Set minimum RATOV to 100 tenths of a second. */
7421 ha->r_a_tov = 100;
7422
7423 ha->loop_reset_delay = nv->reset_delay;
7424
7425 /* Link Down Timeout = 0:
7426 *
7427 * When Port Down timer expires we will start returning
7428 * I/O's to OS with "DID_NO_CONNECT".
7429 *
7430 * Link Down Timeout != 0:
7431 *
7432 * The driver waits for the link to come up after link down
7433 * before returning I/Os to OS with "DID_NO_CONNECT".
7434 */
7435 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7436 ha->loop_down_abort_time =
7437 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7438 } else {
7439 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7440 ha->loop_down_abort_time =
7441 (LOOP_DOWN_TIME - ha->link_down_timeout);
7442 }
7443
7444 /* Need enough time to try and get the port back. */
7445 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7446 if (qlport_down_retry)
7447 ha->port_down_retry_count = qlport_down_retry;
7448
7449 /* Set login_retry_count */
7450 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7451 if (ha->port_down_retry_count ==
7452 le16_to_cpu(nv->port_down_retry_count) &&
7453 ha->port_down_retry_count > 3)
7454 ha->login_retry_count = ha->port_down_retry_count;
7455 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7456 ha->login_retry_count = ha->port_down_retry_count;
7457 if (ql2xloginretrycount)
7458 ha->login_retry_count = ql2xloginretrycount;
7459
7460 /* N2N: driver will initiate Login instead of FW */
7461 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
7462
7463 /* Enable ZIO. */
7464 if (!vha->flags.init_done) {
7465 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7466 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7467 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7468 le16_to_cpu(icb->interrupt_delay_timer) : 2;
7469 }
7470 icb->firmware_options_2 &= cpu_to_le32(
7471 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7472 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7473 ha->zio_mode = QLA_ZIO_MODE_6;
7474
7475 ql_log(ql_log_info, vha, 0x006f,
7476 "ZIO mode %d enabled; timer delay (%d us).\n",
7477 ha->zio_mode, ha->zio_timer * 100);
7478
7479 icb->firmware_options_2 |= cpu_to_le32(
7480 (uint32_t)ha->zio_mode);
7481 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7482 }
7483
7484 if (rval) {
7485 ql_log(ql_log_warn, vha, 0x0070,
7486 "NVRAM configuration failed.\n");
7487 }
7488 return (rval);
7489 }
7490
7491 static void
qla27xx_print_image(struct scsi_qla_host * vha,char * name,struct qla27xx_image_status * image_status)7492 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
7493 struct qla27xx_image_status *image_status)
7494 {
7495 ql_dbg(ql_dbg_init, vha, 0x018b,
7496 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
7497 name, "status",
7498 image_status->image_status_mask,
7499 le16_to_cpu(image_status->generation),
7500 image_status->ver_major,
7501 image_status->ver_minor,
7502 image_status->bitmap,
7503 le32_to_cpu(image_status->checksum),
7504 le32_to_cpu(image_status->signature));
7505 }
7506
7507 static bool
qla28xx_check_aux_image_status_signature(struct qla27xx_image_status * image_status)7508 qla28xx_check_aux_image_status_signature(
7509 struct qla27xx_image_status *image_status)
7510 {
7511 ulong signature = le32_to_cpu(image_status->signature);
7512
7513 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
7514 }
7515
7516 static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status * image_status)7517 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7518 {
7519 ulong signature = le32_to_cpu(image_status->signature);
7520
7521 return
7522 signature != QLA27XX_IMG_STATUS_SIGN &&
7523 signature != QLA28XX_IMG_STATUS_SIGN;
7524 }
7525
7526 static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status * image_status)7527 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7528 {
7529 __le32 *p = (__force __le32 *)image_status;
7530 uint n = sizeof(*image_status) / sizeof(*p);
7531 uint32_t sum = 0;
7532
7533 for ( ; n--; p++)
7534 sum += le32_to_cpup(p);
7535
7536 return sum;
7537 }
7538
7539 static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status * aux,uint bitmask)7540 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7541 {
7542 return aux->bitmap & bitmask ?
7543 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
7544 }
7545
/*
 * Decode the aux image-status bitmap into per-component primary/secondary
 * selections: board config, VPD/NVRAM, NPIV config 0/1 and NPIV config 2/3.
 */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}
7562
7563 static int
qla27xx_compare_image_generation(struct qla27xx_image_status * pri_image_status,struct qla27xx_image_status * sec_image_status)7564 qla27xx_compare_image_generation(
7565 struct qla27xx_image_status *pri_image_status,
7566 struct qla27xx_image_status *sec_image_status)
7567 {
7568 /* calculate generation delta as uint16 (this accounts for wrap) */
7569 int16_t delta =
7570 le16_to_cpu(pri_image_status->generation) -
7571 le16_to_cpu(sec_image_status->generation);
7572
7573 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
7574
7575 return delta;
7576 }
7577
7578 void
qla28xx_get_aux_images(struct scsi_qla_host * vha,struct active_regions * active_regions)7579 qla28xx_get_aux_images(
7580 struct scsi_qla_host *vha, struct active_regions *active_regions)
7581 {
7582 struct qla_hw_data *ha = vha->hw;
7583 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7584 bool valid_pri_image = false, valid_sec_image = false;
7585 bool active_pri_image = false, active_sec_image = false;
7586
7587 if (!ha->flt_region_aux_img_status_pri) {
7588 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7589 goto check_sec_image;
7590 }
7591
7592 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
7593 ha->flt_region_aux_img_status_pri,
7594 sizeof(pri_aux_image_status) >> 2);
7595 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7596
7597 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7598 ql_dbg(ql_dbg_init, vha, 0x018b,
7599 "Primary aux image signature (%#x) not valid\n",
7600 le32_to_cpu(pri_aux_image_status.signature));
7601 goto check_sec_image;
7602 }
7603
7604 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7605 ql_dbg(ql_dbg_init, vha, 0x018c,
7606 "Primary aux image checksum failed\n");
7607 goto check_sec_image;
7608 }
7609
7610 valid_pri_image = true;
7611
7612 if (pri_aux_image_status.image_status_mask & 1) {
7613 ql_dbg(ql_dbg_init, vha, 0x018d,
7614 "Primary aux image is active\n");
7615 active_pri_image = true;
7616 }
7617
7618 check_sec_image:
7619 if (!ha->flt_region_aux_img_status_sec) {
7620 ql_dbg(ql_dbg_init, vha, 0x018a,
7621 "Secondary aux image not addressed\n");
7622 goto check_valid_image;
7623 }
7624
7625 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
7626 ha->flt_region_aux_img_status_sec,
7627 sizeof(sec_aux_image_status) >> 2);
7628 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7629
7630 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7631 ql_dbg(ql_dbg_init, vha, 0x018b,
7632 "Secondary aux image signature (%#x) not valid\n",
7633 le32_to_cpu(sec_aux_image_status.signature));
7634 goto check_valid_image;
7635 }
7636
7637 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7638 ql_dbg(ql_dbg_init, vha, 0x018c,
7639 "Secondary aux image checksum failed\n");
7640 goto check_valid_image;
7641 }
7642
7643 valid_sec_image = true;
7644
7645 if (sec_aux_image_status.image_status_mask & 1) {
7646 ql_dbg(ql_dbg_init, vha, 0x018d,
7647 "Secondary aux image is active\n");
7648 active_sec_image = true;
7649 }
7650
7651 check_valid_image:
7652 if (valid_pri_image && active_pri_image &&
7653 valid_sec_image && active_sec_image) {
7654 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7655 &sec_aux_image_status) >= 0) {
7656 qla28xx_component_status(active_regions,
7657 &pri_aux_image_status);
7658 } else {
7659 qla28xx_component_status(active_regions,
7660 &sec_aux_image_status);
7661 }
7662 } else if (valid_pri_image && active_pri_image) {
7663 qla28xx_component_status(active_regions, &pri_aux_image_status);
7664 } else if (valid_sec_image && active_sec_image) {
7665 qla28xx_component_status(active_regions, &sec_aux_image_status);
7666 }
7667
7668 ql_dbg(ql_dbg_init, vha, 0x018f,
7669 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7670 active_regions->aux.board_config,
7671 active_regions->aux.vpd_nvram,
7672 active_regions->aux.npiv_config_0_1,
7673 active_regions->aux.npiv_config_2_3);
7674 }
7675
7676 void
qla27xx_get_active_image(struct scsi_qla_host * vha,struct active_regions * active_regions)7677 qla27xx_get_active_image(struct scsi_qla_host *vha,
7678 struct active_regions *active_regions)
7679 {
7680 struct qla_hw_data *ha = vha->hw;
7681 struct qla27xx_image_status pri_image_status, sec_image_status;
7682 bool valid_pri_image = false, valid_sec_image = false;
7683 bool active_pri_image = false, active_sec_image = false;
7684
7685 if (!ha->flt_region_img_status_pri) {
7686 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
7687 goto check_sec_image;
7688 }
7689
7690 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
7691 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
7692 QLA_SUCCESS) {
7693 WARN_ON_ONCE(true);
7694 goto check_sec_image;
7695 }
7696 qla27xx_print_image(vha, "Primary image", &pri_image_status);
7697
7698 if (qla27xx_check_image_status_signature(&pri_image_status)) {
7699 ql_dbg(ql_dbg_init, vha, 0x018b,
7700 "Primary image signature (%#x) not valid\n",
7701 le32_to_cpu(pri_image_status.signature));
7702 goto check_sec_image;
7703 }
7704
7705 if (qla27xx_image_status_checksum(&pri_image_status)) {
7706 ql_dbg(ql_dbg_init, vha, 0x018c,
7707 "Primary image checksum failed\n");
7708 goto check_sec_image;
7709 }
7710
7711 valid_pri_image = true;
7712
7713 if (pri_image_status.image_status_mask & 1) {
7714 ql_dbg(ql_dbg_init, vha, 0x018d,
7715 "Primary image is active\n");
7716 active_pri_image = true;
7717 }
7718
7719 check_sec_image:
7720 if (!ha->flt_region_img_status_sec) {
7721 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
7722 goto check_valid_image;
7723 }
7724
7725 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
7726 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
7727 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
7728
7729 if (qla27xx_check_image_status_signature(&sec_image_status)) {
7730 ql_dbg(ql_dbg_init, vha, 0x018b,
7731 "Secondary image signature (%#x) not valid\n",
7732 le32_to_cpu(sec_image_status.signature));
7733 goto check_valid_image;
7734 }
7735
7736 if (qla27xx_image_status_checksum(&sec_image_status)) {
7737 ql_dbg(ql_dbg_init, vha, 0x018c,
7738 "Secondary image checksum failed\n");
7739 goto check_valid_image;
7740 }
7741
7742 valid_sec_image = true;
7743
7744 if (sec_image_status.image_status_mask & 1) {
7745 ql_dbg(ql_dbg_init, vha, 0x018d,
7746 "Secondary image is active\n");
7747 active_sec_image = true;
7748 }
7749
7750 check_valid_image:
7751 if (valid_pri_image && active_pri_image)
7752 active_regions->global = QLA27XX_PRIMARY_IMAGE;
7753
7754 if (valid_sec_image && active_sec_image) {
7755 if (!active_regions->global ||
7756 qla27xx_compare_image_generation(
7757 &pri_image_status, &sec_image_status) < 0) {
7758 active_regions->global = QLA27XX_SECONDARY_IMAGE;
7759 }
7760 }
7761
7762 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
7763 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
7764 "default (boot/fw)" :
7765 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
7766 "primary" :
7767 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
7768 "secondary" : "invalid",
7769 active_regions->global);
7770 }
7771
/*
 * A firmware header is invalid when dwords 4..7 are all zero or all ones
 * (erased/blank flash).  Returns true for an invalid image.
 */
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	bool all_zero = !(dword[4] | dword[5] | dword[6] | dword[7]);
	bool all_ones = !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);

	return all_zero || all_ones;
}
7778
7779 static int
qla24xx_load_risc_flash(scsi_qla_host_t * vha,uint32_t * srisc_addr,uint32_t faddr)7780 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7781 uint32_t faddr)
7782 {
7783 int rval;
7784 uint templates, segments, fragment;
7785 ulong i;
7786 uint j;
7787 ulong dlen;
7788 uint32_t *dcode;
7789 uint32_t risc_addr, risc_size, risc_attr = 0;
7790 struct qla_hw_data *ha = vha->hw;
7791 struct req_que *req = ha->req_q_map[0];
7792 struct fwdt *fwdt = ha->fwdt;
7793
7794 ql_dbg(ql_dbg_init, vha, 0x008b,
7795 "FW: Loading firmware from flash (%x).\n", faddr);
7796
7797 dcode = (uint32_t *)req->ring;
7798 qla24xx_read_flash_data(vha, dcode, faddr, 8);
7799 if (qla24xx_risc_firmware_invalid(dcode)) {
7800 ql_log(ql_log_fatal, vha, 0x008c,
7801 "Unable to verify the integrity of flash firmware "
7802 "image.\n");
7803 ql_log(ql_log_fatal, vha, 0x008d,
7804 "Firmware data: %08x %08x %08x %08x.\n",
7805 dcode[0], dcode[1], dcode[2], dcode[3]);
7806
7807 return QLA_FUNCTION_FAILED;
7808 }
7809
7810 dcode = (uint32_t *)req->ring;
7811 *srisc_addr = 0;
7812 segments = FA_RISC_CODE_SEGMENTS;
7813 for (j = 0; j < segments; j++) {
7814 ql_dbg(ql_dbg_init, vha, 0x008d,
7815 "-> Loading segment %u...\n", j);
7816 qla24xx_read_flash_data(vha, dcode, faddr, 10);
7817 risc_addr = be32_to_cpu((__force __be32)dcode[2]);
7818 risc_size = be32_to_cpu((__force __be32)dcode[3]);
7819 if (!*srisc_addr) {
7820 *srisc_addr = risc_addr;
7821 risc_attr = be32_to_cpu((__force __be32)dcode[9]);
7822 }
7823
7824 dlen = ha->fw_transfer_size >> 2;
7825 for (fragment = 0; risc_size; fragment++) {
7826 if (dlen > risc_size)
7827 dlen = risc_size;
7828
7829 ql_dbg(ql_dbg_init, vha, 0x008e,
7830 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
7831 fragment, risc_addr, faddr, dlen);
7832 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
7833 for (i = 0; i < dlen; i++)
7834 dcode[i] = swab32(dcode[i]);
7835
7836 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
7837 if (rval) {
7838 ql_log(ql_log_fatal, vha, 0x008f,
7839 "-> Failed load firmware fragment %u.\n",
7840 fragment);
7841 return QLA_FUNCTION_FAILED;
7842 }
7843
7844 faddr += dlen;
7845 risc_addr += dlen;
7846 risc_size -= dlen;
7847 }
7848 }
7849
7850 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7851 return QLA_SUCCESS;
7852
7853 templates = (risc_attr & BIT_9) ? 2 : 1;
7854 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
7855 for (j = 0; j < templates; j++, fwdt++) {
7856 if (fwdt->template)
7857 vfree(fwdt->template);
7858 fwdt->template = NULL;
7859 fwdt->length = 0;
7860
7861 dcode = (uint32_t *)req->ring;
7862 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7863 risc_size = be32_to_cpu((__force __be32)dcode[2]);
7864 ql_dbg(ql_dbg_init, vha, 0x0161,
7865 "-> fwdt%u template array at %#x (%#x dwords)\n",
7866 j, faddr, risc_size);
7867 if (!risc_size || !~risc_size) {
7868 ql_dbg(ql_dbg_init, vha, 0x0162,
7869 "-> fwdt%u failed to read array\n", j);
7870 goto failed;
7871 }
7872
7873 /* skip header and ignore checksum */
7874 faddr += 7;
7875 risc_size -= 8;
7876
7877 ql_dbg(ql_dbg_init, vha, 0x0163,
7878 "-> fwdt%u template allocate template %#x words...\n",
7879 j, risc_size);
7880 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
7881 if (!fwdt->template) {
7882 ql_log(ql_log_warn, vha, 0x0164,
7883 "-> fwdt%u failed allocate template.\n", j);
7884 goto failed;
7885 }
7886
7887 dcode = fwdt->template;
7888 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
7889
7890 if (!qla27xx_fwdt_template_valid(dcode)) {
7891 ql_log(ql_log_warn, vha, 0x0165,
7892 "-> fwdt%u failed template validate\n", j);
7893 goto failed;
7894 }
7895
7896 dlen = qla27xx_fwdt_template_size(dcode);
7897 ql_dbg(ql_dbg_init, vha, 0x0166,
7898 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
7899 j, dlen, dlen / sizeof(*dcode));
7900 if (dlen > risc_size * sizeof(*dcode)) {
7901 ql_log(ql_log_warn, vha, 0x0167,
7902 "-> fwdt%u template exceeds array (%-lu bytes)\n",
7903 j, dlen - risc_size * sizeof(*dcode));
7904 goto failed;
7905 }
7906
7907 fwdt->length = dlen;
7908 ql_dbg(ql_dbg_init, vha, 0x0168,
7909 "-> fwdt%u loaded template ok\n", j);
7910
7911 faddr += risc_size + 1;
7912 }
7913
7914 return QLA_SUCCESS;
7915
7916 failed:
7917 if (fwdt->template)
7918 vfree(fwdt->template);
7919 fwdt->template = NULL;
7920 fwdt->length = 0;
7921
7922 return QLA_SUCCESS;
7923 }
7924
7925 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
7926
/*
 * qla2x00_load_risc() - load ISP2x00-family firmware via request-firmware.
 * @vha: host adapter
 * @srisc_addr: out - firmware entry point (first segment's load address)
 *
 * Fetches the firmware blob, validates it (size and a non-blank header),
 * then loads each segment listed in blob->segs into RISC memory in
 * DMA-buffer-sized chunks.  Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode;
	__be16 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Words 4..7 hold the version; all-zero or all-ones means blank. */
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* Load each segment listed in the (zero-terminated) segs table. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		/* First segment's load address is the entry point. */
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		/* Transfer the segment in DMA-buffer-sized fragments. */
		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Byte-swap words into the DMA staging buffer. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
8026
8027 static int
qla24xx_load_risc_blob(scsi_qla_host_t * vha,uint32_t * srisc_addr)8028 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8029 {
8030 int rval;
8031 uint templates, segments, fragment;
8032 uint32_t *dcode;
8033 ulong dlen;
8034 uint32_t risc_addr, risc_size, risc_attr = 0;
8035 ulong i;
8036 uint j;
8037 struct fw_blob *blob;
8038 __be32 *fwcode;
8039 struct qla_hw_data *ha = vha->hw;
8040 struct req_que *req = ha->req_q_map[0];
8041 struct fwdt *fwdt = ha->fwdt;
8042
8043 ql_dbg(ql_dbg_init, vha, 0x0090,
8044 "-> FW: Loading via request-firmware.\n");
8045
8046 blob = qla2x00_request_firmware(vha);
8047 if (!blob) {
8048 ql_log(ql_log_warn, vha, 0x0092,
8049 "-> Firmware file not found.\n");
8050
8051 return QLA_FUNCTION_FAILED;
8052 }
8053
8054 fwcode = (__force __be32 *)blob->fw->data;
8055 dcode = (__force uint32_t *)fwcode;
8056 if (qla24xx_risc_firmware_invalid(dcode)) {
8057 ql_log(ql_log_fatal, vha, 0x0093,
8058 "Unable to verify integrity of firmware image (%zd).\n",
8059 blob->fw->size);
8060 ql_log(ql_log_fatal, vha, 0x0095,
8061 "Firmware data: %08x %08x %08x %08x.\n",
8062 dcode[0], dcode[1], dcode[2], dcode[3]);
8063 return QLA_FUNCTION_FAILED;
8064 }
8065
8066 dcode = (uint32_t *)req->ring;
8067 *srisc_addr = 0;
8068 segments = FA_RISC_CODE_SEGMENTS;
8069 for (j = 0; j < segments; j++) {
8070 ql_dbg(ql_dbg_init, vha, 0x0096,
8071 "-> Loading segment %u...\n", j);
8072 risc_addr = be32_to_cpu(fwcode[2]);
8073 risc_size = be32_to_cpu(fwcode[3]);
8074
8075 if (!*srisc_addr) {
8076 *srisc_addr = risc_addr;
8077 risc_attr = be32_to_cpu(fwcode[9]);
8078 }
8079
8080 dlen = ha->fw_transfer_size >> 2;
8081 for (fragment = 0; risc_size; fragment++) {
8082 if (dlen > risc_size)
8083 dlen = risc_size;
8084
8085 ql_dbg(ql_dbg_init, vha, 0x0097,
8086 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
8087 fragment, risc_addr,
8088 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
8089 dlen);
8090
8091 for (i = 0; i < dlen; i++)
8092 dcode[i] = swab32((__force u32)fwcode[i]);
8093
8094 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8095 if (rval) {
8096 ql_log(ql_log_fatal, vha, 0x0098,
8097 "-> Failed load firmware fragment %u.\n",
8098 fragment);
8099 return QLA_FUNCTION_FAILED;
8100 }
8101
8102 fwcode += dlen;
8103 risc_addr += dlen;
8104 risc_size -= dlen;
8105 }
8106 }
8107
8108 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8109 return QLA_SUCCESS;
8110
8111 templates = (risc_attr & BIT_9) ? 2 : 1;
8112 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8113 for (j = 0; j < templates; j++, fwdt++) {
8114 if (fwdt->template)
8115 vfree(fwdt->template);
8116 fwdt->template = NULL;
8117 fwdt->length = 0;
8118
8119 risc_size = be32_to_cpu(fwcode[2]);
8120 ql_dbg(ql_dbg_init, vha, 0x0171,
8121 "-> fwdt%u template array at %#x (%#x dwords)\n",
8122 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
8123 risc_size);
8124 if (!risc_size || !~risc_size) {
8125 ql_dbg(ql_dbg_init, vha, 0x0172,
8126 "-> fwdt%u failed to read array\n", j);
8127 goto failed;
8128 }
8129
8130 /* skip header and ignore checksum */
8131 fwcode += 7;
8132 risc_size -= 8;
8133
8134 ql_dbg(ql_dbg_init, vha, 0x0173,
8135 "-> fwdt%u template allocate template %#x words...\n",
8136 j, risc_size);
8137 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8138 if (!fwdt->template) {
8139 ql_log(ql_log_warn, vha, 0x0174,
8140 "-> fwdt%u failed allocate template.\n", j);
8141 goto failed;
8142 }
8143
8144 dcode = fwdt->template;
8145 for (i = 0; i < risc_size; i++)
8146 dcode[i] = (__force u32)fwcode[i];
8147
8148 if (!qla27xx_fwdt_template_valid(dcode)) {
8149 ql_log(ql_log_warn, vha, 0x0175,
8150 "-> fwdt%u failed template validate\n", j);
8151 goto failed;
8152 }
8153
8154 dlen = qla27xx_fwdt_template_size(dcode);
8155 ql_dbg(ql_dbg_init, vha, 0x0176,
8156 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8157 j, dlen, dlen / sizeof(*dcode));
8158 if (dlen > risc_size * sizeof(*dcode)) {
8159 ql_log(ql_log_warn, vha, 0x0177,
8160 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8161 j, dlen - risc_size * sizeof(*dcode));
8162 goto failed;
8163 }
8164
8165 fwdt->length = dlen;
8166 ql_dbg(ql_dbg_init, vha, 0x0178,
8167 "-> fwdt%u loaded template ok\n", j);
8168
8169 fwcode += risc_size + 1;
8170 }
8171
8172 return QLA_SUCCESS;
8173
8174 failed:
8175 if (fwdt->template)
8176 vfree(fwdt->template);
8177 fwdt->template = NULL;
8178 fwdt->length = 0;
8179
8180 return QLA_SUCCESS;
8181 }
8182
8183 int
qla24xx_load_risc(scsi_qla_host_t * vha,uint32_t * srisc_addr)8184 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8185 {
8186 int rval;
8187
8188 if (ql2xfwloadbin == 1)
8189 return qla81xx_load_risc(vha, srisc_addr);
8190
8191 /*
8192 * FW Load priority:
8193 * 1) Firmware via request-firmware interface (.bin file).
8194 * 2) Firmware residing in flash.
8195 */
8196 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8197 if (rval == QLA_SUCCESS)
8198 return rval;
8199
8200 return qla24xx_load_risc_flash(vha, srisc_addr,
8201 vha->hw->flt_region_fw);
8202 }
8203
/*
 * qla81xx_load_risc() - load firmware with flash-first priority.
 * @vha: host adapter
 * @srisc_addr: out - firmware entry point
 *
 * FW Load priority:
 *  1) Firmware residing in flash (on 27xx/28xx, the secondary flash
 *     image is tried first when it is the active image).
 *  2) Firmware via request-firmware interface (.bin file).
 *  3) Golden-Firmware residing in flash -- (limited operation).
 *
 * ql2xfwloadbin == 2 skips straight to the request-firmware path.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	/* 27xx/28xx: prefer the secondary flash image when it is active. */
	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	/* Golden fw loaded: flag limited operation and request an update. */
	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
8256
/*
 * qla2x00_try_to_stop_firmware() - best-effort firmware stop for teardown.
 * @vha: host adapter
 *
 * Issues a stop-firmware mailbox command, retrying up to 5 times with a
 * chip reset + re-setup between attempts.  Retries are skipped when the
 * failure is terminal (timeout or invalid command) or when the firmware
 * never started.  Always marks the firmware stopped on exit.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to stop: dead PCI channel, pre-FWI2 HBA, or FW not run. */
	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		/* Reset and re-initialize the chip before retrying. */
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
8288
/*
 * qla24xx_configure_vhba() - bring an NPIV virtual port online.
 * @vha: virtual host adapter (must have a nonzero vp_idx)
 *
 * Waits for base-port firmware readiness, logs the vport in to the SNS
 * (fabric name server), then kicks off a loop resync on the base port.
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED, or -EINVAL when called on
 * the physical (base) port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* Only valid for virtual ports; vp_idx 0 is the physical port. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* Fabric login succeeded: declare the loop up and request resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
8334
8335 /* 84XX Support **************************************************************/
8336
/* Registry of shared 84xx chip state, one entry per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
/* Serializes lookup/insertion/removal on qla_cs84xx_list. */
static DEFINE_MUTEX(qla_cs84xx_mutex);
8339
8340 static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host * vha)8341 qla84xx_get_chip(struct scsi_qla_host *vha)
8342 {
8343 struct qla_chip_state_84xx *cs84xx;
8344 struct qla_hw_data *ha = vha->hw;
8345
8346 mutex_lock(&qla_cs84xx_mutex);
8347
8348 /* Find any shared 84xx chip. */
8349 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8350 if (cs84xx->bus == ha->pdev->bus) {
8351 kref_get(&cs84xx->kref);
8352 goto done;
8353 }
8354 }
8355
8356 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8357 if (!cs84xx)
8358 goto done;
8359
8360 kref_init(&cs84xx->kref);
8361 spin_lock_init(&cs84xx->access_lock);
8362 mutex_init(&cs84xx->fw_update_mutex);
8363 cs84xx->bus = ha->pdev->bus;
8364
8365 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8366 done:
8367 mutex_unlock(&qla_cs84xx_mutex);
8368 return cs84xx;
8369 }
8370
8371 static void
__qla84xx_chip_release(struct kref * kref)8372 __qla84xx_chip_release(struct kref *kref)
8373 {
8374 struct qla_chip_state_84xx *cs84xx =
8375 container_of(kref, struct qla_chip_state_84xx, kref);
8376
8377 mutex_lock(&qla_cs84xx_mutex);
8378 list_del(&cs84xx->list);
8379 mutex_unlock(&qla_cs84xx_mutex);
8380 kfree(cs84xx);
8381 }
8382
8383 void
qla84xx_put_chip(struct scsi_qla_host * vha)8384 qla84xx_put_chip(struct scsi_qla_host *vha)
8385 {
8386 struct qla_hw_data *ha = vha->hw;
8387
8388 if (ha->cs84xx)
8389 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8390 }
8391
8392 static int
qla84xx_init_chip(scsi_qla_host_t * vha)8393 qla84xx_init_chip(scsi_qla_host_t *vha)
8394 {
8395 int rval;
8396 uint16_t status[2];
8397 struct qla_hw_data *ha = vha->hw;
8398
8399 mutex_lock(&ha->cs84xx->fw_update_mutex);
8400
8401 rval = qla84xx_verify_chip(vha, status);
8402
8403 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8404
8405 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8406 QLA_SUCCESS;
8407 }
8408
8409 /* 81XX Support **************************************************************/
8410
/*
 * qla81xx_nvram_config
 *	Read the VPD and NVRAM images from flash, validate the NVRAM
 *	checksum, fall back to safe defaults if it is bad, then build the
 *	ISP81xx initialization control block and the driver's cached
 *	host-adapter parameters from the NVRAM contents.
 *
 * Input:
 *	vha = scsi host structure pointer.
 *
 * Returns:
 *	QLA_SUCCESS (0) when the NVRAM image was valid; 1 when defaults
 *	(with an invalid WWPN) had to be used instead.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* 27xx/28xx keep primary/secondary aux images; find the active one. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	/* faddr is in dwords; read_optrom takes a byte offset. */
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	ql_dbg(ql_dbg_init, vha, 0x0110,
	    "Loading %s nvram image.\n",
	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
	    "primary" : "secondary");
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	/* Sum the image as little-endian dwords; a valid image sums to 0. */
	dptr = (__force __le32 *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		/* Default port/node names vary only by the PCI port number. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		/* Remember that defaults were used; reported at return. */
		rval = 1;
	}

	/* T10-PI needs the frame payload size 8-byte aligned. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= cpu_to_le16(~7);

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* BIT_7 clear in enhanced_features means SCM type A is supported. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		if ((nv->enhanced_features & BIT_7) == 0)
			ha->flags.scm_supported_a = 1;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	/* Module parameter overrides the NVRAM-derived value. */
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	/* Clear ZIO mode bits; re-set below if ZIO stays enabled. */
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
8717
/*
 * qla82xx_restart_isp
 *	Bring an ISP82xx back online after reset: re-initialize the rings,
 *	wait for firmware readiness, re-enable interrupts and the FCE/EFT
 *	trace buffers, then propagate the restart to every vport.
 *
 * Input:
 *	vha = scsi host structure pointer (physical port).
 *
 * Returns:
 *	0 on success, non-zero status on failure.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the FCE trace buffer if one was allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				/* Trace failure is non-fatal; keep going. */
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the EFT trace buffer if one was allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Walk the vport list; vref_count pins each vport while
		 * the lock is dropped around the abort call.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
8815
8816 /*
8817 * qla24xx_get_fcp_prio
8818 * Gets the fcp cmd priority value for the logged in port.
8819 * Looks for a match of the port descriptors within
8820 * each of the fcp prio config entries. If a match is found,
8821 * the tag (priority) value is returned.
8822 *
8823 * Input:
8824 * vha = scsi host structure pointer.
8825 * fcport = port structure pointer.
8826 *
8827 * Return:
8828 * non-zero (if found)
8829 * -1 (if not found)
8830 *
8831 * Context:
8832 * Kernel context
8833 */
8834 static int
qla24xx_get_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)8835 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8836 {
8837 int i, entries;
8838 uint8_t pid_match, wwn_match;
8839 int priority;
8840 uint32_t pid1, pid2;
8841 uint64_t wwn1, wwn2;
8842 struct qla_fcp_prio_entry *pri_entry;
8843 struct qla_hw_data *ha = vha->hw;
8844
8845 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
8846 return -1;
8847
8848 priority = -1;
8849 entries = ha->fcp_prio_cfg->num_entries;
8850 pri_entry = &ha->fcp_prio_cfg->entry[0];
8851
8852 for (i = 0; i < entries; i++) {
8853 pid_match = wwn_match = 0;
8854
8855 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8856 pri_entry++;
8857 continue;
8858 }
8859
8860 /* check source pid for a match */
8861 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8862 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8863 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8864 if (pid1 == INVALID_PORT_ID)
8865 pid_match++;
8866 else if (pid1 == pid2)
8867 pid_match++;
8868 }
8869
8870 /* check destination pid for a match */
8871 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8872 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8873 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8874 if (pid1 == INVALID_PORT_ID)
8875 pid_match++;
8876 else if (pid1 == pid2)
8877 pid_match++;
8878 }
8879
8880 /* check source WWN for a match */
8881 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8882 wwn1 = wwn_to_u64(vha->port_name);
8883 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8884 if (wwn2 == (uint64_t)-1)
8885 wwn_match++;
8886 else if (wwn1 == wwn2)
8887 wwn_match++;
8888 }
8889
8890 /* check destination WWN for a match */
8891 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8892 wwn1 = wwn_to_u64(fcport->port_name);
8893 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8894 if (wwn2 == (uint64_t)-1)
8895 wwn_match++;
8896 else if (wwn1 == wwn2)
8897 wwn_match++;
8898 }
8899
8900 if (pid_match == 2 || wwn_match == 2) {
8901 /* Found a matching entry */
8902 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8903 priority = pri_entry->tag;
8904 break;
8905 }
8906
8907 pri_entry++;
8908 }
8909
8910 return priority;
8911 }
8912
8913 /*
8914 * qla24xx_update_fcport_fcp_prio
8915 * Activates fcp priority for the logged in fc port
8916 *
8917 * Input:
8918 * vha = scsi host structure pointer.
8919 * fcp = port structure pointer.
8920 *
8921 * Return:
8922 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8923 *
8924 * Context:
8925 * Kernel context.
8926 */
8927 int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)8928 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8929 {
8930 int ret;
8931 int priority;
8932 uint16_t mb[5];
8933
8934 if (fcport->port_type != FCT_TARGET ||
8935 fcport->loop_id == FC_NO_LOOP_ID)
8936 return QLA_FUNCTION_FAILED;
8937
8938 priority = qla24xx_get_fcp_prio(vha, fcport);
8939 if (priority < 0)
8940 return QLA_FUNCTION_FAILED;
8941
8942 if (IS_P3P_TYPE(vha->hw)) {
8943 fcport->fcp_prio = priority & 0xf;
8944 return QLA_SUCCESS;
8945 }
8946
8947 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8948 if (ret == QLA_SUCCESS) {
8949 if (fcport->fcp_prio != priority)
8950 ql_dbg(ql_dbg_user, vha, 0x709e,
8951 "Updated FCP_CMND priority - value=%d loop_id=%d "
8952 "port_id=%02x%02x%02x.\n", priority,
8953 fcport->loop_id, fcport->d_id.b.domain,
8954 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8955 fcport->fcp_prio = priority & 0xf;
8956 } else
8957 ql_dbg(ql_dbg_user, vha, 0x704f,
8958 "Unable to update FCP_CMND priority - ret=0x%x for "
8959 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8960 fcport->d_id.b.domain, fcport->d_id.b.area,
8961 fcport->d_id.b.al_pa);
8962 return ret;
8963 }
8964
8965 /*
8966 * qla24xx_update_all_fcp_prio
8967 * Activates fcp priority for all the logged in ports
8968 *
8969 * Input:
8970 * ha = adapter block pointer.
8971 *
8972 * Return:
8973 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8974 *
8975 * Context:
8976 * Kernel context.
8977 */
8978 int
qla24xx_update_all_fcp_prio(scsi_qla_host_t * vha)8979 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8980 {
8981 int ret;
8982 fc_port_t *fcport;
8983
8984 ret = QLA_FUNCTION_FAILED;
8985 /* We need to set priority for all logged in ports */
8986 list_for_each_entry(fcport, &vha->vp_fcports, list)
8987 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8988
8989 return ret;
8990 }
8991
qla2xxx_create_qpair(struct scsi_qla_host * vha,int qos,int vp_idx,bool startqp)8992 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8993 int vp_idx, bool startqp)
8994 {
8995 int rsp_id = 0;
8996 int req_id = 0;
8997 int i;
8998 struct qla_hw_data *ha = vha->hw;
8999 uint16_t qpair_id = 0;
9000 struct qla_qpair *qpair = NULL;
9001 struct qla_msix_entry *msix;
9002
9003 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9004 ql_log(ql_log_warn, vha, 0x00181,
9005 "FW/Driver is not multi-queue capable.\n");
9006 return NULL;
9007 }
9008
9009 if (ql2xmqsupport || ql2xnvmeenable) {
9010 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9011 if (qpair == NULL) {
9012 ql_log(ql_log_warn, vha, 0x0182,
9013 "Failed to allocate memory for queue pair.\n");
9014 return NULL;
9015 }
9016
9017 qpair->hw = vha->hw;
9018 qpair->vha = vha;
9019 qpair->qp_lock_ptr = &qpair->qp_lock;
9020 spin_lock_init(&qpair->qp_lock);
9021 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9022
9023 /* Assign available que pair id */
9024 mutex_lock(&ha->mq_lock);
9025 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9026 if (ha->num_qpairs >= ha->max_qpairs) {
9027 mutex_unlock(&ha->mq_lock);
9028 ql_log(ql_log_warn, vha, 0x0183,
9029 "No resources to create additional q pair.\n");
9030 goto fail_qid_map;
9031 }
9032 ha->num_qpairs++;
9033 set_bit(qpair_id, ha->qpair_qid_map);
9034 ha->queue_pair_map[qpair_id] = qpair;
9035 qpair->id = qpair_id;
9036 qpair->vp_idx = vp_idx;
9037 qpair->fw_started = ha->flags.fw_started;
9038 INIT_LIST_HEAD(&qpair->hints_list);
9039 qpair->chip_reset = ha->base_qpair->chip_reset;
9040 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9041 qpair->enable_explicit_conf =
9042 ha->base_qpair->enable_explicit_conf;
9043
9044 for (i = 0; i < ha->msix_count; i++) {
9045 msix = &ha->msix_entries[i];
9046 if (msix->in_use)
9047 continue;
9048 qpair->msix = msix;
9049 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9050 "Vector %x selected for qpair\n", msix->vector);
9051 break;
9052 }
9053 if (!qpair->msix) {
9054 ql_log(ql_log_warn, vha, 0x0184,
9055 "Out of MSI-X vectors!.\n");
9056 goto fail_msix;
9057 }
9058
9059 qpair->msix->in_use = 1;
9060 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9061 qpair->pdev = ha->pdev;
9062 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9063 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9064
9065 mutex_unlock(&ha->mq_lock);
9066
9067 /* Create response queue first */
9068 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9069 if (!rsp_id) {
9070 ql_log(ql_log_warn, vha, 0x0185,
9071 "Failed to create response queue.\n");
9072 goto fail_rsp;
9073 }
9074
9075 qpair->rsp = ha->rsp_q_map[rsp_id];
9076
9077 /* Create request queue */
9078 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9079 startqp);
9080 if (!req_id) {
9081 ql_log(ql_log_warn, vha, 0x0186,
9082 "Failed to create request queue.\n");
9083 goto fail_req;
9084 }
9085
9086 qpair->req = ha->req_q_map[req_id];
9087 qpair->rsp->req = qpair->req;
9088 qpair->rsp->qpair = qpair;
9089 /* init qpair to this cpu. Will adjust at run time. */
9090 qla_cpu_update(qpair, smp_processor_id());
9091
9092 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9093 if (ha->fw_attributes & BIT_4)
9094 qpair->difdix_supported = 1;
9095 }
9096
9097 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9098 if (!qpair->srb_mempool) {
9099 ql_log(ql_log_warn, vha, 0xd036,
9100 "Failed to create srb mempool for qpair %d\n",
9101 qpair->id);
9102 goto fail_mempool;
9103 }
9104
9105 /* Mark as online */
9106 qpair->online = 1;
9107
9108 if (!vha->flags.qpairs_available)
9109 vha->flags.qpairs_available = 1;
9110
9111 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9112 "Request/Response queue pair created, id %d\n",
9113 qpair->id);
9114 ql_dbg(ql_dbg_init, vha, 0x0187,
9115 "Request/Response queue pair created, id %d\n",
9116 qpair->id);
9117 }
9118 return qpair;
9119
9120 fail_mempool:
9121 fail_req:
9122 qla25xx_delete_rsp_que(vha, qpair->rsp);
9123 fail_rsp:
9124 mutex_lock(&ha->mq_lock);
9125 qpair->msix->in_use = 0;
9126 list_del(&qpair->qp_list_elem);
9127 if (list_empty(&vha->qp_list))
9128 vha->flags.qpairs_available = 0;
9129 fail_msix:
9130 ha->queue_pair_map[qpair_id] = NULL;
9131 clear_bit(qpair_id, ha->qpair_qid_map);
9132 ha->num_qpairs--;
9133 mutex_unlock(&ha->mq_lock);
9134 fail_qid_map:
9135 kfree(qpair);
9136 return NULL;
9137 }
9138
qla2xxx_delete_qpair(struct scsi_qla_host * vha,struct qla_qpair * qpair)9139 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9140 {
9141 int ret = QLA_FUNCTION_FAILED;
9142 struct qla_hw_data *ha = qpair->hw;
9143
9144 qpair->delete_in_progress = 1;
9145
9146 ret = qla25xx_delete_req_que(vha, qpair->req);
9147 if (ret != QLA_SUCCESS)
9148 goto fail;
9149
9150 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9151 if (ret != QLA_SUCCESS)
9152 goto fail;
9153
9154 mutex_lock(&ha->mq_lock);
9155 ha->queue_pair_map[qpair->id] = NULL;
9156 clear_bit(qpair->id, ha->qpair_qid_map);
9157 ha->num_qpairs--;
9158 list_del(&qpair->qp_list_elem);
9159 if (list_empty(&vha->qp_list)) {
9160 vha->flags.qpairs_available = 0;
9161 vha->flags.qpairs_req_created = 0;
9162 vha->flags.qpairs_rsp_created = 0;
9163 }
9164 mempool_destroy(qpair->srb_mempool);
9165 kfree(qpair);
9166 mutex_unlock(&ha->mq_lock);
9167
9168 return QLA_SUCCESS;
9169 fail:
9170 return ret;
9171 }
9172