/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCXP);

/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS	(1)
#define BFA_LPS_MAX_LPORTS	(256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB	255
#define BFA_LPS_MAX_VPORTS_SUPP_CT	190


/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC is disabled explicitly
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine */
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine */
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port */
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event */
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available */
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure */
	BFA_FCPORT_SM_DPORTENABLE = 10,	/*  enable dport */
	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport */
	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
	BFA_FCPORT_SM_DDPORTENABLE = 13,	/* enable ddport */
	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport */
};

/*
 * BFA port link notification state machine events
 */

enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification */
};

/*
 * RPORT related definitions
 */
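/*
 * The rport offline/online callbacks below invoke the FCS handler
 * directly when an FCS instance is attached (bfa->fcs); otherwise the
 * completion is queued on the hal callback queue and delivered later.
 */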
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)

/*
 * forward declarations FCXP related functions
 */
static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void	bfa_fcxp_qresume(void *cbarg);
static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_req_s *send_req);

/*
 * forward declarations for LPS functions
 */
static void bfa_lps_login_rsp(struct bfa_s *bfa,
				struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
				struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);

/*
 * forward declaration for LPS state machine
 */
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);

/*
 * forward declaration for FC Port functions
 */
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
			enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
				enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);

/*
 * forward declaration for FC PORT state machine
 */
static void	bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);

static void	bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void	bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);

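/*
 * Table mapping FC port state machine handlers to the externally
 * visible port states; note that both bfa_fcport_sm_iocdown and
 * bfa_fcport_sm_iocfail report BFA_PORT_ST_IOCDOWN.
 */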
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};


/*
 * forward declaration for RPORT related functions
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void		bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void		__bfa_cb_rport_online(void *cbarg,
						bfa_boolean_t complete);
static void		__bfa_cb_rport_offline(void *cbarg,
						bfa_boolean_t complete);

/*
 * forward declaration for RPORT state machine
 */
static void	bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void	bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);

/*
 * PLOG related definitions
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}

static u64
bfa_get_log_time(void)
{
	u64 system_time = 0;
	struct timeval tv;
	do_gettimeofday(&tv);

	/* We are interested in seconds only. */
	system_time = tv.tv_sec;
	return system_time;
}

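/*
 * Append a record to the portlog circular buffer. Invalid records are
 * rejected with a warning. Each record is stamped with the current time
 * in seconds and the tail is advanced; when the buffer is full, the head
 * is advanced as well, overwriting the oldest record.
 */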
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}

void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}

void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s  lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strlcpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ);
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s  lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	struct bfa_plog_rec_s	lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}

void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
		      u32 pld_w0)
{
	struct bfa_plog_rec_s	lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}


/*
 * fcxp_pvt BFA FCXP private functions
 */

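/*
 * Carve the bfa_fcxp_s array out of the claimed KVA memory and split the
 * pool in two: the first half of the FCXPs is parked on the request free
 * queue and the second half on the response free queue.
 */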
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		if (i < (mod->num_fcxps / 2)) {
			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
			fcxp->req_rsp = BFA_TRUE;
		} else {
			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
			fcxp->req_rsp = BFA_FALSE;
		}

		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	bfa_mem_kva_curp(mod) = (void *)fcxp;
}

void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}

void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->req_wait_q);
	INIT_LIST_HEAD(&mod->rsp_wait_q);

	claim_fcxps_mem(mod);
}

void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}

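/*
 * Dequeue an FCXP from the request or response free queue and move it
 * to the active queue; returns NULL if the chosen free queue is empty.
 */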
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp;

	if (req)
		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
	else
		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}

static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		WARN_ON(*sga_cbfn == NULL);
		WARN_ON(*sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}

}

static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}

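/*
 * Return an FCXP to the module. If a caller is waiting for an FCXP of
 * this type, hand it over through the wait queue element's alloc
 * callback; otherwise move it from the active queue back to its free
 * queue.
 */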
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}

static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}

static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (complete) {
		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
				fcxp->rsp_status, fcxp->rsp_len,
				fcxp->residue_len, &fcxp->rsp_fchs);
	} else {
		bfa_fcxp_free(fcxp);
	}
}

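/*
 * Firmware response to an FCXP send request: fix up endianness and
 * residue, log the received frame, then either invoke the send callback
 * directly (no caller context) or queue the completion on the hal
 * callback queue.
 */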
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16			fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}

static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	if (reqlen > 0) {
		if (fcxp->use_ireqbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s), fchs,
					pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s),
					fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
				reqlen + sizeof(struct fchs_s), fchs);
	}
}

static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	if (fcxp_rsp->rsp_len > 0) {
		if (fcxp->use_irspbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_RX,
					(u16) fcxp_rsp->rsp_len,
					&fcxp_rsp->fchs, pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_RX,
					(u16) fcxp_rsp->rsp_len,
					&fcxp_rsp->fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
				(u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}

/*
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s	*fcxp = cbarg;
	struct bfa_s		*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_fn_lpu(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			WARN_ON(fcxp->nreq_sgles != 1);
			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
				fcxp->req_sga_cbfn(fcxp->caller, 0));
		} else {
			WARN_ON(reqi->req_tot_len != 0);
			bfa_alen_set(&send_req->req_alen, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);

		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));
	} else {
		if (fcxp->nrsp_sgles > 0) {
			WARN_ON(fcxp->nrsp_sgles != 1);
			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
				fcxp->rsp_sga_cbfn(fcxp->caller, 0));

		} else {
			WARN_ON(rspi->rsp_maxlen != 0);
			bfa_alen_set(&send_req->rsp_alen, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}

/*
 * Allocate an FCXP instance to send a response or to send a request
 * that has a response. Request/response buffers are allocated by caller.
 *
 * @param[in]	bfa		BFA bfa instance
 * @param[in]	nreq_sgles	Number of SG elements required for request
 *				buffer. 0, if fcxp internal buffers are used.
 *				Use bfa_fcxp_get_reqbuf() to get the
 *				internal req buffer.
 * @param[in]	req_sgles	SG elements describing request buffer. Will be
 *				copied in by BFA and hence can be freed on
 *				return from this function.
 * @param[in]	get_req_sga	function ptr to be called to get a request SG
 *				Address (given the sge index).
 * @param[in]	get_req_sglen	function ptr to be called to get a request SG
 *				len (given the sge index).
 * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
 *				Address (given the sge index).
 * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
 *				len (given the sge index).
 * @param[in]	req		Allocated FCXP is used to send req or rsp?
 *				request - BFA_TRUE, response - BFA_FALSE
 *
 * @return FCXP instance. NULL on failure.
 */
struct bfa_fcxp_s *
bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		bfa_fcxp_get_sglen_t req_sglen_cbfn,
		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_s *fcxp = NULL;

	WARN_ON(bfa == NULL);

	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
	if (fcxp == NULL)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}

/*
 * Get the internal request buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	pointer to the internal request buffer
 */
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*reqbuf;

	WARN_ON(fcxp->use_ireqbuf != 1);
	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);
	return reqbuf;
}

u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	return mod->req_pld_sz;
}

/*
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*fcxp_buf;

	WARN_ON(fcxp->use_irspbuf != 1);

	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);

	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}

/*
 * Free the BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	void
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	WARN_ON(fcxp == NULL);
	bfa_trc(mod->bfa, fcxp->fcxp_tag);
	bfa_fcxp_put(fcxp);
}

/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *			the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return	void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	WARN_ON(1);
	return BFA_STATUS_OK;
}

void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	if (req)
		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
	else
		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	if (req)
		list_add_tail(&wqe->qe, &mod->req_wait_q);
	else
		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}

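/*
 * Cancel an FCXP allocation wait previously queued with
 * bfa_fcxp_req_rsp_alloc_wait().
 */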
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
	list_del(&wqe->qe);
}

void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn = bfa_fcxp_null_comp;
}

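/*
 * FCXP interrupt message handler -- dispatches firmware messages by id.
 */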
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	return mod->rsp_pld_sz;
}

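/*
 * Firmware reconfiguration reduced the number of usable FCXPs; park the
 * excess entries on the unused queues (request entries first, then
 * response entries) until the IOC is disabled.
 */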
void
bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_req_unused_q);
		} else {
			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
		}
	}
}

/*
 * BFA LPS state machine functions
 */

/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects loopback and discards
		 * the lps request. FW will eventually send out the timeout.
		 * Just ignore it.
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * The login request was not even sent out; so when getting
		 * out of this state, it will appear as a login retry
		 * after a Clear Virtual Link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete -- awaiting space in request queue to send the N2N PID
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}



/*
 * lps_pvt BFA LPS private functions
 */

/*
 * return memory requirement
 */
void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
	else
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
}

/*
 * bfa module attach at initialization time
 */
void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);

	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);
	INIT_LIST_HEAD(&mod->lps_login_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->bfa_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}

/*
 * IOC in disabled state -- consider all lps offline
 */
void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head	*qe, *qen;

	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}

/*
 * Firmware login response
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

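/*
 * BFA_STATUS_VPORT_MAX response: no resources for more vports. Propagate
 * the failure status of first_lps to up to 'count' login requests still
 * pending behind it on the login queue and complete them.
 */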
static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	qe = bfa_q_next(first_lps);

	while (count && qe) {
		qe_next = bfa_q_next(qe);
		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}

/*
 * Firmware logout response
 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware received a Clear virtual link request (for FCoE)
 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}

/*
 * Space is available in request queue, resume queueing request to firmware.
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}

/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * send n2n pid set request to firmware
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * Indirect login completion handler for non-fcs
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Login completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}

/*
 * Indirect logout completion handler for non-fcs
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
	else
		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Logout completion handler -- direct call for fcs, queue for others
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}

/*
 * Clear virtual link completion handler for non-fcs
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}

/*
 * Received Clear virtual link event -- direct call for fcs,
 * queue for others
 */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}



/*
 * lps_public BFA LPS public functions
 */

u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
		return BFA_LPS_MAX_VPORTS_SUPP_CT;
	else
		return BFA_LPS_MAX_VPORTS_SUPP_CB;
}

/*
1867 * Allocate a lport srvice tag.
1868 */
1869 struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s * bfa)1870 bfa_lps_alloc(struct bfa_s *bfa)
1871 {
1872 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1873 struct bfa_lps_s *lps = NULL;
1874
1875 bfa_q_deq(&mod->lps_free_q, &lps);
1876
1877 if (lps == NULL)
1878 return NULL;
1879
1880 list_add_tail(&lps->qe, &mod->lps_active_q);
1881
1882 bfa_sm_set_state(lps, bfa_lps_sm_init);
1883 return lps;
1884 }
1885
1886 /*
1887 * Free lport service tag. This can be called anytime after an alloc.
1888 * No need to wait for any pending login/logout completions.
1889 */
1890 void
bfa_lps_delete(struct bfa_lps_s * lps)1891 bfa_lps_delete(struct bfa_lps_s *lps)
1892 {
1893 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1894 }
1895
/*
 * Initiate a lport login.
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg = uarg;
	lps->alpa = alpa;
	lps->pdusz = pdusz;
	lps->pwwn = pwwn;
	lps->nwwn = nwwn;
	lps->fdisc = BFA_FALSE;
	lps->auth_en = auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}

/*
 * Initiate a lport fdisc login.
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg = uarg;
	lps->alpa = 0;
	lps->pdusz = pdusz;
	lps->pwwn = pwwn;
	lps->nwwn = nwwn;
	lps->fdisc = BFA_TRUE;
	lps->auth_en = BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}


/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}

u8
bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
}

/*
 * Return lport services tag given the pid
 */
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;
	int i;

	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
		if (lps->lp_pid == pid)
			return lps->bfa_tag;
	}

	/* Return base port tag anyway */
	return 0;
}


/*
 * return port id assigned to the base lport
 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}

/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}

/*
 * LPS firmware message class handler.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}

static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s *aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				BFA_AEN_CAT_PORT, event);
}

/*
 * FC PORT state machine functions
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

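/*
 * Port enable is pending: the request queue was full when the enable
 * was attempted, so wait for BFA_FCPORT_SM_QRESUME before sending the
 * enable request to firmware.
 */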
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Enable is already in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * The enable request never reached the firmware (still
		 * waiting for queue space); cancel the wait and go
		 * straight to the disabled state.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
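		/*
		 * An enable arrived while the disable was waiting for queue
		 * space: send the disable now, then immediately queue the
		 * re-enable.
		 */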
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		break;

	case BFA_FCPORT_SM_DISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}

/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}

/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}

static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore the event while the port has an FAA
		 * misconfiguration.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}

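/*
 * In the link notification states below, an "_nf" suffix means a
 * notification to the upper layer is still outstanding; link flips that
 * arrive in the meantime are coalesced until the notification callback
 * completes.
 */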
/*
 * Link state is down
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is up
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}

static void
__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_ln_s *ln = cbarg;

	if (complete)
		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
	else
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}

/*
 * Send SCN notification to upper layers.
 * trunk - BFA_FALSE when the caller is fcport itself, so that individual
 * port events are suppressed while the port is in trunked mode
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		WARN_ON(1);
	}
}

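/*
 * Deliver a link state change to the upper layer: call back synchronously
 * when running in FCS context, otherwise defer the event through the
 * completion queue.
 */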
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}

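/* Per-port stats DMA buffer, rounded up to a cacheline multiple. */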
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
				BFA_CACHELINE_SZ))

void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}

static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}

static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
	fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
	fcport->stats = (union bfa_fcport_stats_u *)
				bfa_mem_dma_virt(fcport_dma);
}

/*
 * Memory initialization.
 */
void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;
	fcport->stats_dma_ready = BFA_FALSE;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

	fcport->fec_state = BFA_FEC_OFFLINE;

	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}

void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}

/*
 * Called when IOC failure is detected.
 */
void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}

/*
 * Update loop info in fcport for SCN online
 */
static void
bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
		struct bfa_fcport_loop_info_s *loop_info)
{
	fcport->myalpa = loop_info->myalpa;
	fcport->alpabm_valid = loop_info->alpabm_val;
	memcpy(fcport->alpabm.alpa_bm, loop_info->alpabm.alpa_bm,
		sizeof(struct fc_alpabm_s));
}

static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}

static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->fec_state = BFA_FEC_OFFLINE;
}

/*
 * Send port enable message to firmware.
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}

/*
 * Send port disable message to firmware.
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}

static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}

static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
		struct bfa_qos_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	/* Now swap the 32 bit fields */
	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
		dip[i] = be32_to_cpu(sip[i]);
}

static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
		struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

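	/*
	 * FCoE stats are 64-bit counters stored as big-endian 32-bit
	 * words; besides byte-swapping each word, a little-endian host
	 * must also exchange the two halves of every 64-bit counter.
	 */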
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
			i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}

static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		struct timeval tv;
		if (fcport->stats_status == BFA_STATUS_OK)
			do_gettimeofday(&tv);

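		/*
		 * Drain every waiter queued on stats_pending_q; each gets
		 * its own byte-swapped copy of the DMA'ed firmware stats.
		 */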
		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					ret->fcoe.secs_reset =
						tv.tv_sec - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}

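/*
 * Fetch port stats from firmware. If the request queue is full, this
 * function re-arms itself as the queue-resume callback and retries once
 * space frees up.
 */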
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}

static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}

static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}

/*
 * Handle trunk SCN event from firmware.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		lattr->deskew = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

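	/*
	 * link_bm has one bit per trunk link (bit 0 = link 0, bit 1 =
	 * link 1); log which combination of links came up.
	 */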
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}

static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}

/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}

/*
 * Firmware message handler.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			fcport->stats_dma_ready = BFA_TRUE;
			if (fcport->use_flash_cfg) {
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->qos_attr.qos_bw =
					i2hmsg.penable_rsp->port_cfg.qos_bw;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			if (fcport->cfg.qos_enabled)
				fcport->qos_attr.state = BFA_QOS_OFFLINE;
			else
				fcport->qos_attr.state = BFA_QOS_DISABLED;

			fcport->qos_attr.qos_bw_op =
				i2hmsg.penable_rsp->port_cfg.qos_bw;

			if (fcport->cfg.bb_cr_enabled)
				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
			else
				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (fcport->cfg.bb_cr_enabled)
			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
		else
			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else {
			if (i2hmsg.event->link_state.linkstate_rsn ==
			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_FAA_MISCONFIG);
			else
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_LINKDOWN);
		}
		fcport->qos_attr.qos_bw_op =
			i2hmsg.event->link_state.qos_attr.qos_bw_op;
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->stats_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->statsclr_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

/*
 * Registered callback for port events.
 */
void
bfa_fcport_event_register(struct bfa_s *bfa,
			void (*cbfn) (void *cbarg,
				enum bfa_port_linkstate event),
			void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->event_cbfn = cbfn;
	fcport->event_cbarg = cbarg;
}

bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}

/* If PBC is disabled on port, return error */
bfa_status_t
bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
		bfa_trc(bfa, fcport->pwwn);
		return BFA_STATUS_PBC;
	}
	return BFA_STATUS_OK;
}

3756 /*
3757 * Configure port speed.
3758 */
3759 bfa_status_t
3760 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3761 {
3762 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3763
3764 bfa_trc(bfa, speed);
3765
3766 if (fcport->cfg.trunked == BFA_TRUE)
3767 return BFA_STATUS_TRUNK_ENABLED;
3768 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3769 (speed == BFA_PORT_SPEED_16GBPS))
3770 return BFA_STATUS_UNSUPP_SPEED;
3771 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3772 bfa_trc(bfa, fcport->speed_sup);
3773 return BFA_STATUS_UNSUPP_SPEED;
3774 }
3775
3776 /* Validate the requested speed against the adapter type */
3777 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3778 /* For CT2, 1G is not supported */
3779 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3780 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3781 return BFA_STATUS_UNSUPP_SPEED;
3782
3783 /* Already checked for Auto Speed and Max Speed supp */
3784 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3785 speed == BFA_PORT_SPEED_2GBPS ||
3786 speed == BFA_PORT_SPEED_4GBPS ||
3787 speed == BFA_PORT_SPEED_8GBPS ||
3788 speed == BFA_PORT_SPEED_16GBPS ||
3789 speed == BFA_PORT_SPEED_AUTO))
3790 return BFA_STATUS_UNSUPP_SPEED;
3791 } else {
3792 if (speed != BFA_PORT_SPEED_10GBPS)
3793 return BFA_STATUS_UNSUPP_SPEED;
3794 }
3795
3796 fcport->cfg.speed = speed;
3797
3798 return BFA_STATUS_OK;
3799 }
3800
3801 /*
3802 * Get current speed.
3803 */
3804 enum bfa_port_speed
3805 bfa_fcport_get_speed(struct bfa_s *bfa)
3806 {
3807 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3808
3809 return fcport->speed;
3810 }
3811
3812 /*
3813 * Configure port topology.
3814 */
3815 bfa_status_t
3816 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3817 {
3818 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3819
3820 bfa_trc(bfa, topology);
3821 bfa_trc(bfa, fcport->cfg.topology);
3822
3823 switch (topology) {
3824 case BFA_PORT_TOPOLOGY_P2P:
3825 break;
3826
3827 case BFA_PORT_TOPOLOGY_LOOP:
3828 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3829 (fcport->qos_attr.state != BFA_QOS_DISABLED))
3830 return BFA_STATUS_ERROR_QOS_ENABLED;
3831 if (fcport->cfg.ratelimit != BFA_FALSE)
3832 return BFA_STATUS_ERROR_TRL_ENABLED;
3833 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3834 (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3835 return BFA_STATUS_ERROR_TRUNK_ENABLED;
3836 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3837 (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3838 return BFA_STATUS_UNSUPP_SPEED;
3839 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3840 return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3841 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3842 return BFA_STATUS_DPORT_ERR;
3843 if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3844 return BFA_STATUS_DPORT_ERR;
3845 break;
3846
3847 case BFA_PORT_TOPOLOGY_AUTO:
3848 break;
3849
3850 default:
3851 return BFA_STATUS_EINVAL;
3852 }
3853
3854 fcport->cfg.topology = topology;
3855 return BFA_STATUS_OK;
3856 }
3857
3858 /*
3859 * Get current topology.
3860 */
3861 enum bfa_port_topology
3862 bfa_fcport_get_topology(struct bfa_s *bfa)
3863 {
3864 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3865
3866 return fcport->topology;
3867 }
3868
3869 /*
3870 * Get the configured topology.
3871 */
3872 enum bfa_port_topology
3873 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3874 {
3875 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3876
3877 return fcport->cfg.topology;
3878 }
3879
3880 bfa_status_t
3881 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3882 {
3883 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3884
3885 bfa_trc(bfa, alpa);
3886 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3887 bfa_trc(bfa, fcport->cfg.hardalpa);
3888
3889 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3890 fcport->cfg.hardalpa = alpa;
3891
3892 return BFA_STATUS_OK;
3893 }
3894
3895 bfa_status_t
3896 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3897 {
3898 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3899
3900 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3901 bfa_trc(bfa, fcport->cfg.hardalpa);
3902
3903 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3904 return BFA_STATUS_OK;
3905 }
3906
3907 bfa_boolean_t
3908 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3909 {
3910 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3911
3912 *alpa = fcport->cfg.hardalpa;
3913 return fcport->cfg.cfg_hardalpa;
3914 }
3915
3916 u8
3917 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3918 {
3919 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3920
3921 return fcport->myalpa;
3922 }
3923
3924 bfa_status_t
3925 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3926 {
3927 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3928
3929 bfa_trc(bfa, maxfrsize);
3930 bfa_trc(bfa, fcport->cfg.maxfrsize);
3931
3932 /* within range */
3933 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3934 return BFA_STATUS_INVLD_DFSZ;
3935
3936 /* power of 2, if not the max frame size of 2112 */
3937 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3938 return BFA_STATUS_INVLD_DFSZ;
3939
3940 fcport->cfg.maxfrsize = maxfrsize;
3941 return BFA_STATUS_OK;
3942 }
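/*
 * The power-of-two test above relies on the identity x & (x - 1) == 0
 * for any power of two x: 2048 & 2047 == 0 is accepted, while
 * 2100 & 2099 != 0 is rejected.  FC_MAX_PDUSZ (2112) is the single
 * permitted value that is not a power of two, hence its explicit
 * carve-out in the condition.
 */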
3943
3944 u16
3945 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3946 {
3947 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3948
3949 return fcport->cfg.maxfrsize;
3950 }
3951
3952 u8
3953 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3954 {
3955 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3956 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3958 else
3959 return 0;
3960 }
3961
3962 void
3963 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3964 {
3965 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3966
3967 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3968 }
3969
3970 /*
3971 * Get port attributes.
3972 */
3973
3974 wwn_t
3975 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3976 {
3977 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3978 if (node)
3979 return fcport->nwwn;
3980 else
3981 return fcport->pwwn;
3982 }
3983
3984 void
3985 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3986 {
3987 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3988
3989 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3990
3991 attr->nwwn = fcport->nwwn;
3992 attr->pwwn = fcport->pwwn;
3993
3994 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3995 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3996
3997 memcpy(&attr->pport_cfg, &fcport->cfg,
3998 sizeof(struct bfa_port_cfg_s));
3999 /* speed attributes */
4000 attr->pport_cfg.speed = fcport->cfg.speed;
4001 attr->speed_supported = fcport->speed_sup;
4002 attr->speed = fcport->speed;
4003 attr->cos_supported = FC_CLASS_3;
4004
4005 /* topology attributes */
4006 attr->pport_cfg.topology = fcport->cfg.topology;
4007 attr->topology = fcport->topology;
4008 attr->pport_cfg.trunked = fcport->cfg.trunked;
4009
4010 /* beacon attributes */
4011 attr->beacon = fcport->beacon;
4012 attr->link_e2e_beacon = fcport->link_e2e_beacon;
4013
4014 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
4015 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
4016 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
4017
4018 attr->fec_state = fcport->fec_state;
4019
4020 /* PBC Disabled State */
4021 if (bfa_fcport_is_pbcdisabled(bfa))
4022 attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
4023 else {
4024 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
4025 attr->port_state = BFA_PORT_ST_IOCDIS;
4026 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
4027 attr->port_state = BFA_PORT_ST_FWMISMATCH;
4028 }
4029
4030 /* FCoE vlan */
4031 attr->fcoe_vlan = fcport->fcoe_vlan;
4032 }
4033
4034 #define BFA_FCPORT_STATS_TOV 1000
4035
4036 /*
4037 * Fetch port statistics (FCQoS or FCoE).
4038 */
4039 bfa_status_t
4040 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4041 {
4042 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4043
4044 if (!bfa_iocfc_is_operational(bfa) ||
4045 !fcport->stats_dma_ready)
4046 return BFA_STATUS_IOC_NON_OP;
4047
4048 if (!list_empty(&fcport->statsclr_pending_q))
4049 return BFA_STATUS_DEVBUSY;
4050
4051 if (list_empty(&fcport->stats_pending_q)) {
4052 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4053 bfa_fcport_send_stats_get(fcport);
4054 bfa_timer_start(bfa, &fcport->timer,
4055 bfa_fcport_stats_get_timeout,
4056 fcport, BFA_FCPORT_STATS_TOV);
4057 } else
4058 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4059
4060 return BFA_STATUS_OK;
4061 }
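/*
 * Minimal caller sketch (hypothetical: my_stats_cb is an illustrative
 * name, and the bfa_pending_q_init() helper is assumed from bfa_cs.h).
 * The first waiter triggers the firmware request and the guard timer;
 * later waiters are simply queued, and all of them complete together
 * from the BFI_FCPORT_I2H_STATS_GET_RSP handler above:
 *
 *	struct bfa_cb_pending_q_s cb;
 *
 *	bfa_pending_q_init(&cb, (bfa_cb_cbfn_t)my_stats_cb, cbarg, &stats);
 *	if (bfa_fcport_get_stats(bfa, &cb) != BFA_STATUS_OK)
 *		handle a busy port or non-operational IOC
 */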
4062
4063 /*
4064 * Reset port statistics (FCQoS or FCoE).
4065 */
4066 bfa_status_t
4067 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4068 {
4069 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4070
4071 if (!bfa_iocfc_is_operational(bfa) ||
4072 !fcport->stats_dma_ready)
4073 return BFA_STATUS_IOC_NON_OP;
4074
4075 if (!list_empty(&fcport->stats_pending_q))
4076 return BFA_STATUS_DEVBUSY;
4077
4078 if (list_empty(&fcport->statsclr_pending_q)) {
4079 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4080 bfa_fcport_send_stats_clear(fcport);
4081 bfa_timer_start(bfa, &fcport->timer,
4082 bfa_fcport_stats_clr_timeout,
4083 fcport, BFA_FCPORT_STATS_TOV);
4084 } else
4085 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4086
4087 return BFA_STATUS_OK;
4088 }
4089
4090 /*
4091 * Check whether the port is disabled.
4092 */
4093 bfa_boolean_t
4094 bfa_fcport_is_disabled(struct bfa_s *bfa)
4095 {
4096 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4097
4098 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4099 BFA_PORT_ST_DISABLED;
4101 }
4102
4103 bfa_boolean_t
4104 bfa_fcport_is_dport(struct bfa_s *bfa)
4105 {
4106 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4107
4108 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4109 BFA_PORT_ST_DPORT);
4110 }
4111
4112 bfa_boolean_t
4113 bfa_fcport_is_ddport(struct bfa_s *bfa)
4114 {
4115 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4116
4117 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4118 BFA_PORT_ST_DDPORT);
4119 }
4120
4121 bfa_status_t
4122 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4123 {
4124 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4125 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4126
4127 bfa_trc(bfa, ioc_type);
4128
4129 if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4130 return BFA_STATUS_QOS_BW_INVALID;
4131
4132 if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4133 return BFA_STATUS_QOS_BW_INVALID;
4134
4135 if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4136 (qos_bw->low > qos_bw->high))
4137 return BFA_STATUS_QOS_BW_INVALID;
4138
4139 if ((ioc_type == BFA_IOC_TYPE_FC) &&
4140 (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4141 fcport->cfg.qos_bw = *qos_bw;
4142
4143 return BFA_STATUS_OK;
4144 }
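/*
 * Worked example for the checks above: {high 60, med 30, low 10} is
 * accepted (all non-zero, sums to 100, high >= med >= low), while
 * {50, 40, 20} fails the sum check and {20, 30, 50} fails the ordering
 * check.  Note that BFA_STATUS_OK is returned even when the values are
 * not applied (non-FC IOC or loop topology).
 */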
4145
4146 bfa_boolean_t
4147 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4148 {
4149 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4150
4151 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4153 }
4154
4155 /*
4156 * Enable/Disable FAA feature in port config
4157 */
4158 void
4159 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4160 {
4161 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4162
4163 bfa_trc(bfa, state);
4164 fcport->cfg.faa_state = state;
4165 }
4166
4167 /*
4168 * Get default minimum ratelim speed
4169 */
4170 enum bfa_port_speed
4171 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4172 {
4173 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4174
4175 bfa_trc(bfa, fcport->cfg.trl_def_speed);
4176 return fcport->cfg.trl_def_speed;
4178 }
4179
4180 void
4181 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4182 bfa_boolean_t link_e2e_beacon)
4183 {
4184 struct bfa_s *bfa = dev;
4185 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4186
4187 bfa_trc(bfa, beacon);
4188 bfa_trc(bfa, link_e2e_beacon);
4189 bfa_trc(bfa, fcport->beacon);
4190 bfa_trc(bfa, fcport->link_e2e_beacon);
4191
4192 fcport->beacon = beacon;
4193 fcport->link_e2e_beacon = link_e2e_beacon;
4194 }
4195
4196 bfa_boolean_t
4197 bfa_fcport_is_linkup(struct bfa_s *bfa)
4198 {
4199 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4200
4201 return (!fcport->cfg.trunked &&
4202 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4203 (fcport->cfg.trunked &&
4204 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4205 }
4206
4207 bfa_boolean_t
4208 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4209 {
4210 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4211
4212 return fcport->cfg.qos_enabled;
4213 }
4214
4215 bfa_boolean_t
4216 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4217 {
4218 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4219
4220 return fcport->cfg.trunked;
4221 }
4222
4223 bfa_status_t
4224 bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
4225 {
4226 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4227
4228 bfa_trc(bfa, on_off);
4229
4230 if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4231 return BFA_STATUS_BBCR_FC_ONLY;
4232
4233 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
4234 (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
4235 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
4236
4237 if (on_off) {
4238 if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4239 return BFA_STATUS_TOPOLOGY_LOOP;
4240
4241 if (fcport->cfg.qos_enabled)
4242 return BFA_STATUS_ERROR_QOS_ENABLED;
4243
4244 if (fcport->cfg.trunked)
4245 return BFA_STATUS_TRUNK_ENABLED;
4246
4247 if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
4248 (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
4249 return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
4250
4251 if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
4252 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
4253
4254 if (fcport->cfg.bb_cr_enabled) {
4255 if (bb_scn != fcport->cfg.bb_scn)
4256 return BFA_STATUS_BBCR_CFG_NO_CHANGE;
4257 else
4258 return BFA_STATUS_NO_CHANGE;
4259 }
4260
4261 if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
4262 bb_scn = BFA_BB_SCN_DEF;
4263
4264 fcport->cfg.bb_cr_enabled = on_off;
4265 fcport->cfg.bb_scn = bb_scn;
4266 } else {
4267 if (!fcport->cfg.bb_cr_enabled)
4268 return BFA_STATUS_NO_CHANGE;
4269
4270 fcport->cfg.bb_cr_enabled = on_off;
4271 fcport->cfg.bb_scn = 0;
4272 }
4273
4274 return BFA_STATUS_OK;
4275 }
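/*
 * Note on the enable path above: a bb_scn of 0 or above BFA_BB_SCN_MAX
 * is silently replaced with BFA_BB_SCN_DEF rather than rejected, so a
 * caller cannot rely on an error status to detect an out-of-range
 * credit-recovery value.
 */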
4276
4277 bfa_status_t
4278 bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4279 struct bfa_bbcr_attr_s *bbcr_attr)
4280 {
4281 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4282
4283 if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4284 return BFA_STATUS_BBCR_FC_ONLY;
4285
4286 if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4287 return BFA_STATUS_TOPOLOGY_LOOP;
4288
4289 *bbcr_attr = fcport->bbcr_attr;
4290
4291 return BFA_STATUS_OK;
4292 }
4293
4294 void
4295 bfa_fcport_dportenable(struct bfa_s *bfa)
4296 {
4297 /*
4298 * Assume the caller has checked that the port is in the disabled state
4299 */
4300 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4301 bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4302 }
4303
4304 void
4305 bfa_fcport_dportdisable(struct bfa_s *bfa)
4306 {
4307 /*
4308 * Assume the caller has checked that the port is in the disabled state
4309 */
4310 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4311 bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4312 }
4313
4314 void
4315 bfa_fcport_ddportenable(struct bfa_s *bfa)
4316 {
4317 /*
4318 * Assume the caller has checked that the port is in the disabled state
4319 */
4320 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4321 }
4322
4323 void
4324 bfa_fcport_ddportdisable(struct bfa_s *bfa)
4325 {
4326 /*
4327 * Assume the caller has checked that the port is in the disabled state
4328 */
4329 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4330 }
4331
4332 /*
4333 * Rport State machine functions
4334 */
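/*
 * Informal overview, derived from the handlers below: an rport moves
 * uninit -> created on CREATE and created -> fwcreate[_qfull] on
 * ONLINE; FWRSP then takes it to online.  From online, OFFLINE leads
 * to fwdelete[_qfull] and DELETE to deleting[_qfull].  The *_qfull
 * states park the rport on the request-queue wait list until QRESUME
 * signals CQ space.  HWFAIL generally lands in iocdisable, except
 * where a delete is already in progress, in which case the rport is
 * freed outright.
 */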
4335 /*
4336 * Beginning state, only online event expected.
4337 */
4338 static void
4339 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4340 {
4341 bfa_trc(rp->bfa, rp->rport_tag);
4342 bfa_trc(rp->bfa, event);
4343
4344 switch (event) {
4345 case BFA_RPORT_SM_CREATE:
4346 bfa_stats(rp, sm_un_cr);
4347 bfa_sm_set_state(rp, bfa_rport_sm_created);
4348 break;
4349
4350 default:
4351 bfa_stats(rp, sm_un_unexp);
4352 bfa_sm_fault(rp->bfa, event);
4353 }
4354 }
4355
4356 static void
4357 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4358 {
4359 bfa_trc(rp->bfa, rp->rport_tag);
4360 bfa_trc(rp->bfa, event);
4361
4362 switch (event) {
4363 case BFA_RPORT_SM_ONLINE:
4364 bfa_stats(rp, sm_cr_on);
4365 if (bfa_rport_send_fwcreate(rp))
4366 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4367 else
4368 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4369 break;
4370
4371 case BFA_RPORT_SM_DELETE:
4372 bfa_stats(rp, sm_cr_del);
4373 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4374 bfa_rport_free(rp);
4375 break;
4376
4377 case BFA_RPORT_SM_HWFAIL:
4378 bfa_stats(rp, sm_cr_hwf);
4379 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4380 break;
4381
4382 default:
4383 bfa_stats(rp, sm_cr_unexp);
4384 bfa_sm_fault(rp->bfa, event);
4385 }
4386 }
4387
4388 /*
4389 * Waiting for rport create response from firmware.
4390 */
4391 static void
4392 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4393 {
4394 bfa_trc(rp->bfa, rp->rport_tag);
4395 bfa_trc(rp->bfa, event);
4396
4397 switch (event) {
4398 case BFA_RPORT_SM_FWRSP:
4399 bfa_stats(rp, sm_fwc_rsp);
4400 bfa_sm_set_state(rp, bfa_rport_sm_online);
4401 bfa_rport_online_cb(rp);
4402 break;
4403
4404 case BFA_RPORT_SM_DELETE:
4405 bfa_stats(rp, sm_fwc_del);
4406 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4407 break;
4408
4409 case BFA_RPORT_SM_OFFLINE:
4410 bfa_stats(rp, sm_fwc_off);
4411 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4412 break;
4413
4414 case BFA_RPORT_SM_HWFAIL:
4415 bfa_stats(rp, sm_fwc_hwf);
4416 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4417 break;
4418
4419 default:
4420 bfa_stats(rp, sm_fwc_unexp);
4421 bfa_sm_fault(rp->bfa, event);
4422 }
4423 }
4424
4425 /*
4426 * Request queue is full, awaiting queue resume to send create request.
4427 */
4428 static void
4429 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4430 {
4431 bfa_trc(rp->bfa, rp->rport_tag);
4432 bfa_trc(rp->bfa, event);
4433
4434 switch (event) {
4435 case BFA_RPORT_SM_QRESUME:
4436 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4437 bfa_rport_send_fwcreate(rp);
4438 break;
4439
4440 case BFA_RPORT_SM_DELETE:
4441 bfa_stats(rp, sm_fwc_del);
4442 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4443 bfa_reqq_wcancel(&rp->reqq_wait);
4444 bfa_rport_free(rp);
4445 break;
4446
4447 case BFA_RPORT_SM_OFFLINE:
4448 bfa_stats(rp, sm_fwc_off);
4449 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4450 bfa_reqq_wcancel(&rp->reqq_wait);
4451 bfa_rport_offline_cb(rp);
4452 break;
4453
4454 case BFA_RPORT_SM_HWFAIL:
4455 bfa_stats(rp, sm_fwc_hwf);
4456 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4457 bfa_reqq_wcancel(&rp->reqq_wait);
4458 break;
4459
4460 default:
4461 bfa_stats(rp, sm_fwc_unexp);
4462 bfa_sm_fault(rp->bfa, event);
4463 }
4464 }
4465
4466 /*
4467 * Online state - normal parking state.
4468 */
4469 static void
4470 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4471 {
4472 struct bfi_rport_qos_scn_s *qos_scn;
4473
4474 bfa_trc(rp->bfa, rp->rport_tag);
4475 bfa_trc(rp->bfa, event);
4476
4477 switch (event) {
4478 case BFA_RPORT_SM_OFFLINE:
4479 bfa_stats(rp, sm_on_off);
4480 if (bfa_rport_send_fwdelete(rp))
4481 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4482 else
4483 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4484 break;
4485
4486 case BFA_RPORT_SM_DELETE:
4487 bfa_stats(rp, sm_on_del);
4488 if (bfa_rport_send_fwdelete(rp))
4489 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4490 else
4491 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4492 break;
4493
4494 case BFA_RPORT_SM_HWFAIL:
4495 bfa_stats(rp, sm_on_hwf);
4496 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4497 break;
4498
4499 case BFA_RPORT_SM_SET_SPEED:
4500 bfa_rport_send_fwspeed(rp);
4501 break;
4502
4503 case BFA_RPORT_SM_QOS_SCN:
4504 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4505 rp->qos_attr = qos_scn->new_qos_attr;
4506 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4507 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4508 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4509 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4510
4511 qos_scn->old_qos_attr.qos_flow_id =
4512 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4513 qos_scn->new_qos_attr.qos_flow_id =
4514 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4515
4516 if (qos_scn->old_qos_attr.qos_flow_id !=
4517 qos_scn->new_qos_attr.qos_flow_id)
4518 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4519 qos_scn->old_qos_attr,
4520 qos_scn->new_qos_attr);
4521 if (qos_scn->old_qos_attr.qos_priority !=
4522 qos_scn->new_qos_attr.qos_priority)
4523 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4524 qos_scn->old_qos_attr,
4525 qos_scn->new_qos_attr);
4526 break;
4527
4528 default:
4529 bfa_stats(rp, sm_on_unexp);
4530 bfa_sm_fault(rp->bfa, event);
4531 }
4532 }
4533
4534 /*
4535 * Firmware rport is being deleted - awaiting f/w response.
4536 */
4537 static void
4538 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4539 {
4540 bfa_trc(rp->bfa, rp->rport_tag);
4541 bfa_trc(rp->bfa, event);
4542
4543 switch (event) {
4544 case BFA_RPORT_SM_FWRSP:
4545 bfa_stats(rp, sm_fwd_rsp);
4546 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4547 bfa_rport_offline_cb(rp);
4548 break;
4549
4550 case BFA_RPORT_SM_DELETE:
4551 bfa_stats(rp, sm_fwd_del);
4552 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4553 break;
4554
4555 case BFA_RPORT_SM_HWFAIL:
4556 bfa_stats(rp, sm_fwd_hwf);
4557 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4558 bfa_rport_offline_cb(rp);
4559 break;
4560
4561 default:
4562 bfa_stats(rp, sm_fwd_unexp);
4563 bfa_sm_fault(rp->bfa, event);
4564 }
4565 }
4566
4567 static void
4568 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4569 {
4570 bfa_trc(rp->bfa, rp->rport_tag);
4571 bfa_trc(rp->bfa, event);
4572
4573 switch (event) {
4574 case BFA_RPORT_SM_QRESUME:
4575 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4576 bfa_rport_send_fwdelete(rp);
4577 break;
4578
4579 case BFA_RPORT_SM_DELETE:
4580 bfa_stats(rp, sm_fwd_del);
4581 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4582 break;
4583
4584 case BFA_RPORT_SM_HWFAIL:
4585 bfa_stats(rp, sm_fwd_hwf);
4586 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4587 bfa_reqq_wcancel(&rp->reqq_wait);
4588 bfa_rport_offline_cb(rp);
4589 break;
4590
4591 default:
4592 bfa_stats(rp, sm_fwd_unexp);
4593 bfa_sm_fault(rp->bfa, event);
4594 }
4595 }
4596
4597 /*
4598 * Offline state.
4599 */
4600 static void
4601 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4602 {
4603 bfa_trc(rp->bfa, rp->rport_tag);
4604 bfa_trc(rp->bfa, event);
4605
4606 switch (event) {
4607 case BFA_RPORT_SM_DELETE:
4608 bfa_stats(rp, sm_off_del);
4609 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4610 bfa_rport_free(rp);
4611 break;
4612
4613 case BFA_RPORT_SM_ONLINE:
4614 bfa_stats(rp, sm_off_on);
4615 if (bfa_rport_send_fwcreate(rp))
4616 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4617 else
4618 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4619 break;
4620
4621 case BFA_RPORT_SM_HWFAIL:
4622 bfa_stats(rp, sm_off_hwf);
4623 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4624 break;
4625
4626 case BFA_RPORT_SM_OFFLINE:
4627 bfa_rport_offline_cb(rp);
4628 break;
4629
4630 default:
4631 bfa_stats(rp, sm_off_unexp);
4632 bfa_sm_fault(rp->bfa, event);
4633 }
4634 }
4635
4636 /*
4637 * Rport is deleted, waiting for firmware response to delete.
4638 */
4639 static void
4640 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4641 {
4642 bfa_trc(rp->bfa, rp->rport_tag);
4643 bfa_trc(rp->bfa, event);
4644
4645 switch (event) {
4646 case BFA_RPORT_SM_FWRSP:
4647 bfa_stats(rp, sm_del_fwrsp);
4648 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4649 bfa_rport_free(rp);
4650 break;
4651
4652 case BFA_RPORT_SM_HWFAIL:
4653 bfa_stats(rp, sm_del_hwf);
4654 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4655 bfa_rport_free(rp);
4656 break;
4657
4658 default:
4659 bfa_sm_fault(rp->bfa, event);
4660 }
4661 }
4662
4663 static void
4664 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4665 {
4666 bfa_trc(rp->bfa, rp->rport_tag);
4667 bfa_trc(rp->bfa, event);
4668
4669 switch (event) {
4670 case BFA_RPORT_SM_QRESUME:
4671 bfa_stats(rp, sm_del_fwrsp);
4672 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4673 bfa_rport_send_fwdelete(rp);
4674 break;
4675
4676 case BFA_RPORT_SM_HWFAIL:
4677 bfa_stats(rp, sm_del_hwf);
4678 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4679 bfa_reqq_wcancel(&rp->reqq_wait);
4680 bfa_rport_free(rp);
4681 break;
4682
4683 default:
4684 bfa_sm_fault(rp->bfa, event);
4685 }
4686 }
4687
4688 /*
4689 * Waiting for rport create response from firmware. A delete is pending.
4690 */
4691 static void
4692 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4693 enum bfa_rport_event event)
4694 {
4695 bfa_trc(rp->bfa, rp->rport_tag);
4696 bfa_trc(rp->bfa, event);
4697
4698 switch (event) {
4699 case BFA_RPORT_SM_FWRSP:
4700 bfa_stats(rp, sm_delp_fwrsp);
4701 if (bfa_rport_send_fwdelete(rp))
4702 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4703 else
4704 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4705 break;
4706
4707 case BFA_RPORT_SM_HWFAIL:
4708 bfa_stats(rp, sm_delp_hwf);
4709 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4710 bfa_rport_free(rp);
4711 break;
4712
4713 default:
4714 bfa_stats(rp, sm_delp_unexp);
4715 bfa_sm_fault(rp->bfa, event);
4716 }
4717 }
4718
4719 /*
4720 * Waiting for rport create response from firmware. Rport offline is pending.
4721 */
4722 static void
4723 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4724 enum bfa_rport_event event)
4725 {
4726 bfa_trc(rp->bfa, rp->rport_tag);
4727 bfa_trc(rp->bfa, event);
4728
4729 switch (event) {
4730 case BFA_RPORT_SM_FWRSP:
4731 bfa_stats(rp, sm_offp_fwrsp);
4732 if (bfa_rport_send_fwdelete(rp))
4733 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4734 else
4735 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4736 break;
4737
4738 case BFA_RPORT_SM_DELETE:
4739 bfa_stats(rp, sm_offp_del);
4740 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4741 break;
4742
4743 case BFA_RPORT_SM_HWFAIL:
4744 bfa_stats(rp, sm_offp_hwf);
4745 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4746 bfa_rport_offline_cb(rp);
4747 break;
4748
4749 default:
4750 bfa_stats(rp, sm_offp_unexp);
4751 bfa_sm_fault(rp->bfa, event);
4752 }
4753 }
4754
4755 /*
4756 * IOC h/w failed.
4757 */
4758 static void
4759 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4760 {
4761 bfa_trc(rp->bfa, rp->rport_tag);
4762 bfa_trc(rp->bfa, event);
4763
4764 switch (event) {
4765 case BFA_RPORT_SM_OFFLINE:
4766 bfa_stats(rp, sm_iocd_off);
4767 bfa_rport_offline_cb(rp);
4768 break;
4769
4770 case BFA_RPORT_SM_DELETE:
4771 bfa_stats(rp, sm_iocd_del);
4772 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4773 bfa_rport_free(rp);
4774 break;
4775
4776 case BFA_RPORT_SM_ONLINE:
4777 bfa_stats(rp, sm_iocd_on);
4778 if (bfa_rport_send_fwcreate(rp))
4779 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4780 else
4781 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4782 break;
4783
4784 case BFA_RPORT_SM_HWFAIL:
4785 break;
4786
4787 default:
4788 bfa_stats(rp, sm_iocd_unexp);
4789 bfa_sm_fault(rp->bfa, event);
4790 }
4791 }
4792
4793
4794
4795 /*
4796 * bfa_rport_private BFA rport private functions
4797 */
4798
4799 static void
4800 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4801 {
4802 struct bfa_rport_s *rp = cbarg;
4803
4804 if (complete)
4805 bfa_cb_rport_online(rp->rport_drv);
4806 }
4807
4808 static void
4809 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4810 {
4811 struct bfa_rport_s *rp = cbarg;
4812
4813 if (complete)
4814 bfa_cb_rport_offline(rp->rport_drv);
4815 }
4816
4817 static void
4818 bfa_rport_qresume(void *cbarg)
4819 {
4820 struct bfa_rport_s *rp = cbarg;
4821
4822 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4823 }
4824
4825 void
4826 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4827 struct bfa_s *bfa)
4828 {
4829 struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4830
4831 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4832 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4833
4834 /* kva memory */
4835 bfa_mem_kva_setup(minfo, rport_kva,
4836 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4837 }
4838
4839 void
4840 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4841 struct bfa_pcidev_s *pcidev)
4842 {
4843 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4844 struct bfa_rport_s *rp;
4845 u16 i;
4846
4847 INIT_LIST_HEAD(&mod->rp_free_q);
4848 INIT_LIST_HEAD(&mod->rp_active_q);
4849 INIT_LIST_HEAD(&mod->rp_unused_q);
4850
4851 rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4852 mod->rps_list = rp;
4853 mod->num_rports = cfg->fwcfg.num_rports;
4854
4855 WARN_ON(!mod->num_rports ||
4856 (mod->num_rports & (mod->num_rports - 1)));
4857
4858 for (i = 0; i < mod->num_rports; i++, rp++) {
4859 memset(rp, 0, sizeof(struct bfa_rport_s));
4860 rp->bfa = bfa;
4861 rp->rport_tag = i;
4862 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4863
4864 /*
4865 * rport tag 0 is reserved (unused); it is not added to the free queue
4866 */
4867 if (i)
4868 list_add_tail(&rp->qe, &mod->rp_free_q);
4869
4870 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4871 }
4872
4873 /*
4874 * consume memory
4875 */
4876 bfa_mem_kva_curp(mod) = (u8 *) rp;
4877 }
4878
4879 void
4880 bfa_rport_iocdisable(struct bfa_s *bfa)
4881 {
4882 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4883 struct bfa_rport_s *rport;
4884 struct list_head *qe, *qen;
4885
4886 /* Enqueue unused rport resources to free_q */
4887 list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4888
4889 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4890 rport = (struct bfa_rport_s *) qe;
4891 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4892 }
4893 }
4894
4895 static struct bfa_rport_s *
4896 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4897 {
4898 struct bfa_rport_s *rport;
4899
4900 bfa_q_deq(&mod->rp_free_q, &rport);
4901 if (rport)
4902 list_add_tail(&rport->qe, &mod->rp_active_q);
4903
4904 return rport;
4905 }
4906
4907 static void
4908 bfa_rport_free(struct bfa_rport_s *rport)
4909 {
4910 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4911
4912 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4913 list_del(&rport->qe);
4914 list_add_tail(&rport->qe, &mod->rp_free_q);
4915 }
4916
4917 static bfa_boolean_t
4918 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4919 {
4920 struct bfi_rport_create_req_s *m;
4921
4922 /*
4923 * check for room in queue to send request now
4924 */
4925 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4926 if (!m) {
4927 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4928 return BFA_FALSE;
4929 }
4930
4931 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4932 bfa_fn_lpu(rp->bfa));
4933 m->bfa_handle = rp->rport_tag;
4934 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4935 m->pid = rp->rport_info.pid;
4936 m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4937 m->local_pid = rp->rport_info.local_pid;
4938 m->fc_class = rp->rport_info.fc_class;
4939 m->vf_en = rp->rport_info.vf_en;
4940 m->vf_id = rp->rport_info.vf_id;
4941 m->cisc = rp->rport_info.cisc;
4942
4943 /*
4944 * queue I/O message to firmware
4945 */
4946 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4947 return BFA_TRUE;
4948 }
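/*
 * bfa_rport_send_fwcreate() above and bfa_rport_send_fwdelete() below
 * share this shape: bfa_reqq_next() returns NULL when the request
 * queue is full, the rport parks itself on the queue's wait list, and
 * the state machine re-drives the send from the QRESUME event (raised
 * by bfa_rport_qresume()).  bfa_rport_send_fwspeed() simply drops the
 * request instead of waiting.
 */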
4949
4950 static bfa_boolean_t
4951 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4952 {
4953 struct bfi_rport_delete_req_s *m;
4954
4955 /*
4956 * check for room in queue to send request now
4957 */
4958 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4959 if (!m) {
4960 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4961 return BFA_FALSE;
4962 }
4963
4964 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4965 bfa_fn_lpu(rp->bfa));
4966 m->fw_handle = rp->fw_handle;
4967
4968 /*
4969 * queue I/O message to firmware
4970 */
4971 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4972 return BFA_TRUE;
4973 }
4974
4975 static bfa_boolean_t
4976 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4977 {
4978 struct bfa_rport_speed_req_s *m;
4979
4980 /*
4981 * check for room in queue to send request now
4982 */
4983 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4984 if (!m) {
4985 bfa_trc(rp->bfa, rp->rport_info.speed);
4986 return BFA_FALSE;
4987 }
4988
4989 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4990 bfa_fn_lpu(rp->bfa));
4991 m->fw_handle = rp->fw_handle;
4992 m->speed = (u8)rp->rport_info.speed;
4993
4994 /*
4995 * queue I/O message to firmware
4996 */
4997 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4998 return BFA_TRUE;
4999 }
5000
5001
5002
5003 /*
5004 * bfa_rport_public
5005 */
5006
5007 /*
5008 * Rport interrupt processing.
5009 */
5010 void
5011 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
5012 {
5013 union bfi_rport_i2h_msg_u msg;
5014 struct bfa_rport_s *rp;
5015
5016 bfa_trc(bfa, m->mhdr.msg_id);
5017
5018 msg.msg = m;
5019
5020 switch (m->mhdr.msg_id) {
5021 case BFI_RPORT_I2H_CREATE_RSP:
5022 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
5023 rp->fw_handle = msg.create_rsp->fw_handle;
5024 rp->qos_attr = msg.create_rsp->qos_attr;
5025 bfa_rport_set_lunmask(bfa, rp);
5026 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
5027 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
5028 break;
5029
5030 case BFI_RPORT_I2H_DELETE_RSP:
5031 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
5032 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
5033 bfa_rport_unset_lunmask(bfa, rp);
5034 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
5035 break;
5036
5037 case BFI_RPORT_I2H_QOS_SCN:
5038 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
5039 rp->event_arg.fw_msg = msg.qos_scn_evt;
5040 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
5041 break;
5042
5043 case BFI_RPORT_I2H_LIP_SCN_ONLINE:
5044 bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
5045 &msg.lip_scn->loop_info);
5046 bfa_cb_rport_scn_online(bfa);
5047 break;
5048
5049 case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
5050 bfa_cb_rport_scn_offline(bfa);
5051 break;
5052
5053 case BFI_RPORT_I2H_NO_DEV:
5054 rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
5055 bfa_cb_rport_scn_no_dev(rp->rport_drv);
5056 break;
5057
5058 default:
5059 bfa_trc(bfa, m->mhdr.msg_id);
5060 WARN_ON(1);
5061 }
5062 }
5063
5064 void
5065 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5066 {
5067 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
5068 struct list_head *qe;
5069 int i;
5070
5071 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5072 bfa_q_deq_tail(&mod->rp_free_q, &qe);
5073 list_add_tail(qe, &mod->rp_unused_q);
5074 }
5075 }
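/*
 * Worked example: if the driver attached with 128 rports but firmware
 * reports support for only 96, the loop above moves the last 32 free
 * rports onto rp_unused_q so they can no longer be allocated; they are
 * spliced back into rp_free_q by bfa_rport_iocdisable().
 */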
5076
5077 /*
5078 * bfa_rport_api
5079 */
5080
5081 struct bfa_rport_s *
5082 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5083 {
5084 struct bfa_rport_s *rp;
5085
5086 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5087
5088 if (rp == NULL)
5089 return NULL;
5090
5091 rp->bfa = bfa;
5092 rp->rport_drv = rport_drv;
5093 memset(&rp->stats, 0, sizeof(rp->stats));
5094
5095 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5096 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5097
5098 return rp;
5099 }
5100
5101 void
5102 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
5103 {
5104 WARN_ON(rport_info->max_frmsz == 0);
5105
5106 /*
5107 * Some JBODs have been seen not to set the PDU size correctly in
5108 * their PLOGI responses; default to the minimum size in that case.
5109 */
5110 if (rport_info->max_frmsz == 0) {
5111 bfa_trc(rport->bfa, rport->rport_tag);
5112 rport_info->max_frmsz = FC_MIN_PDUSZ;
5113 }
5114
5115 rport->rport_info = *rport_info;
5116 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
5117 }
5118
5119 void
5120 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5121 {
5122 WARN_ON(speed == 0);
5123 WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5124
5125 if (rport) {
5126 rport->rport_info.speed = speed;
5127 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5128 }
5129 }
5130
5131 /* Set Rport LUN Mask */
5132 void
5133 bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
5134 {
5135 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
5136 wwn_t lp_wwn, rp_wwn;
5137 u8 lp_tag = (u8)rp->rport_info.lp_tag;
5138
5139 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
5140 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
5141
5142 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
5143 rp->lun_mask = BFA_TRUE;
5144 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
5145 }
5146
5147 /* Unset Rport LUN mask */
5148 void
5149 bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
5150 {
5151 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
5152 wwn_t lp_wwn, rp_wwn;
5153
5154 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
5155 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
5156
5157 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
5158 rp->lun_mask = BFA_FALSE;
5159 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
5160 BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
5161 }
5162
5163 /*
5164 * SGPG related functions
5165 */
5166
5167 /*
5168 * Compute and return memory needed by the SGPG module.
5169 */
5170 void
5171 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5172 struct bfa_s *bfa)
5173 {
5174 struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
5175 struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
5176 struct bfa_mem_dma_s *seg_ptr;
5177 u16 nsegs, idx, per_seg_sgpg, num_sgpg;
5178 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
5179
5180 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
5181 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
5182 else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
5183 cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
5184
5185 num_sgpg = cfg->drvcfg.num_sgpgs;
5186
5187 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
5188 per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
5189
5190 bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
5191 if (num_sgpg >= per_seg_sgpg) {
5192 num_sgpg -= per_seg_sgpg;
5193 bfa_mem_dma_setup(minfo, seg_ptr,
5194 per_seg_sgpg * sgpg_sz);
5195 } else
5196 bfa_mem_dma_setup(minfo, seg_ptr,
5197 num_sgpg * sgpg_sz);
5198 }
5199
5200 /* kva memory */
5201 bfa_mem_kva_setup(minfo, sgpg_kva,
5202 cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
5203 }
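/*
 * Rough sketch of the sizing above, with hypothetical numbers (the
 * real segment size comes from the BFI memory macros): with a 4 KB
 * DMA segment and a 512-byte SGPG, BFI_MEM_NREQS_SEG() would yield
 * 8 SGPGs per segment, so a request for 20 SGPGs would map to
 * nsegs = 3, sized for 8, 8 and 4 SGPGs respectively.
 */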
5204
5205 void
5206 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5207 struct bfa_pcidev_s *pcidev)
5208 {
5209 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5210 struct bfa_sgpg_s *hsgpg;
5211 struct bfi_sgpg_s *sgpg;
5212 u64 align_len;
5213 struct bfa_mem_dma_s *seg_ptr;
5214 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
5215 u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
5216
5217 union {
5218 u64 pa;
5219 union bfi_addr_u addr;
5220 } sgpg_pa, sgpg_pa_tmp;
5221
5222 INIT_LIST_HEAD(&mod->sgpg_q);
5223 INIT_LIST_HEAD(&mod->sgpg_wait_q);
5224
5225 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
5226
5227 mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
5228
5229 num_sgpg = cfg->drvcfg.num_sgpgs;
5230 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
5231
5232 /* dma/kva mem claim */
5233 hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
5234
5235 bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
5236
5237 if (!bfa_mem_dma_virt(seg_ptr))
5238 break;
5239
5240 align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
5241 bfa_mem_dma_phys(seg_ptr);
5242
5243 sgpg = (struct bfi_sgpg_s *)
5244 (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
5245 sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
5246 WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
5247
5248 per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
5249
5250 for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
5251 memset(hsgpg, 0, sizeof(*hsgpg));
5252 memset(sgpg, 0, sizeof(*sgpg));
5253
5254 hsgpg->sgpg = sgpg;
5255 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
5256 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
5257 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
5258
5259 sgpg++;
5260 hsgpg++;
5261 sgpg_pa.pa += sgpg_sz;
5262 }
5263 }
5264
5265 bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
5266 }
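/*
 * The alignment arithmetic above: BFA_SGPG_ROUNDUP() rounds the
 * segment's physical address up to the next sgpg_sz boundary, and
 * align_len is the gap skipped at the front of the segment so that
 * every SGPG starts on an sgpg_sz-aligned physical address (asserted
 * by the WARN_ON on sgpg_pa).
 */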
5267
5268 bfa_status_t
5269 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5270 {
5271 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5272 struct bfa_sgpg_s *hsgpg;
5273 int i;
5274
5275 if (mod->free_sgpgs < nsgpgs)
5276 return BFA_STATUS_ENOMEM;
5277
5278 for (i = 0; i < nsgpgs; i++) {
5279 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5280 WARN_ON(!hsgpg);
5281 list_add_tail(&hsgpg->qe, sgpg_q);
5282 }
5283
5284 mod->free_sgpgs -= nsgpgs;
5285 return BFA_STATUS_OK;
5286 }
5287
5288 void
5289 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5290 {
5291 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5292 struct bfa_sgpg_wqe_s *wqe;
5293
5294 mod->free_sgpgs += nsgpg;
5295 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
5296
5297 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5298
5299 if (list_empty(&mod->sgpg_wait_q))
5300 return;
5301
5302 /*
5303 * satisfy as many waiting requests as possible
5304 */
5305 do {
5306 wqe = bfa_q_first(&mod->sgpg_wait_q);
5307 if (mod->free_sgpgs < wqe->nsgpg)
5308 nsgpg = mod->free_sgpgs;
5309 else
5310 nsgpg = wqe->nsgpg;
5311 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
5312 wqe->nsgpg -= nsgpg;
5313 if (wqe->nsgpg == 0) {
5314 list_del(&wqe->qe);
5315 wqe->cbfn(wqe->cbarg);
5316 }
5317 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
5318 }
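/*
 * Note the partial-grant behaviour above: a waiter may receive fewer
 * SGPGs than it asked for, in which case its wqe->nsgpg is reduced and
 * it stays on the wait queue; its callback fires only once nsgpg
 * reaches zero.
 */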
5319
5320 void
5321 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5322 {
5323 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5324
5325 WARN_ON(nsgpg <= 0);
5326 WARN_ON(nsgpg <= mod->free_sgpgs);
5327
5328 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5329
5330 /*
5331 * hand any remaining free SGPGs to this waiter first
5332 */
5333 if (mod->free_sgpgs) {
5334 /*
5335 * no one else is waiting for SGPG
5336 */
5337 WARN_ON(!list_empty(&mod->sgpg_wait_q));
5338 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5339 wqe->nsgpg -= mod->free_sgpgs;
5340 mod->free_sgpgs = 0;
5341 }
5342
5343 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
5344 }
5345
5346 void
5347 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5348 {
5349 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5350
5351 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5352 list_del(&wqe->qe);
5353
5354 if (wqe->nsgpg_total != wqe->nsgpg)
5355 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5356 wqe->nsgpg_total - wqe->nsgpg);
5357 }
5358
5359 void
5360 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5361 void *cbarg)
5362 {
5363 INIT_LIST_HEAD(&wqe->sgpg_q);
5364 wqe->cbfn = cbfn;
5365 wqe->cbarg = cbarg;
5366 }
5367
5368 /*
5369 * UF related functions
5370 */
5371 /*
5372 *****************************************************************************
5373 * Internal functions
5374 *****************************************************************************
5375 */
5376 static void
5377 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5378 {
5379 struct bfa_uf_s *uf = cbarg;
5380 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5381
5382 if (complete)
5383 ufm->ufrecv(ufm->cbarg, uf);
5384 }
5385
5386 static void
5387 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5388 {
5389 struct bfi_uf_buf_post_s *uf_bp_msg;
5390 u16 i;
5391 u16 buf_len;
5392
5393 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5394 uf_bp_msg = ufm->uf_buf_posts;
5395
5396 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5397 i++, uf_bp_msg++) {
5398 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5399
5400 uf_bp_msg->buf_tag = i;
5401 buf_len = sizeof(struct bfa_uf_buf_s);
5402 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5403 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5404 bfa_fn_lpu(ufm->bfa));
5405 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5406 }
5407
5408 /*
5409 * advance pointer beyond consumed memory
5410 */
5411 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5412 }
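/*
 * The messages built above are templates: one fully initialized
 * BFI_UF_H2I_BUF_POST request per UF buffer, kept in kva memory so
 * that bfa_uf_post() can simply memcpy a template into a free
 * request-queue slot instead of rebuilding it on every repost.
 */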
5413
5414 static void
5415 claim_ufs(struct bfa_uf_mod_s *ufm)
5416 {
5417 u16 i;
5418 struct bfa_uf_s *uf;
5419
5420 /*
5421 * Claim block of memory for UF list
5422 */
5423 ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5424
5425 /*
5426 * Initialize UFs and queue them in the UF free queue
5427 */
5428 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5429 memset(uf, 0, sizeof(struct bfa_uf_s));
5430 uf->bfa = ufm->bfa;
5431 uf->uf_tag = i;
5432 uf->pb_len = BFA_PER_UF_DMA_SZ;
5433 uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5434 uf->buf_pa = ufm_pbs_pa(ufm, i);
5435 list_add_tail(&uf->qe, &ufm->uf_free_q);
5436 }
5437
5438 /*
5439 * advance memory pointer
5440 */
5441 bfa_mem_kva_curp(ufm) = (u8 *) uf;
5442 }
5443
5444 static void
5445 uf_mem_claim(struct bfa_uf_mod_s *ufm)
5446 {
5447 claim_ufs(ufm);
5448 claim_uf_post_msgs(ufm);
5449 }
5450
5451 void
5452 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5453 struct bfa_s *bfa)
5454 {
5455 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5456 struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5457 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
5458 struct bfa_mem_dma_s *seg_ptr;
5459 u16 nsegs, idx, per_seg_uf = 0;
5460
5461 nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5462 per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5463
5464 bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5465 if (num_ufs >= per_seg_uf) {
5466 num_ufs -= per_seg_uf;
5467 bfa_mem_dma_setup(minfo, seg_ptr,
5468 per_seg_uf * BFA_PER_UF_DMA_SZ);
5469 } else
5470 bfa_mem_dma_setup(minfo, seg_ptr,
5471 num_ufs * BFA_PER_UF_DMA_SZ);
5472 }
5473
5474 /* kva memory */
5475 bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5476 (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5477 }
5478
5479 void
5480 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5481 struct bfa_pcidev_s *pcidev)
5482 {
5483 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5484
5485 ufm->bfa = bfa;
5486 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5487 INIT_LIST_HEAD(&ufm->uf_free_q);
5488 INIT_LIST_HEAD(&ufm->uf_posted_q);
5489 INIT_LIST_HEAD(&ufm->uf_unused_q);
5490
5491 uf_mem_claim(ufm);
5492 }
5493
5494 static struct bfa_uf_s *
5495 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5496 {
5497 struct bfa_uf_s *uf;
5498
5499 bfa_q_deq(&uf_mod->uf_free_q, &uf);
5500 return uf;
5501 }
5502
5503 static void
5504 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5505 {
5506 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5507 }
5508
5509 static bfa_status_t
5510 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5511 {
5512 struct bfi_uf_buf_post_s *uf_post_msg;
5513
5514 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5515 if (!uf_post_msg)
5516 return BFA_STATUS_FAILED;
5517
5518 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5519 sizeof(struct bfi_uf_buf_post_s));
5520 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5521
5522 bfa_trc(ufm->bfa, uf->uf_tag);
5523
5524 list_add_tail(&uf->qe, &ufm->uf_posted_q);
5525 return BFA_STATUS_OK;
5526 }
5527
5528 static void
5529 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5530 {
5531 struct bfa_uf_s *uf;
5532
5533 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5534 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5535 break;
5536 }
5537 }
5538
5539 static void
5540 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5541 {
5542 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5543 u16 uf_tag = m->buf_tag;
5544 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5545 struct bfa_uf_buf_s *uf_buf;
5546 uint8_t *buf;
5547 struct fchs_s *fchs;
5548
5549 uf_buf = (struct bfa_uf_buf_s *)
5550 bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5551 buf = &uf_buf->d[0];
5552
5553 m->frm_len = be16_to_cpu(m->frm_len);
5554 m->xfr_len = be16_to_cpu(m->xfr_len);
5555
5556 fchs = (struct fchs_s *)uf_buf;
5557
5558 list_del(&uf->qe); /* dequeue from posted queue */
5559
5560 uf->data_ptr = buf;
5561 uf->data_len = m->xfr_len;
5562
5563 WARN_ON(uf->data_len < sizeof(struct fchs_s));
5564
5565 if (uf->data_len == sizeof(struct fchs_s)) {
5566 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5567 uf->data_len, (struct fchs_s *)buf);
5568 } else {
5569 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5570 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5571 BFA_PL_EID_RX, uf->data_len,
5572 (struct fchs_s *)buf, pld_w0);
5573 }
5574
5575 if (bfa->fcs)
5576 __bfa_cb_uf_recv(uf, BFA_TRUE);
5577 else
5578 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5579 }
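/*
 * Receive flow in brief: the frame lands in the buffer posted for
 * m->buf_tag, the UF is dequeued from uf_posted_q, and the handler
 * registered via bfa_uf_recv_register() is invoked either directly
 * (FCS context) or deferred through the callback queue.  The handler
 * owns the UF until it returns it with bfa_uf_free(), which recycles
 * and reposts the buffer.
 */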
5580
5581 void
5582 bfa_uf_iocdisable(struct bfa_s *bfa)
5583 {
5584 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5585 struct bfa_uf_s *uf;
5586 struct list_head *qe, *qen;
5587
5588 /* Enqueue unused uf resources to free_q */
5589 list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5590
5591 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5592 uf = (struct bfa_uf_s *) qe;
5593 list_del(&uf->qe);
5594 bfa_uf_put(ufm, uf);
5595 }
5596 }
5597
5598 void
5599 bfa_uf_start(struct bfa_s *bfa)
5600 {
5601 bfa_uf_post_all(BFA_UF_MOD(bfa));
5602 }
5603
5604 /*
5605 * Register a handler for all unsolicited receive frames.
5606 *
5607 * @param[in] bfa BFA instance
5608 * @param[in] ufrecv receive handler function
5609 * @param[in] cbarg receive handler arg
5610 */
5611 void
5612 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5613 {
5614 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5615
5616 ufm->ufrecv = ufrecv;
5617 ufm->cbarg = cbarg;
5618 }
5619
5620 /*
5621 * Free an unsolicited frame back to BFA.
5622 *
5623 * @param[in] uf unsolicited frame to be freed
5624 *
5625 * @return None
5626 */
5627 void
5628 bfa_uf_free(struct bfa_uf_s *uf)
5629 {
5630 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5631 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5632 }
5633
5634
5635
5636 /*
5637 * uf_pub BFA uf module public functions
5638 */
5639 void
5640 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5641 {
5642 bfa_trc(bfa, msg->mhdr.msg_id);
5643
5644 switch (msg->mhdr.msg_id) {
5645 case BFI_UF_I2H_FRM_RCVD:
5646 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5647 break;
5648
5649 default:
5650 bfa_trc(bfa, msg->mhdr.msg_id);
5651 WARN_ON(1);
5652 }
5653 }
5654
5655 void
5656 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5657 {
5658 struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
5659 struct list_head *qe;
5660 int i;
5661
5662 for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5663 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5664 list_add_tail(qe, &mod->uf_unused_q);
5665 }
5666 }

/*
 * D-port forward declarations
 */

enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test completed successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport is enabled */
};

/*
 * BFA DPORT state machine events
 */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event */
	BFA_DPORT_SM_DISABLE	= 2,	/* dport disable event */
	BFA_DPORT_SM_FWRSP	= 3,	/* fw enable/disable rsp */
	BFA_DPORT_SM_QRESUME	= 4,	/* CQ space available */
	BFA_DPORT_SM_HWFAIL	= 5,	/* IOC h/w failure */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure */
	BFA_DPORT_SM_SCN	= 8,	/* state change notification from fw */
};

static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
					enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
				 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
					 enum bfa_dport_sm_event event);
static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
				   enum bfa_dport_sm_event event);
static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
					enum bfa_dport_sm_event event);
static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
				  enum bfa_dport_sm_event event);
static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
					   enum bfa_dport_sm_event event);
static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
						 enum bfa_dport_sm_event event);
static void bfa_dport_qresume(void *cbarg);
static void bfa_dport_req_comp(struct bfa_dport_s *dport,
			       struct bfi_diag_dport_rsp_s *msg);
static void bfa_dport_scn(struct bfa_dport_s *dport,
			  struct bfi_diag_dport_scn_s *msg);

/*
 * BFA fcdiag module
 */
#define BFA_DIAG_QTEST_TOV	1000	/* msec */

/*
 * Set port status to busy
 */
static void
bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);

	if (fcdiag->lb.lock)
		fcport->diag_busy = BFA_TRUE;
	else
		fcport->diag_busy = BFA_FALSE;
}

void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		  struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	fcdiag->bfa = bfa;
	fcdiag->trcmod = bfa->trcmod;
	/* The common DIAG attach, bfa_diag_attach(), claims all memory */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
	dport->test_state = BFA_DPORT_ST_DISABLED;
	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}

void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}

static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}

static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32 i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
		    bfa_fn_lpu(fcdiag->bfa));

	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring doorbell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}

static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			  bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			   fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop the timer once all queues are done */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}

static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			 struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
	res->status = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}

static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			 struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		    bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring doorbell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}

/*
 * cpe/rme intr handler
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	case BFI_DIAG_I2H_DPORT:
		bfa_dport_req_comp(&fcdiag->dport,
				(struct bfi_diag_dport_rsp_s *)msg);
		break;
	case BFI_DIAG_I2H_DPORT_SCN:
		bfa_dport_scn(&fcdiag->dport,
				(struct bfi_diag_dport_scn_s *)msg);
		break;
	default:
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 * Loopback test
 *
 * @param[in]	*bfa	- bfa data struct
 * @param[in]	opmode	- port operation mode
 * @param[in]	speed	- port speed
 * @param[in]	lpcnt	- loop count
 * @param[in]	pat	- pattern to build packet
 * @param[in]	*result	- pointer to bfa_diag_loopback_result_t data struct
 * @param[in]	cbfn	- callback function
 * @param[in]	cbarg	- callback function arg
 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		    enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		    struct bfa_diag_loopback_result_s *result,
		    bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}
	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
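
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * a management path could kick off a loopback test as below. The names
 * diag_done and my_drv are hypothetical; the port must already be
 * disabled, and lb_res must stay valid until diag_done() runs because
 * bfa_fcdiag_loopback() stores the result pointer for the fw completion:
 *
 *	static struct bfa_diag_loopback_result_s lb_res;
 *	bfa_status_t rc;
 *
 *	rc = bfa_fcdiag_loopback(bfa, opmode, BFA_PORT_SPEED_AUTO,
 *			8, 0xA5A5A5A5, &lb_res, diag_done, my_drv);
 *	if (rc != BFA_STATUS_OK)
 *		return rc;	// rejected synchronously (busy, speed, ...)
 *	// on success, diag_done(my_drv, status) fires from
 *	// bfa_fcdiag_loopback_comp() when the fw response arrives
 */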

/*
 * DIAG queue test command
 *
 * @param[in]	*bfa	- bfa data struct
 * @param[in]	force	- 1: don't do ioc op checking
 * @param[in]	queue	- queue no. to test
 * @param[in]	*result	- pointer to bfa_diag_qtest_result_t data struct
 * @param[in]	cbfn	- callback function
 * @param[in]	*cbarg	- callback function arg
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		     struct bfa_diag_qtest_result_s *result,
		     bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;

	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count = 0;

	/* send */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all = 0;
	} else {
		fcdiag->qtest.result->queue = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
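
/*
 * Illustrative usage sketch (editor's addition, hypothetical caller):
 * passing a queue number >= BFI_IOC_MAX_CQS selects the test-all-queues
 * mode set up above; force is normally 0 so the IOC-operational check
 * applies. qt_done and my_drv are made-up names, and qt_res must stay
 * valid until the callback runs:
 *
 *	static struct bfa_diag_qtest_result_s qt_res;
 *
 *	rc = bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS,
 *			&qt_res, qt_done, my_drv);
 */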

/*
 * Check whether the DIAG loopback test is running
 *
 * @param[in]	*bfa	- bfa data struct
 */
bfa_status_t
bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
}

/*
 * D-port
 */
#define bfa_dport_result_start(__dport, __mode) do {			\
	(__dport)->result.start_time = bfa_get_log_time();		\
	(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
	(__dport)->result.mode = (__mode);				\
	(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
	(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
	(__dport)->result.lpcnt = (__dport)->lpcnt;			\
} while (0)

static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
					enum bfi_dport_req req);

static void
bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
{
	if (dport->cbfn != NULL) {
		dport->cbfn(dport->cbarg, bfa_status);
		dport->cbfn = NULL;
		dport->cbarg = NULL;
	}
}

static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
		       sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			     enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* no state change */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_starting);
		bfa_dport_send_req(dport, BFI_DPORT_START);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
		       sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru */

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
				     enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}

static bfa_boolean_t
bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
{
	struct bfi_diag_dport_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
	if (!m) {
		bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
		    bfa_fn_lpu(dport->bfa));
	m->req = req;
	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
		m->lpcnt = cpu_to_be32(dport->lpcnt);
		m->payload = cpu_to_be32(dport->payload);
	}

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);

	return BFA_TRUE;
}

static void
bfa_dport_qresume(void *cbarg)
{
	struct bfa_dport_s *dport = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
}

static void
bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
{
	msg->status = cpu_to_be32(msg->status);
	dport->i2hmsg.rsp.status = msg->status;
	dport->rp_pwwn = msg->pwwn;
	dport->rp_nwwn = msg->nwwn;

	if ((msg->status == BFA_STATUS_OK) ||
	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
		bfa_trc(dport->bfa, msg->status);
		bfa_trc(dport->bfa, dport->rp_pwwn);
		bfa_trc(dport->bfa, dport->rp_nwwn);
		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
	} else {
		bfa_trc(dport->bfa, msg->status);
		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
	}
	bfa_cb_fcdiag_dport(dport, msg->status);
}

static bfa_boolean_t
bfa_dport_is_sending_req(struct bfa_dport_s *dport)
{
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting) ||
	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
		return BFA_TRUE;
	} else {
		return BFA_FALSE;
	}
}

static void
bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
{
	int i;
	uint8_t subtesttype;

	bfa_trc(dport->bfa, msg->state);
	dport->i2hmsg.scn.state = msg->state;

	switch (dport->i2hmsg.scn.state) {
	case BFI_DPORT_SCN_TESTCOMP:
		dport->result.end_time = bfa_get_log_time();
		bfa_trc(dport->bfa, dport->result.end_time);

		dport->result.status = msg->info.testcomp.status;
		bfa_trc(dport->bfa, dport->result.status);

		dport->result.roundtrip_latency =
			cpu_to_be32(msg->info.testcomp.latency);
		dport->result.est_cable_distance =
			cpu_to_be32(msg->info.testcomp.distance);
		dport->result.buffer_required =
			be16_to_cpu(msg->info.testcomp.numbuffer);

		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
		dport->result.speed = msg->info.testcomp.speed;

		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
		bfa_trc(dport->bfa, dport->result.est_cable_distance);
		bfa_trc(dport->bfa, dport->result.buffer_required);
		bfa_trc(dport->bfa, dport->result.frmsz);
		bfa_trc(dport->bfa, dport->result.speed);

		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
			dport->result.subtest[i].status =
				msg->info.testcomp.subtest_status[i];
			bfa_trc(dport->bfa, dport->result.subtest[i].status);
		}
		break;

	case BFI_DPORT_SCN_TESTSKIP:
	case BFI_DPORT_SCN_DDPORT_ENABLE:
		memset(&dport->result, 0,
		       sizeof(struct bfa_diag_dport_result_s));
		break;

	case BFI_DPORT_SCN_TESTSTART:
		memset(&dport->result, 0,
		       sizeof(struct bfa_diag_dport_result_s));
		dport->rp_pwwn = msg->info.teststart.pwwn;
		dport->rp_nwwn = msg->info.teststart.nwwn;
		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
		bfa_dport_result_start(dport, msg->info.teststart.mode);
		break;

	case BFI_DPORT_SCN_SUBTESTSTART:
		subtesttype = msg->info.teststart.type;
		dport->result.subtest[subtesttype].start_time =
			bfa_get_log_time();
		dport->result.subtest[subtesttype].status =
			DPORT_TEST_ST_INPRG;

		bfa_trc(dport->bfa, subtesttype);
		bfa_trc(dport->bfa,
			dport->result.subtest[subtesttype].start_time);
		break;

	case BFI_DPORT_SCN_SFP_REMOVED:
	case BFI_DPORT_SCN_DDPORT_DISABLED:
	case BFI_DPORT_SCN_DDPORT_DISABLE:
	case BFI_DPORT_SCN_FCPORT_DISABLE:
		dport->result.status = DPORT_TEST_ST_IDLE;
		break;

	default:
		bfa_sm_fault(dport->bfa, msg->state);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
}

/*
 * Dport enable
 *
 * @param[in]	*bfa	- bfa data struct
 */
bfa_status_t
bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
		 bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Dport is not supported on Mezz cards
	 */
	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
	}

	/*
	 * Dport is supported only on CT2 and later ASICs
	 */
	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	}

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/*
	 * Check if port mode is FC port
	 */
	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
		return BFA_STATUS_CMD_NOTSUPP_CNA;
	}

	/*
	 * Check if port is in LOOP mode
	 */
	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_TOPOLOGY_LOOP;
	}

	/*
	 * Check if port is in TRUNK mode
	 */
	if (bfa_fcport_is_trunk_enabled(bfa)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_ERROR_TRUNK_ENABLED;
	}

	/*
	 * Check if diag loopback is running
	 */
	if (bfa_fcdiag_lb_is_running(bfa)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DIAG_BUSY;
	}

	/*
	 * Check to see if the port is disabled or in dport state
	 */
	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if dport is in dynamic mode
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is already enabled
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_ENABLED;
	}

	bfa_trc(dport->bfa, lpcnt);
	bfa_trc(dport->bfa, pat);
	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
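
/*
 * Illustrative usage sketch (editor's addition, hypothetical caller):
 * enabling D-port test mode with defaults. dport_done and my_drv are
 * made-up names; passing 0 for lpcnt/pat selects
 * DPORT_ENABLE_LOOPCNT_DEFAULT and LB_PATTERN_DEFAULT as coded above:
 *
 *	rc = bfa_dport_enable(bfa, 0, 0, dport_done, my_drv);
 *	// on BFA_STATUS_OK the fw response arrives via dport_done();
 *	// test progress is then reported through BFI_DIAG_I2H_DPORT_SCN
 *	// events handled by bfa_dport_scn()
 */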

/*
 * Dport disable
 *
 * @param[in]	*bfa	- bfa data struct
 */
bfa_status_t
bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/*
	 * Check if dport is in dynamic mode
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check to see if the port is disabled or in dport state
	 */
	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is already disabled
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;
	}

	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}

/*
 * Dport start -- restart dport test
 *
 * @param[in]	*bfa	- bfa data struct
 */
bfa_status_t
bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * Check if dport is in dynamic mode
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is in enabled state.
	 * The test can only be restarted when the previous test has completed
	 */
	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;
	} else {
		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
			return BFA_STATUS_DPORT_INV_SFP;

		if (dport->test_state == BFA_DPORT_ST_INP)
			return BFA_STATUS_DEVBUSY;

		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
	}

	bfa_trc(dport->bfa, lpcnt);
	bfa_trc(dport->bfa, pat);

	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;

	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
	return BFA_STATUS_OK;
}

/*
 * Dport show -- return dport test result
 *
 * @param[in]	*bfa	- bfa data struct
 */
bfa_status_t
bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is in enabled state.
	 */
	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;
	}

	/*
	 * Check if there is SFP
	 */
	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
		return BFA_STATUS_DPORT_INV_SFP;

	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));

	return BFA_STATUS_OK;
}
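
/*
 * Illustrative D-port lifecycle sketch (editor's addition, hypothetical
 * caller), tying the four entry points together; dport_done, my_drv and
 * dport_res are made-up names and error handling is omitted. Note that
 * bfa_dport_show() copies the results synchronously, so a stack buffer
 * is fine there:
 *
 *	struct bfa_diag_dport_result_s dport_res;
 *
 *	bfa_dport_enable(bfa, 0, 0, dport_done, my_drv);
 *	// ... wait for dport_done and a BFI_DPORT_SCN_TESTCOMP event ...
 *	bfa_dport_show(bfa, &dport_res);		// copy out results
 *	bfa_dport_start(bfa, 0, 0, dport_done, my_drv);	// optional re-run
 *	bfa_dport_disable(bfa, dport_done, my_drv);
 */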