/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcdiag);
BFA_MODULE(fcxp);
BFA_MODULE(sgpg);
BFA_MODULE(lps);
BFA_MODULE(fcport);
BFA_MODULE(rport);
BFA_MODULE(uf);

/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS      (1)
#define BFA_LPS_MAX_LPORTS      (256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
#define BFA_LPS_MAX_VPORTS_SUPP_CT  190


/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if the corresponding physical port or
 * IOC is disabled explicitly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
};

/*
 * BFA port link notification state machine events
 */

enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
};

/*
 * RPORT related definitions
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
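
/*
 * Both macros follow the convention used throughout this file: when the
 * FCS is present (bfa->fcs), completion callbacks are invoked directly;
 * otherwise they are deferred through the hcb queue and invoked later
 * from the completion handler context.
 */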

/*
 * forward declarations for FCXP related functions
 */
static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void	bfa_fcxp_qresume(void *cbarg);
static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
				struct bfi_fcxp_send_req_s *send_req);

/*
 * forward declarations for LPS functions
 */
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
				struct bfa_iocfc_cfg_s *cfg,
				struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
				struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
				struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);

/*
 * forward declarations for the LPS state machine
 */
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);

/*
 * forward declarations for FC Port functions
 */
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
			enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
				enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);

/*
 * forward declarations for the FC PORT state machine
 */
static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);

static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);

static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};


/*
 * forward declarations for RPORT related functions
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void		bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void		__bfa_cb_rport_online(void *cbarg,
						bfa_boolean_t complete);
static void		__bfa_cb_rport_offline(void *cbarg,
						bfa_boolean_t complete);

/*
 * forward declarations for the RPORT state machine
 */
static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);

/*
 * PLOG related definitions
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type == BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}

static u64
bfa_get_log_time(void)
{
	u64 system_time = 0;
	struct timeval tv;
	do_gettimeofday(&tv);

	/* We are interested in seconds only. */
	system_time = tv.tv_sec;
	return system_time;
}

static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

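	/* ring full: advance head so that the oldest record is overwritten */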
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}

void
bfa_plog_init(struct bfa_plog_s *plog)
{
	memset((char *)plog, 0, sizeof(struct bfa_plog_s));

	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
	plog->head = plog->tail = 0;
	plog->plog_enabled = 1;
}

void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s  lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		strncpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ - 1);
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}
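
/*
 * Illustrative usage only (the misc value and message are hypothetical):
 *
 *	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
 *		     0, "sample trace message");
 */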

void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s  lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}

void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}

void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
		      u32 pld_w0)
{
	struct bfa_plog_rec_s  lp;
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}


/*
 *  fcxp_pvt BFA FCXP private functions
 */

static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	bfa_mem_kva_curp(mod) = (void *)fcxp;
}

static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}

static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxps_mem(mod);
}

static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}

static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
{
	struct bfa_fcxp_s *fcxp;

	bfa_q_deq(&fm->fcxp_free_q, &fcxp);

	if (fcxp)
		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

	return fcxp;
}

static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		WARN_ON(*sga_cbfn == NULL);
		WARN_ON(*sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}

}

static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}

static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}

static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}

static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcxp_s *fcxp = cbarg;

	if (complete) {
		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
				fcxp->rsp_status, fcxp->rsp_len,
				fcxp->residue_len, &fcxp->rsp_fchs);
	} else {
		bfa_fcxp_free(fcxp);
	}
}

static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}

static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
		 struct fchs_s *fchs)
{
	/*
	 * TODO: TX ox_id
	 */
	if (reqlen > 0) {
		if (fcxp->use_ireqbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s), fchs,
					pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
					BFA_PL_EID_TX,
					reqlen + sizeof(struct fchs_s),
					fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
			       reqlen + sizeof(struct fchs_s), fchs);
	}
}

static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	if (fcxp_rsp->rsp_len > 0) {
		if (fcxp->use_irspbuf) {
			u32	pld_w0 =
				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));

			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
					      BFA_PL_EID_RX,
					      (u16) fcxp_rsp->rsp_len,
					      &fcxp_rsp->fchs, pld_w0);
		} else {
			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
				       BFA_PL_EID_RX,
				       (u16) fcxp_rsp->rsp_len,
				       &fcxp_rsp->fchs);
		}
	} else {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
	}
}

/*
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_fn_lpu(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			WARN_ON(fcxp->nreq_sgles != 1);
			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
				fcxp->req_sga_cbfn(fcxp->caller, 0));
		} else {
			WARN_ON(reqi->req_tot_len != 0);
			bfa_alen_set(&send_req->req_alen, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);

		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));
	} else {
		if (fcxp->nrsp_sgles > 0) {
			WARN_ON(fcxp->nrsp_sgles != 1);
			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
				fcxp->rsp_sga_cbfn(fcxp->caller, 0));

		} else {
			WARN_ON(rspi->rsp_maxlen != 0);
			bfa_alen_set(&send_req->rsp_alen, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}

/*
 * Allocate an FCXP instance to send a response or to send a request
 * that has a response. Request/response buffers are allocated by caller.
 *
 * @param[in]	caller		context passed back to the SG callbacks
 * @param[in]	bfa		BFA bfa instance
 * @param[in]	nreq_sgles	Number of SG elements required for request
 *				buffer. 0, if fcxp internal buffers are used.
 *				Use bfa_fcxp_get_reqbuf() to get the
 *				internal req buffer.
 * @param[in]	nrsp_sgles	Number of SG elements required for response
 *				buffer. 0, if fcxp internal buffers are used.
 * @param[in]	req_sga_cbfn	function ptr to be called to get a request SG
 *				address (given the sge index).
 * @param[in]	req_sglen_cbfn	function ptr to be called to get a request SG
 *				len (given the sge index).
 * @param[in]	rsp_sga_cbfn	function ptr to be called to get a response SG
 *				address (given the sge index).
 * @param[in]	rsp_sglen_cbfn	function ptr to be called to get a response SG
 *				len (given the sge index).
 *
 * @return FCXP instance. NULL on failure.
 */
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_s *fcxp = NULL;

	WARN_ON(bfa == NULL);

	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
	if (fcxp == NULL)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}
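
/*
 * Illustrative pairing of the allocation and send paths (a sketch only:
 * my_cbfn, my_arg, req_pld, reqlen, rport, vf_id, lp_tag, fchs and
 * rsp_timeout are hypothetical, and the bfa_fcxp_alloc_wait() fallback
 * for an empty free list is omitted):
 *
 *	fcxp = bfa_fcxp_alloc(my_arg, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *	if (fcxp == NULL)
 *		return;
 *	memcpy(bfa_fcxp_get_reqbuf(fcxp), req_pld, reqlen);
 *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
 *		      reqlen, &fchs, my_cbfn, my_arg,
 *		      bfa_fcxp_get_maxrsp(bfa), rsp_timeout);
 */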

/*
 * Get the internal request buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal request buffer
 */
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*reqbuf;

	WARN_ON(fcxp->use_ireqbuf != 1);
	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);
	return reqbuf;
}

u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

	return mod->req_pld_sz;
}

/*
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*fcxp_buf;

	WARN_ON(fcxp->use_irspbuf != 1);

	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);

	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}

/*
 * Free the BFA FCXP
 *
 * @param[in]	fcxp			BFA fcxp pointer
 *
 * @return		void
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod;

	WARN_ON(fcxp == NULL);
	mod = fcxp->fcxp_mod;
	bfa_trc(mod->bfa, fcxp->fcxp_tag);
	bfa_fcxp_put(fcxp);
}

/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *			the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_maxlen
 *			maximum response length expected
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return		void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}

/*
 * Abort a BFA FCXP
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	WARN_ON(1);
	return BFA_STATUS_OK;
}

void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	WARN_ON(!list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}

void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}

void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn = bfa_fcxp_null_comp;
}

void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	return mod->rsp_pld_sz;
}

void
bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct list_head	*qe;
	int	i;

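	/* park FCXPs beyond the f/w configured count on the unused queue */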
	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
		bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
		list_add_tail(qe, &mod->fcxp_unused_q);
	}
}

/*
 *  BFA LPS state machine functions
 */

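/*
 * Transition summary (condensed from the handlers below):
 *
 *	init   --LOGIN--> login (or loginwait if the request queue is full)
 *	login  --FWRSP(OK)--> online; FWRSP(fail)/OFFLINE/DELETE --> init
 *	online --LOGOUT--> logout (or logowait); RX_CVL --> init, with the
 *	       vport module notified via bfa_lps_cvl_event()
 *	logout --FWRSP--> init
 */
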
/*
 * Init state -- no login
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when the fabric detects loopback and discards
		 * the lps request. FW will eventually send out the timeout.
		 * Just ignore.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * login complete -- awaiting space in the request queue to send the
 * assigned N2N PID to firmware
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}

/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}



/*
 *  lps_pvt BFA LPS private functions
 */

/*
 * return memory requirement
 */
static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
	else
		bfa_mem_kva_setup(minfo, lps_kva,
			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
}

/*
 * bfa module attach at initialization time
 */
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_pcidev_s *pcidev)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	int			i;

	if (cfg->drvcfg.min_cfg)
		mod->num_lps = BFA_LPS_MIN_LPORTS;
	else
		mod->num_lps = BFA_LPS_MAX_LPORTS;
	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);

	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);

	INIT_LIST_HEAD(&mod->lps_free_q);
	INIT_LIST_HEAD(&mod->lps_active_q);
	INIT_LIST_HEAD(&mod->lps_login_q);

	for (i = 0; i < mod->num_lps; i++, lps++) {
		lps->bfa	= bfa;
		lps->bfa_tag	= (u8) i;
		lps->reqq	= BFA_REQQ_LPS;
		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
		list_add_tail(&lps->qe, &mod->lps_free_q);
	}
}

static void
bfa_lps_detach(struct bfa_s *bfa)
{
}

static void
bfa_lps_start(struct bfa_s *bfa)
{
}

static void
bfa_lps_stop(struct bfa_s *bfa)
{
}

/*
 * IOC in disabled state -- consider all lps offline
 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}

/*
 * Firmware login response
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;
		lps->pr_bbscn	= rsp->bb_scn;

		break;

	case BFA_STATUS_FABRIC_RJT:
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	qe = bfa_q_next(first_lps);

	while (count && qe) {
		qe_next = bfa_q_next(qe);
		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}

/*
 * Firmware logout response
 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}

/*
 * Firmware received a Clear virtual link request (for FCoE)
 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}

/*
 * Space is available in the request queue, resume queueing requests to
 * firmware.
 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s	*lps = lps_arg;

	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}

/*
 * lps is freed -- triggered by vport delete
 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);

	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}

/*
 * send login request to firmware
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;
	m->bb_scn	= lps->bb_scn;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}

/*
 * send logout request to firmware
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}

/*
 * send n2n pid set request to firmware
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1740 
1741 /*
1742  * Indirect login completion handler for non-fcs
1743  */
1744 static void
1745 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1746 {
1747 	struct bfa_lps_s *lps	= arg;
1748 
1749 	if (!complete)
1750 		return;
1751 
1752 	if (lps->fdisc)
1753 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1754 	else
1755 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1756 }
1757 
1758 /*
1759  * Login completion handler -- direct call for fcs, queue for others
1760  */
1761 static void
1762 bfa_lps_login_comp(struct bfa_lps_s *lps)
1763 {
1764 	if (!lps->bfa->fcs) {
1765 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1766 			lps);
1767 		return;
1768 	}
1769 
1770 	if (lps->fdisc)
1771 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1772 	else
1773 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1774 }
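
/*
 * Completion delivery pattern: when the FCS layer shares the driver
 * context (lps->bfa->fcs is set) the completion callback is invoked
 * directly; otherwise it is queued through bfa_cb_queue() and runs
 * later from the *_comp_cb() handler with complete == BFA_TRUE. The
 * logout and CVL paths below follow the same split.
 */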
1775 
1776 /*
1777  * Indirect logout completion handler for non-fcs
1778  */
1779 static void
1780 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1781 {
1782 	struct bfa_lps_s *lps	= arg;
1783 
1784 	if (!complete)
1785 		return;
1786 
1787 	if (lps->fdisc)
1788 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1789 }
1790 
1791 /*
1792  * Logout completion handler -- direct call for fcs, queue for others
1793  */
1794 static void
1795 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1796 {
1797 	if (!lps->bfa->fcs) {
1798 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1799 			lps);
1800 		return;
1801 	}
1802 	if (lps->fdisc)
1803 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1804 }
1805 
1806 /*
1807  * Clear virtual link completion handler for non-fcs
1808  */
1809 static void
1810 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1811 {
1812 	struct bfa_lps_s *lps	= arg;
1813 
1814 	if (!complete)
1815 		return;
1816 
1817 	/* Clear virtual link to base port will result in link down */
1818 	if (lps->fdisc)
1819 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1820 }
1821 
1822 /*
1823  * Received Clear virtual link event -- direct call for fcs,
1824  * queue for others
1825  */
1826 static void
1827 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1828 {
1829 	if (!lps->bfa->fcs) {
1830 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1831 			lps);
1832 		return;
1833 	}
1834 
1835 	/* Clear virtual link to base port will result in link down */
1836 	if (lps->fdisc)
1837 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1838 }
1839 
1840 
1841 
1842 /*
1843  *  lps_public BFA LPS public functions
1844  */
1845 
1846 u32
1847 bfa_lps_get_max_vport(struct bfa_s *bfa)
1848 {
1849 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1850 		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1851 	else
1852 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1853 }
1854 
1855 /*
1856  * Allocate a lport service tag.
1857  */
1858 struct bfa_lps_s  *
1859 bfa_lps_alloc(struct bfa_s *bfa)
1860 {
1861 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1862 	struct bfa_lps_s	*lps = NULL;
1863 
1864 	bfa_q_deq(&mod->lps_free_q, &lps);
1865 
1866 	if (lps == NULL)
1867 		return NULL;
1868 
1869 	list_add_tail(&lps->qe, &mod->lps_active_q);
1870 
1871 	bfa_sm_set_state(lps, bfa_lps_sm_init);
1872 	return lps;
1873 }
1874 
1875 /*
1876  * Free lport service tag. This can be called anytime after an alloc.
1877  * No need to wait for any pending login/logout completions.
1878  */
1879 void
1880 bfa_lps_delete(struct bfa_lps_s *lps)
1881 {
1882 	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1883 }
1884 
1885 /*
1886  * Initiate a lport login.
1887  */
1888 void
1889 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1890 	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1891 {
1892 	lps->uarg	= uarg;
1893 	lps->alpa	= alpa;
1894 	lps->pdusz	= pdusz;
1895 	lps->pwwn	= pwwn;
1896 	lps->nwwn	= nwwn;
1897 	lps->fdisc	= BFA_FALSE;
1898 	lps->auth_en	= auth_en;
1899 	lps->bb_scn	= bb_scn;
1900 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1901 }
1902 
1903 /*
1904  * Initiate a lport fdisc login.
1905  */
1906 void
1907 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1908 	wwn_t nwwn)
1909 {
1910 	lps->uarg	= uarg;
1911 	lps->alpa	= 0;
1912 	lps->pdusz	= pdusz;
1913 	lps->pwwn	= pwwn;
1914 	lps->nwwn	= nwwn;
1915 	lps->fdisc	= BFA_TRUE;
1916 	lps->auth_en	= BFA_FALSE;
1917 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1918 }
1919 
1920 
1921 /*
1922  * Initiate a lport FDISC logout.
1923  */
1924 void
1925 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1926 {
1927 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1928 }
1929 
1930 u8
1931 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1932 {
1933 	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1934 
1935 	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1936 }
1937 
1938 /*
1939  * Return lport services tag given the pid
1940  */
1941 u8
1942 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1943 {
1944 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1945 	struct bfa_lps_s	*lps;
1946 	int			i;
1947 
1948 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1949 		if (lps->lp_pid == pid)
1950 			return lps->bfa_tag;
1951 	}
1952 
1953 	/* Return base port tag anyway */
1954 	return 0;
1955 }
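
/*
 * The translation above is a linear scan over the lps array; num_lps
 * is small, so no reverse index is maintained. Tag 0 (the base port)
 * doubles as the not-found return value.
 */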
1956 
1957 
1958 /*
1959  * return port id assigned to the base lport
1960  */
1961 u32
1962 bfa_lps_get_base_pid(struct bfa_s *bfa)
1963 {
1964 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1965 
1966 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1967 }
1968 
1969 /*
1970  * Set PID in case of n2n (which is assigned during PLOGI)
1971  */
1972 void
1973 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1974 {
1975 	bfa_trc(lps->bfa, lps->bfa_tag);
1976 	bfa_trc(lps->bfa, n2n_pid);
1977 
1978 	lps->lp_pid = n2n_pid;
1979 	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1980 }
1981 
1982 /*
1983  * LPS firmware message class handler.
1984  */
1985 void
1986 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1987 {
1988 	union bfi_lps_i2h_msg_u	msg;
1989 
1990 	bfa_trc(bfa, m->mhdr.msg_id);
1991 	msg.msg = m;
1992 
1993 	switch (m->mhdr.msg_id) {
1994 	case BFI_LPS_I2H_LOGIN_RSP:
1995 		bfa_lps_login_rsp(bfa, msg.login_rsp);
1996 		break;
1997 
1998 	case BFI_LPS_I2H_LOGOUT_RSP:
1999 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2000 		break;
2001 
2002 	case BFI_LPS_I2H_CVL_EVENT:
2003 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2004 		break;
2005 
2006 	default:
2007 		bfa_trc(bfa, m->mhdr.msg_id);
2008 		WARN_ON(1);
2009 	}
2010 }
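
/*
 * bfa_lps_isr() is the single dispatch point for LPS messages from
 * firmware: each i2h message id is routed to its handler above, which
 * in turn feeds the per-lport state machine. An unknown id indicates
 * a host/firmware protocol mismatch and trips the WARN_ON.
 */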
2011 
2012 static void
2013 bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2014 {
2015 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2016 	struct bfa_aen_entry_s  *aen_entry;
2017 
2018 	bfad_get_aen_entry(bfad, aen_entry);
2019 	if (!aen_entry)
2020 		return;
2021 
2022 	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2023 	aen_entry->aen_data.port.pwwn = fcport->pwwn;
2024 
2025 	/* Send the AEN notification */
2026 	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2027 				  BFA_AEN_CAT_PORT, event);
2028 }
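
/*
 * AEN entries come from a driver-owned pool; bfad_get_aen_entry()
 * yields NULL when the pool is exhausted, in which case the event is
 * silently dropped. Each posted event carries a monotonically
 * increasing sequence number (bfa_aen_seq) for ordering on the
 * management side.
 */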
2029 
2030 /*
2031  * FC PORT state machine functions
2032  */
2033 static void
2034 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2035 			enum bfa_fcport_sm_event event)
2036 {
2037 	bfa_trc(fcport->bfa, event);
2038 
2039 	switch (event) {
2040 	case BFA_FCPORT_SM_START:
2041 		/*
2042 		 * Start event after IOC is configured and BFA is started.
2043 		 */
2044 		fcport->use_flash_cfg = BFA_TRUE;
2045 
2046 		if (bfa_fcport_send_enable(fcport)) {
2047 			bfa_trc(fcport->bfa, BFA_TRUE);
2048 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2049 		} else {
2050 			bfa_trc(fcport->bfa, BFA_FALSE);
2051 			bfa_sm_set_state(fcport,
2052 					bfa_fcport_sm_enabling_qwait);
2053 		}
2054 		break;
2055 
2056 	case BFA_FCPORT_SM_ENABLE:
2057 		/*
2058 		 * Port is persistently configured to be in enabled state. Do
2059 		 * not change state. Port enabling is done when START event is
2060 		 * received.
2061 		 */
2062 		break;
2063 
2064 	case BFA_FCPORT_SM_DISABLE:
2065 		/*
2066 		 * If a port is persistently configured to be disabled, the
2067 		 * first event will be a port disable request.
2068 		 */
2069 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2070 		break;
2071 
2072 	case BFA_FCPORT_SM_HWFAIL:
2073 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2074 		break;
2075 
2076 	default:
2077 		bfa_sm_fault(fcport->bfa, event);
2078 	}
2079 }
2080 
2081 static void
2082 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2083 				enum bfa_fcport_sm_event event)
2084 {
2085 	char pwwn_buf[BFA_STRING_32];
2086 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2087 	bfa_trc(fcport->bfa, event);
2088 
2089 	switch (event) {
2090 	case BFA_FCPORT_SM_QRESUME:
2091 		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2092 		bfa_fcport_send_enable(fcport);
2093 		break;
2094 
2095 	case BFA_FCPORT_SM_STOP:
2096 		bfa_reqq_wcancel(&fcport->reqq_wait);
2097 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2098 		break;
2099 
2100 	case BFA_FCPORT_SM_ENABLE:
2101 		/*
2102 		 * Enable is already in progress.
2103 		 */
2104 		break;
2105 
2106 	case BFA_FCPORT_SM_DISABLE:
2107 		/*
2108 		 * Enable was never sent (still waiting for queue space), so
2109 		 * just cancel the wait and mark the port disabled.
2110 		 */
2111 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2112 		bfa_reqq_wcancel(&fcport->reqq_wait);
2113 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2114 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2115 		wwn2str(pwwn_buf, fcport->pwwn);
2116 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2117 			"Base port disabled: WWN = %s\n", pwwn_buf);
2118 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2119 		break;
2120 
2121 	case BFA_FCPORT_SM_LINKUP:
2122 	case BFA_FCPORT_SM_LINKDOWN:
2123 		/*
2124 		 * Possible to get link events when doing back-to-back
2125 		 * enable/disables.
2126 		 */
2127 		break;
2128 
2129 	case BFA_FCPORT_SM_HWFAIL:
2130 		bfa_reqq_wcancel(&fcport->reqq_wait);
2131 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2132 		break;
2133 
2134 	default:
2135 		bfa_sm_fault(fcport->bfa, event);
2136 	}
2137 }
2138 
2139 static void
2140 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2141 						enum bfa_fcport_sm_event event)
2142 {
2143 	char pwwn_buf[BFA_STRING_32];
2144 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2145 	bfa_trc(fcport->bfa, event);
2146 
2147 	switch (event) {
2148 	case BFA_FCPORT_SM_FWRSP:
2149 	case BFA_FCPORT_SM_LINKDOWN:
2150 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2151 		break;
2152 
2153 	case BFA_FCPORT_SM_LINKUP:
2154 		bfa_fcport_update_linkinfo(fcport);
2155 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2156 
2157 		WARN_ON(!fcport->event_cbfn);
2158 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2159 		break;
2160 
2161 	case BFA_FCPORT_SM_ENABLE:
2162 		/*
2163 		 * Already being enabled.
2164 		 */
2165 		break;
2166 
2167 	case BFA_FCPORT_SM_DISABLE:
2168 		if (bfa_fcport_send_disable(fcport))
2169 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2170 		else
2171 			bfa_sm_set_state(fcport,
2172 					 bfa_fcport_sm_disabling_qwait);
2173 
2174 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2175 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2176 		wwn2str(pwwn_buf, fcport->pwwn);
2177 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2178 			"Base port disabled: WWN = %s\n", pwwn_buf);
2179 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2180 		break;
2181 
2182 	case BFA_FCPORT_SM_STOP:
2183 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2184 		break;
2185 
2186 	case BFA_FCPORT_SM_HWFAIL:
2187 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2188 		break;
2189 
2190 	default:
2191 		bfa_sm_fault(fcport->bfa, event);
2192 	}
2193 }
2194 
2195 static void
2196 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2197 						enum bfa_fcport_sm_event event)
2198 {
2199 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2200 	char pwwn_buf[BFA_STRING_32];
2201 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2202 
2203 	bfa_trc(fcport->bfa, event);
2204 
2205 	switch (event) {
2206 	case BFA_FCPORT_SM_LINKUP:
2207 		bfa_fcport_update_linkinfo(fcport);
2208 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2209 		WARN_ON(!fcport->event_cbfn);
2210 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2211 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2212 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2213 
2214 			bfa_trc(fcport->bfa,
2215 				pevent->link_state.vc_fcf.fcf.fipenabled);
2216 			bfa_trc(fcport->bfa,
2217 				pevent->link_state.vc_fcf.fcf.fipfailed);
2218 
2219 			if (pevent->link_state.vc_fcf.fcf.fipfailed)
2220 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2221 					BFA_PL_EID_FIP_FCF_DISC, 0,
2222 					"FIP FCF Discovery Failed");
2223 			else
2224 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2225 					BFA_PL_EID_FIP_FCF_DISC, 0,
2226 					"FIP FCF Discovered");
2227 		}
2228 
2229 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2230 		wwn2str(pwwn_buf, fcport->pwwn);
2231 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2232 			"Base port online: WWN = %s\n", pwwn_buf);
2233 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2234 
2235 		/* If QoS is enabled and it is not online, send AEN */
2236 		if (fcport->cfg.qos_enabled &&
2237 		    fcport->qos_attr.state != BFA_QOS_ONLINE)
2238 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2239 		break;
2240 
2241 	case BFA_FCPORT_SM_LINKDOWN:
2242 		/*
2243 		 * Possible to get link down event.
2244 		 */
2245 		break;
2246 
2247 	case BFA_FCPORT_SM_ENABLE:
2248 		/*
2249 		 * Already enabled.
2250 		 */
2251 		break;
2252 
2253 	case BFA_FCPORT_SM_DISABLE:
2254 		if (bfa_fcport_send_disable(fcport))
2255 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2256 		else
2257 			bfa_sm_set_state(fcport,
2258 					 bfa_fcport_sm_disabling_qwait);
2259 
2260 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2261 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2262 		wwn2str(pwwn_buf, fcport->pwwn);
2263 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2264 			"Base port disabled: WWN = %s\n", pwwn_buf);
2265 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2266 		break;
2267 
2268 	case BFA_FCPORT_SM_STOP:
2269 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2270 		break;
2271 
2272 	case BFA_FCPORT_SM_HWFAIL:
2273 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2274 		break;
2275 
2276 	default:
2277 		bfa_sm_fault(fcport->bfa, event);
2278 	}
2279 }
2280 
2281 static void
2282 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2283 	enum bfa_fcport_sm_event event)
2284 {
2285 	char pwwn_buf[BFA_STRING_32];
2286 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2287 
2288 	bfa_trc(fcport->bfa, event);
2289 
2290 	switch (event) {
2291 	case BFA_FCPORT_SM_ENABLE:
2292 		/*
2293 		 * Already enabled.
2294 		 */
2295 		break;
2296 
2297 	case BFA_FCPORT_SM_DISABLE:
2298 		if (bfa_fcport_send_disable(fcport))
2299 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2300 		else
2301 			bfa_sm_set_state(fcport,
2302 					 bfa_fcport_sm_disabling_qwait);
2303 
2304 		bfa_fcport_reset_linkinfo(fcport);
2305 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2306 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2307 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2308 		wwn2str(pwwn_buf, fcport->pwwn);
2309 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2310 			"Base port offline: WWN = %s\n", pwwn_buf);
2311 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2312 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2313 			"Base port disabled: WWN = %s\n", pwwn_buf);
2314 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2315 		break;
2316 
2317 	case BFA_FCPORT_SM_LINKDOWN:
2318 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2319 		bfa_fcport_reset_linkinfo(fcport);
2320 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2321 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2322 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2323 		wwn2str(pwwn_buf, fcport->pwwn);
2324 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2325 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2326 				"Base port offline: WWN = %s\n", pwwn_buf);
2327 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2328 		} else {
2329 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2330 				"Base port (WWN = %s) "
2331 				"lost fabric connectivity\n", pwwn_buf);
2332 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2333 		}
2334 		break;
2335 
2336 	case BFA_FCPORT_SM_STOP:
2337 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2338 		bfa_fcport_reset_linkinfo(fcport);
2339 		wwn2str(pwwn_buf, fcport->pwwn);
2340 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2341 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2342 				"Base port offline: WWN = %s\n", pwwn_buf);
2343 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2344 		} else {
2345 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2346 				"Base port (WWN = %s) "
2347 				"lost fabric connectivity\n", pwwn_buf);
2348 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2349 		}
2350 		break;
2351 
2352 	case BFA_FCPORT_SM_HWFAIL:
2353 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2354 		bfa_fcport_reset_linkinfo(fcport);
2355 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2356 		wwn2str(pwwn_buf, fcport->pwwn);
2357 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2358 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2359 				"Base port offline: WWN = %s\n", pwwn_buf);
2360 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2361 		} else {
2362 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2363 				"Base port (WWN = %s) "
2364 				"lost fabric connectivity\n", pwwn_buf);
2365 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2366 		}
2367 		break;
2368 
2369 	default:
2370 		bfa_sm_fault(fcport->bfa, event);
2371 	}
2372 }
2373 
2374 static void
2375 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2376 				 enum bfa_fcport_sm_event event)
2377 {
2378 	bfa_trc(fcport->bfa, event);
2379 
2380 	switch (event) {
2381 	case BFA_FCPORT_SM_QRESUME:
2382 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2383 		bfa_fcport_send_disable(fcport);
2384 		break;
2385 
2386 	case BFA_FCPORT_SM_STOP:
2387 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2388 		bfa_reqq_wcancel(&fcport->reqq_wait);
2389 		break;
2390 
2391 	case BFA_FCPORT_SM_ENABLE:
2392 		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2393 		break;
2394 
2395 	case BFA_FCPORT_SM_DISABLE:
2396 		/*
2397 		 * Already being disabled.
2398 		 */
2399 		break;
2400 
2401 	case BFA_FCPORT_SM_LINKUP:
2402 	case BFA_FCPORT_SM_LINKDOWN:
2403 		/*
2404 		 * Possible to get link events when doing back-to-back
2405 		 * enable/disables.
2406 		 */
2407 		break;
2408 
2409 	case BFA_FCPORT_SM_HWFAIL:
2410 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2411 		bfa_reqq_wcancel(&fcport->reqq_wait);
2412 		break;
2413 
2414 	default:
2415 		bfa_sm_fault(fcport->bfa, event);
2416 	}
2417 }
2418 
2419 static void
2420 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2421 				 enum bfa_fcport_sm_event event)
2422 {
2423 	bfa_trc(fcport->bfa, event);
2424 
2425 	switch (event) {
2426 	case BFA_FCPORT_SM_QRESUME:
2427 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2428 		bfa_fcport_send_disable(fcport);
2429 		if (bfa_fcport_send_enable(fcport))
2430 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2431 		else
2432 			bfa_sm_set_state(fcport,
2433 					 bfa_fcport_sm_enabling_qwait);
2434 		break;
2435 
2436 	case BFA_FCPORT_SM_STOP:
2437 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2438 		bfa_reqq_wcancel(&fcport->reqq_wait);
2439 		break;
2440 
2441 	case BFA_FCPORT_SM_ENABLE:
2442 		break;
2443 
2444 	case BFA_FCPORT_SM_DISABLE:
2445 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2446 		break;
2447 
2448 	case BFA_FCPORT_SM_LINKUP:
2449 	case BFA_FCPORT_SM_LINKDOWN:
2450 		/*
2451 		 * Possible to get link events when doing back-to-back
2452 		 * enable/disables.
2453 		 */
2454 		break;
2455 
2456 	case BFA_FCPORT_SM_HWFAIL:
2457 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2458 		bfa_reqq_wcancel(&fcport->reqq_wait);
2459 		break;
2460 
2461 	default:
2462 		bfa_sm_fault(fcport->bfa, event);
2463 	}
2464 }
2465 
2466 static void
2467 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2468 						enum bfa_fcport_sm_event event)
2469 {
2470 	char pwwn_buf[BFA_STRING_32];
2471 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2472 	bfa_trc(fcport->bfa, event);
2473 
2474 	switch (event) {
2475 	case BFA_FCPORT_SM_FWRSP:
2476 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2477 		break;
2478 
2479 	case BFA_FCPORT_SM_DISABLE:
2480 		/*
2481 		 * Already being disabled.
2482 		 */
2483 		break;
2484 
2485 	case BFA_FCPORT_SM_ENABLE:
2486 		if (bfa_fcport_send_enable(fcport))
2487 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2488 		else
2489 			bfa_sm_set_state(fcport,
2490 					 bfa_fcport_sm_enabling_qwait);
2491 
2492 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2493 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2494 		wwn2str(pwwn_buf, fcport->pwwn);
2495 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2496 			"Base port enabled: WWN = %s\n", pwwn_buf);
2497 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2498 		break;
2499 
2500 	case BFA_FCPORT_SM_STOP:
2501 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2502 		break;
2503 
2504 	case BFA_FCPORT_SM_LINKUP:
2505 	case BFA_FCPORT_SM_LINKDOWN:
2506 		/*
2507 		 * Possible to get link events when doing back-to-back
2508 		 * enable/disables.
2509 		 */
2510 		break;
2511 
2512 	case BFA_FCPORT_SM_HWFAIL:
2513 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2514 		break;
2515 
2516 	default:
2517 		bfa_sm_fault(fcport->bfa, event);
2518 	}
2519 }
2520 
2521 static void
2522 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2523 						enum bfa_fcport_sm_event event)
2524 {
2525 	char pwwn_buf[BFA_STRING_32];
2526 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2527 	bfa_trc(fcport->bfa, event);
2528 
2529 	switch (event) {
2530 	case BFA_FCPORT_SM_START:
2531 		/*
2532 		 * Ignore start event for a port that is disabled.
2533 		 */
2534 		break;
2535 
2536 	case BFA_FCPORT_SM_STOP:
2537 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2538 		break;
2539 
2540 	case BFA_FCPORT_SM_ENABLE:
2541 		if (bfa_fcport_send_enable(fcport))
2542 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2543 		else
2544 			bfa_sm_set_state(fcport,
2545 					 bfa_fcport_sm_enabling_qwait);
2546 
2547 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2548 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2549 		wwn2str(pwwn_buf, fcport->pwwn);
2550 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2551 			"Base port enabled: WWN = %s\n", pwwn_buf);
2552 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2553 		break;
2554 
2555 	case BFA_FCPORT_SM_DISABLE:
2556 		/*
2557 		 * Already disabled.
2558 		 */
2559 		break;
2560 
2561 	case BFA_FCPORT_SM_HWFAIL:
2562 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2563 		break;
2564 
2565 	default:
2566 		bfa_sm_fault(fcport->bfa, event);
2567 	}
2568 }
2569 
2570 static void
2571 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2572 			 enum bfa_fcport_sm_event event)
2573 {
2574 	bfa_trc(fcport->bfa, event);
2575 
2576 	switch (event) {
2577 	case BFA_FCPORT_SM_START:
2578 		if (bfa_fcport_send_enable(fcport))
2579 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2580 		else
2581 			bfa_sm_set_state(fcport,
2582 					 bfa_fcport_sm_enabling_qwait);
2583 		break;
2584 
2585 	default:
2586 		/*
2587 		 * Ignore all other events.
2588 		 */
2589 		;
2590 	}
2591 }
2592 
2593 /*
2594  * Port is enabled. IOC is down/failed.
2595  */
2596 static void
2597 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2598 			 enum bfa_fcport_sm_event event)
2599 {
2600 	bfa_trc(fcport->bfa, event);
2601 
2602 	switch (event) {
2603 	case BFA_FCPORT_SM_START:
2604 		if (bfa_fcport_send_enable(fcport))
2605 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2606 		else
2607 			bfa_sm_set_state(fcport,
2608 					 bfa_fcport_sm_enabling_qwait);
2609 		break;
2610 
2611 	default:
2612 		/*
2613 		 * Ignore all other events.
2614 		 */
2615 		;
2616 	}
2617 }
2618 
2619 /*
2620  * Port is disabled. IOC is down/failed.
2621  */
2622 static void
2623 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2624 			 enum bfa_fcport_sm_event event)
2625 {
2626 	bfa_trc(fcport->bfa, event);
2627 
2628 	switch (event) {
2629 	case BFA_FCPORT_SM_START:
2630 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2631 		break;
2632 
2633 	case BFA_FCPORT_SM_ENABLE:
2634 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2635 		break;
2636 
2637 	default:
2638 		/*
2639 		 * Ignore all other events.
2640 		 */
2641 		;
2642 	}
2643 }
2644 
2645 /*
2646  * Link state is down
2647  */
2648 static void
2649 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2650 		enum bfa_fcport_ln_sm_event event)
2651 {
2652 	bfa_trc(ln->fcport->bfa, event);
2653 
2654 	switch (event) {
2655 	case BFA_FCPORT_LN_SM_LINKUP:
2656 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2657 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2658 		break;
2659 
2660 	default:
2661 		bfa_sm_fault(ln->fcport->bfa, event);
2662 	}
2663 }
2664 
2665 /*
2666  * Link state is waiting for down notification
2667  */
2668 static void
2669 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2670 		enum bfa_fcport_ln_sm_event event)
2671 {
2672 	bfa_trc(ln->fcport->bfa, event);
2673 
2674 	switch (event) {
2675 	case BFA_FCPORT_LN_SM_LINKUP:
2676 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2677 		break;
2678 
2679 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2680 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2681 		break;
2682 
2683 	default:
2684 		bfa_sm_fault(ln->fcport->bfa, event);
2685 	}
2686 }
2687 
2688 /*
2689  * Link state is waiting for down notification and there is a pending up
2690  */
2691 static void
2692 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2693 		enum bfa_fcport_ln_sm_event event)
2694 {
2695 	bfa_trc(ln->fcport->bfa, event);
2696 
2697 	switch (event) {
2698 	case BFA_FCPORT_LN_SM_LINKDOWN:
2699 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2700 		break;
2701 
2702 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2703 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2704 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2705 		break;
2706 
2707 	default:
2708 		bfa_sm_fault(ln->fcport->bfa, event);
2709 	}
2710 }
2711 
2712 /*
2713  * Link state is up
2714  */
2715 static void
2716 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2717 		enum bfa_fcport_ln_sm_event event)
2718 {
2719 	bfa_trc(ln->fcport->bfa, event);
2720 
2721 	switch (event) {
2722 	case BFA_FCPORT_LN_SM_LINKDOWN:
2723 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2724 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2725 		break;
2726 
2727 	default:
2728 		bfa_sm_fault(ln->fcport->bfa, event);
2729 	}
2730 }
2731 
2732 /*
2733  * Link state is waiting for up notification
2734  */
2735 static void
2736 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2737 		enum bfa_fcport_ln_sm_event event)
2738 {
2739 	bfa_trc(ln->fcport->bfa, event);
2740 
2741 	switch (event) {
2742 	case BFA_FCPORT_LN_SM_LINKDOWN:
2743 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2744 		break;
2745 
2746 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2747 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2748 		break;
2749 
2750 	default:
2751 		bfa_sm_fault(ln->fcport->bfa, event);
2752 	}
2753 }
2754 
2755 /*
2756  * Link state is waiting for up notification and there is a pending down
2757  */
2758 static void
2759 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2760 		enum bfa_fcport_ln_sm_event event)
2761 {
2762 	bfa_trc(ln->fcport->bfa, event);
2763 
2764 	switch (event) {
2765 	case BFA_FCPORT_LN_SM_LINKUP:
2766 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2767 		break;
2768 
2769 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2770 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2771 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2772 		break;
2773 
2774 	default:
2775 		bfa_sm_fault(ln->fcport->bfa, event);
2776 	}
2777 }
2778 
2779 /*
2780  * Link state is waiting for up notification and there are pending down and up
2781  */
2782 static void
2783 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2784 			enum bfa_fcport_ln_sm_event event)
2785 {
2786 	bfa_trc(ln->fcport->bfa, event);
2787 
2788 	switch (event) {
2789 	case BFA_FCPORT_LN_SM_LINKDOWN:
2790 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2791 		break;
2792 
2793 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2794 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2795 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2796 		break;
2797 
2798 	default:
2799 		bfa_sm_fault(ln->fcport->bfa, event);
2800 	}
2801 }
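
/*
 * The ln (link notification) state machines above serialize link
 * events toward the upper layers: a new notification is not delivered
 * until the previous callback has completed (the NOTIFICATION event),
 * and transitions that cancel out while waiting (e.g. a down followed
 * by an up) are collapsed rather than delivered individually.
 */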
2802 
2803 static void
2804 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2805 {
2806 	struct bfa_fcport_ln_s *ln = cbarg;
2807 
2808 	if (complete)
2809 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2810 	else
2811 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2812 }
2813 
2814 /*
2815  * Send SCN notification to upper layers.
2816  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2817  */
2818 static void
2819 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2820 	bfa_boolean_t trunk)
2821 {
2822 	if (fcport->cfg.trunked && !trunk)
2823 		return;
2824 
2825 	switch (event) {
2826 	case BFA_PORT_LINKUP:
2827 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2828 		break;
2829 	case BFA_PORT_LINKDOWN:
2830 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2831 		break;
2832 	default:
2833 		WARN_ON(1);
2834 	}
2835 }
2836 
2837 static void
2838 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2839 {
2840 	struct bfa_fcport_s *fcport = ln->fcport;
2841 
2842 	if (fcport->bfa->fcs) {
2843 		fcport->event_cbfn(fcport->event_cbarg, event);
2844 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2845 	} else {
2846 		ln->ln_event = event;
2847 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2848 			__bfa_cb_fcport_event, ln);
2849 	}
2850 }
2851 
2852 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2853 							BFA_CACHELINE_SZ))
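
/*
 * The per-port stats DMA buffer is padded to a whole number of
 * cachelines so that the region firmware DMAs into never shares a
 * cacheline with other driver data.
 */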
2854 
2855 static void
2856 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2857 		   struct bfa_s *bfa)
2858 {
2859 	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2860 
2861 	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
2862 }
2863 
2864 static void
2865 bfa_fcport_qresume(void *cbarg)
2866 {
2867 	struct bfa_fcport_s *fcport = cbarg;
2868 
2869 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2870 }
2871 
2872 static void
2873 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2874 {
2875 	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2876 
2877 	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2878 	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
2879 	fcport->stats = (union bfa_fcport_stats_u *)
2880 				bfa_mem_dma_virt(fcport_dma);
2881 }
2882 
2883 /*
2884  * Memory initialization.
2885  */
2886 static void
2887 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2888 		struct bfa_pcidev_s *pcidev)
2889 {
2890 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2891 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2892 	struct bfa_fcport_ln_s *ln = &fcport->ln;
2893 	struct timeval tv;
2894 
2895 	fcport->bfa = bfa;
2896 	ln->fcport = fcport;
2897 
2898 	bfa_fcport_mem_claim(fcport);
2899 
2900 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2901 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2902 
2903 	/*
2904 	 * initialize time stamp for stats reset
2905 	 */
2906 	do_gettimeofday(&tv);
2907 	fcport->stats_reset_time = tv.tv_sec;
2908 
2909 	/*
2910 	 * initialize and set default configuration
2911 	 */
2912 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2913 	port_cfg->speed = BFA_PORT_SPEED_AUTO;
2914 	port_cfg->trunked = BFA_FALSE;
2915 	port_cfg->maxfrsize = 0;
2916 
2917 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2918 
2919 	INIT_LIST_HEAD(&fcport->stats_pending_q);
2920 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
2921 
2922 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2923 }
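
/*
 * Note: maxfrsize is deliberately left 0 above; bfa_fcport_init()
 * replaces a zero value with the IOC's maximum frame size once the
 * IOC attributes are available.
 */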
2924 
2925 static void
2926 bfa_fcport_detach(struct bfa_s *bfa)
2927 {
2928 }
2929 
2930 /*
2931  * Called when IOC is ready.
2932  */
2933 static void
2934 bfa_fcport_start(struct bfa_s *bfa)
2935 {
2936 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2937 }
2938 
2939 /*
2940  * Called before IOC is stopped.
2941  */
2942 static void
2943 bfa_fcport_stop(struct bfa_s *bfa)
2944 {
2945 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2946 	bfa_trunk_iocdisable(bfa);
2947 }
2948 
2949 /*
2950  * Called when IOC failure is detected.
2951  */
2952 static void
2953 bfa_fcport_iocdisable(struct bfa_s *bfa)
2954 {
2955 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2956 
2957 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2958 	bfa_trunk_iocdisable(bfa);
2959 }
2960 
2961 static void
2962 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2963 {
2964 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2965 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2966 
2967 	fcport->speed = pevent->link_state.speed;
2968 	fcport->topology = pevent->link_state.topology;
2969 
2970 	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2971 		fcport->myalpa = 0;
2972 
2973 	/* QoS Details */
2974 	fcport->qos_attr = pevent->link_state.qos_attr;
2975 	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2976 
2977 	/*
2978 	 * update trunk state if applicable
2979 	 */
2980 	if (!fcport->cfg.trunked)
2981 		trunk->attr.state = BFA_TRUNK_DISABLED;
2982 
2983 	/* update FCoE specific */
2984 	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2985 
2986 	bfa_trc(fcport->bfa, fcport->speed);
2987 	bfa_trc(fcport->bfa, fcport->topology);
2988 }
2989 
2990 static void
2991 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2992 {
2993 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2994 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2995 	fcport->bbsc_op_state = BFA_FALSE;
2996 }
2997 
2998 /*
2999  * Send port enable message to firmware.
3000  */
3001 static bfa_boolean_t
3002 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3003 {
3004 	struct bfi_fcport_enable_req_s *m;
3005 
3006 	/*
3007 	 * Increment message tag before queue check, so that responses to old
3008 	 * requests are discarded.
3009 	 */
3010 	fcport->msgtag++;
3011 
3012 	/*
3013 	 * check for room in queue to send request now
3014 	 */
3015 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3016 	if (!m) {
3017 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3018 							&fcport->reqq_wait);
3019 		return BFA_FALSE;
3020 	}
3021 
3022 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3023 			bfa_fn_lpu(fcport->bfa));
3024 	m->nwwn = fcport->nwwn;
3025 	m->pwwn = fcport->pwwn;
3026 	m->port_cfg = fcport->cfg;
3027 	m->msgtag = fcport->msgtag;
3028 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3029 	m->use_flash_cfg = fcport->use_flash_cfg;
3030 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3031 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3032 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3033 
3034 	/*
3035 	 * queue I/O message to firmware
3036 	 */
3037 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3038 	return BFA_TRUE;
3039 }
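
/*
 * msgtag is incremented before every enable/disable request and echoed
 * back by firmware; bfa_fcport_isr() forwards an ENABLE/DISABLE
 * response to the state machine only when the echoed tag matches, so
 * responses to superseded requests are dropped.
 */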
3040 
3041 /*
3042  * Send port disable message to firmware.
3043  */
3044 static	bfa_boolean_t
3045 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3046 {
3047 	struct bfi_fcport_req_s *m;
3048 
3049 	/*
3050 	 * Increment message tag before queue check, so that responses to old
3051 	 * requests are discarded.
3052 	 */
3053 	fcport->msgtag++;
3054 
3055 	/*
3056 	 * check for room in queue to send request now
3057 	 */
3058 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3059 	if (!m) {
3060 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3061 							&fcport->reqq_wait);
3062 		return BFA_FALSE;
3063 	}
3064 
3065 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3066 			bfa_fn_lpu(fcport->bfa));
3067 	m->msgtag = fcport->msgtag;
3068 
3069 	/*
3070 	 * queue I/O message to firmware
3071 	 */
3072 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3073 
3074 	return BFA_TRUE;
3075 }
3076 
3077 static void
3078 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3079 {
3080 	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3081 	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3082 
3083 	bfa_trc(fcport->bfa, fcport->pwwn);
3084 	bfa_trc(fcport->bfa, fcport->nwwn);
3085 }
3086 
3087 static void
3088 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3089 	struct bfa_qos_stats_s *s)
3090 {
3091 	u32	*dip = (u32 *) d;
3092 	__be32	*sip = (__be32 *) s;
3093 	int		i;
3094 
3095 	/* Now swap the 32 bit fields */
3096 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3097 		dip[i] = be32_to_cpu(sip[i]);
3098 }
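
/*
 * QoS stats are plain 32-bit big-endian counters, so a straight
 * be32_to_cpu() per word is sufficient here.
 */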
3099 
3100 static void
3101 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3102 	struct bfa_fcoe_stats_s *s)
3103 {
3104 	u32	*dip = (u32 *) d;
3105 	__be32	*sip = (__be32 *) s;
3106 	int		i;
3107 
3108 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3109 	     i = i + 2) {
3110 #ifdef __BIG_ENDIAN
3111 		dip[i] = be32_to_cpu(sip[i]);
3112 		dip[i + 1] = be32_to_cpu(sip[i + 1]);
3113 #else
3114 		dip[i] = be32_to_cpu(sip[i + 1]);
3115 		dip[i + 1] = be32_to_cpu(sip[i]);
3116 #endif
3117 	}
3118 }
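
/*
 * FCoE stats are 64-bit counters written big-endian by firmware, so on
 * little-endian hosts both the byte order within each 32-bit word and
 * the word order within each pair must be reversed. Worked example:
 * the 64-bit value 0x0000000100000002 arrives as the big-endian word
 * pair {0x1, 0x2}; the else-branch stores dip[0] = 0x2, dip[1] = 0x1,
 * which reads back correctly through a host-order u64.
 */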
3119 
3120 static void
3121 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3122 {
3123 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3124 	struct bfa_cb_pending_q_s *cb;
3125 	struct list_head *qe, *qen;
3126 	union bfa_fcport_stats_u *ret;
3127 
3128 	if (complete) {
3129 		struct timeval tv;
3130 		if (fcport->stats_status == BFA_STATUS_OK)
3131 			do_gettimeofday(&tv);
3132 
3133 		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3134 			bfa_q_deq(&fcport->stats_pending_q, &qe);
3135 			cb = (struct bfa_cb_pending_q_s *)qe;
3136 			if (fcport->stats_status == BFA_STATUS_OK) {
3137 				ret = (union bfa_fcport_stats_u *)cb->data;
3138 				/* Swap FC QoS or FCoE stats */
3139 				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3140 					bfa_fcport_qos_stats_swap(&ret->fcqos,
3141 							&fcport->stats->fcqos);
3142 				else {
3143 					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3144 							&fcport->stats->fcoe);
3145 					ret->fcoe.secs_reset =
3146 					tv.tv_sec - fcport->stats_reset_time;
3147 				}
3148 			}
3149 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3150 					fcport->stats_status);
3151 		}
3152 		fcport->stats_status = BFA_STATUS_OK;
3153 	} else {
3154 		INIT_LIST_HEAD(&fcport->stats_pending_q);
3155 		fcport->stats_status = BFA_STATUS_OK;
3156 	}
3157 }
3158 
3159 static void
3160 bfa_fcport_stats_get_timeout(void *cbarg)
3161 {
3162 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3163 
3164 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3165 
3166 	if (fcport->stats_qfull) {
3167 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3168 		fcport->stats_qfull = BFA_FALSE;
3169 	}
3170 
3171 	fcport->stats_status = BFA_STATUS_ETIMER;
3172 	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3173 }
3174 
3175 static void
3176 bfa_fcport_send_stats_get(void *cbarg)
3177 {
3178 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3179 	struct bfi_fcport_req_s *msg;
3180 
3181 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3182 
3183 	if (!msg) {
3184 		fcport->stats_qfull = BFA_TRUE;
3185 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3186 				bfa_fcport_send_stats_get, fcport);
3187 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3188 				&fcport->stats_reqq_wait);
3189 		return;
3190 	}
3191 	fcport->stats_qfull = BFA_FALSE;
3192 
3193 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3194 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3195 			bfa_fn_lpu(fcport->bfa));
3196 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3197 }
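
/*
 * If the request queue is full, the function re-arms itself as the
 * queue-resume callback via bfa_reqq_winit() and is retried
 * automatically when space frees up; stats_qfull records that such a
 * wait is pending so the timeout handlers can cancel it. The stats
 * clear path below uses the same pattern.
 */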
3198 
3199 static void
3200 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3201 {
3202 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3203 	struct bfa_cb_pending_q_s *cb;
3204 	struct list_head *qe, *qen;
3205 
3206 	if (complete) {
3207 		struct timeval tv;
3208 
3209 		/*
3210 		 * re-initialize time stamp for stats reset
3211 		 */
3212 		do_gettimeofday(&tv);
3213 		fcport->stats_reset_time = tv.tv_sec;
3214 		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3215 			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3216 			cb = (struct bfa_cb_pending_q_s *)qe;
3217 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3218 						fcport->stats_status);
3219 		}
3220 		fcport->stats_status = BFA_STATUS_OK;
3221 	} else {
3222 		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3223 		fcport->stats_status = BFA_STATUS_OK;
3224 	}
3225 }
3226 
3227 static void
3228 bfa_fcport_stats_clr_timeout(void *cbarg)
3229 {
3230 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3231 
3232 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3233 
3234 	if (fcport->stats_qfull) {
3235 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3236 		fcport->stats_qfull = BFA_FALSE;
3237 	}
3238 
3239 	fcport->stats_status = BFA_STATUS_ETIMER;
3240 	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3241 }
3242 
3243 static void
3244 bfa_fcport_send_stats_clear(void *cbarg)
3245 {
3246 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3247 	struct bfi_fcport_req_s *msg;
3248 
3249 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3250 
3251 	if (!msg) {
3252 		fcport->stats_qfull = BFA_TRUE;
3253 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3254 				bfa_fcport_send_stats_clear, fcport);
3255 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3256 						&fcport->stats_reqq_wait);
3257 		return;
3258 	}
3259 	fcport->stats_qfull = BFA_FALSE;
3260 
3261 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3262 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3263 			bfa_fn_lpu(fcport->bfa));
3264 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3265 }
3266 
3267 /*
3268  * Handle trunk SCN event from firmware.
3269  */
3270 static void
3271 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3272 {
3273 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3274 	struct bfi_fcport_trunk_link_s *tlink;
3275 	struct bfa_trunk_link_attr_s *lattr;
3276 	enum bfa_trunk_state state_prev;
3277 	int i;
3278 	int link_bm = 0;
3279 
3280 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3281 	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3282 		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3283 
3284 	bfa_trc(fcport->bfa, trunk->attr.state);
3285 	bfa_trc(fcport->bfa, scn->trunk_state);
3286 	bfa_trc(fcport->bfa, scn->trunk_speed);
3287 
3288 	/*
3289 	 * Save off new state for trunk attribute query
3290 	 */
3291 	state_prev = trunk->attr.state;
3292 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3293 		trunk->attr.state = scn->trunk_state;
3294 	trunk->attr.speed = scn->trunk_speed;
3295 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3296 		lattr = &trunk->attr.link_attr[i];
3297 		tlink = &scn->tlink[i];
3298 
3299 		lattr->link_state = tlink->state;
3300 		lattr->trunk_wwn  = tlink->trunk_wwn;
3301 		lattr->fctl	  = tlink->fctl;
3302 		lattr->speed	  = tlink->speed;
3303 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3304 
3305 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3306 			fcport->speed	 = tlink->speed;
3307 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3308 			link_bm |= 1 << i;
3309 		}
3310 
3311 		bfa_trc(fcport->bfa, lattr->link_state);
3312 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3313 		bfa_trc(fcport->bfa, lattr->fctl);
3314 		bfa_trc(fcport->bfa, lattr->speed);
3315 		bfa_trc(fcport->bfa, lattr->deskew);
3316 	}
3317 
3318 	switch (link_bm) {
3319 	case 3:
3320 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3321 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3322 		break;
3323 	case 2:
3324 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3325 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3326 		break;
3327 	case 1:
3328 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3329 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3330 		break;
3331 	default:
3332 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3333 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3334 	}
3335 
3336 	/*
3337 	 * Notify upper layers if trunk state changed.
3338 	 */
3339 	if ((state_prev != trunk->attr.state) ||
3340 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3341 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3342 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3343 	}
3344 }
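
/*
 * link_bm gathers one bit per trunk link that reported
 * BFA_TRUNK_LINK_STATE_UP; the switch above assumes a two-link trunk
 * and maps bitmaps 3/2/1/other to the "Trunk up(0,1)" / "Trunk
 * up(-,1)" / "Trunk up(0,-)" / "Trunk down" plog strings.
 */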
3345 
3346 static void
3347 bfa_trunk_iocdisable(struct bfa_s *bfa)
3348 {
3349 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3350 	int i = 0;
3351 
3352 	/*
3353 	 * In trunked mode, notify upper layers that link is down
3354 	 */
3355 	if (fcport->cfg.trunked) {
3356 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3357 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3358 
3359 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3360 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3361 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3362 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3363 			fcport->trunk.attr.link_attr[i].fctl =
3364 						BFA_TRUNK_LINK_FCTL_NORMAL;
3365 			fcport->trunk.attr.link_attr[i].link_state =
3366 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3367 			fcport->trunk.attr.link_attr[i].speed =
3368 						BFA_PORT_SPEED_UNKNOWN;
3369 			fcport->trunk.attr.link_attr[i].deskew = 0;
3370 		}
3371 	}
3372 }
3373 
3374 /*
3375  * Called to initialize port attributes
3376  */
3377 void
3378 bfa_fcport_init(struct bfa_s *bfa)
3379 {
3380 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3381 
3382 	/*
3383 	 * Initialize port attributes from IOC hardware data.
3384 	 */
3385 	bfa_fcport_set_wwns(fcport);
3386 	if (fcport->cfg.maxfrsize == 0)
3387 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3388 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3389 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3390 
3391 	if (bfa_fcport_is_pbcdisabled(bfa))
3392 		bfa->modules.port.pbc_disabled = BFA_TRUE;
3393 
3394 	WARN_ON(!fcport->cfg.maxfrsize);
3395 	WARN_ON(!fcport->cfg.rx_bbcredit);
3396 	WARN_ON(!fcport->speed_sup);
3397 }
3398 
3399 /*
3400  * Firmware message handler.
3401  */
3402 void
3403 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3404 {
3405 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3406 	union bfi_fcport_i2h_msg_u i2hmsg;
3407 
3408 	i2hmsg.msg = msg;
3409 	fcport->event_arg.i2hmsg = i2hmsg;
3410 
3411 	bfa_trc(bfa, msg->mhdr.msg_id);
3412 	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3413 
3414 	switch (msg->mhdr.msg_id) {
3415 	case BFI_FCPORT_I2H_ENABLE_RSP:
3416 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3417 
3418 			if (fcport->use_flash_cfg) {
3419 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3420 				fcport->cfg.maxfrsize =
3421 					cpu_to_be16(fcport->cfg.maxfrsize);
3422 				fcport->cfg.path_tov =
3423 					cpu_to_be16(fcport->cfg.path_tov);
3424 				fcport->cfg.q_depth =
3425 					cpu_to_be16(fcport->cfg.q_depth);
3426 
3427 				if (fcport->cfg.trunked)
3428 					fcport->trunk.attr.state =
3429 						BFA_TRUNK_OFFLINE;
3430 				else
3431 					fcport->trunk.attr.state =
3432 						BFA_TRUNK_DISABLED;
3433 				fcport->use_flash_cfg = BFA_FALSE;
3434 			}
3435 
3436 			if (fcport->cfg.qos_enabled)
3437 				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3438 			else
3439 				fcport->qos_attr.state = BFA_QOS_DISABLED;
3440 
3441 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3442 		}
3443 		break;
3444 
3445 	case BFI_FCPORT_I2H_DISABLE_RSP:
3446 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3447 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3448 		break;
3449 
3450 	case BFI_FCPORT_I2H_EVENT:
3451 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3452 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3453 		else
3454 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3455 		break;
3456 
3457 	case BFI_FCPORT_I2H_TRUNK_SCN:
3458 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3459 		break;
3460 
3461 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3462 		/*
3463 		 * check for timer pop before processing the rsp
3464 		 */
3465 		if (list_empty(&fcport->stats_pending_q) ||
3466 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3467 			break;
3468 
3469 		bfa_timer_stop(&fcport->timer);
3470 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3471 		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3472 		break;
3473 
3474 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3475 		/*
3476 		 * check for timer pop before processing the rsp
3477 		 */
3478 		if (list_empty(&fcport->statsclr_pending_q) ||
3479 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3480 			break;
3481 
3482 		bfa_timer_stop(&fcport->timer);
3483 		fcport->stats_status = BFA_STATUS_OK;
3484 		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3485 		break;
3486 
3487 	case BFI_FCPORT_I2H_ENABLE_AEN:
3488 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3489 		break;
3490 
3491 	case BFI_FCPORT_I2H_DISABLE_AEN:
3492 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3493 		break;
3494 
3495 	default:
3496 		WARN_ON(1);
3497 		break;
3498 	}
3499 }
3500 
3501 /*
3502  * Registered callback for port events.
3503  */
3504 void
3505 bfa_fcport_event_register(struct bfa_s *bfa,
3506 				void (*cbfn) (void *cbarg,
3507 				enum bfa_port_linkstate event),
3508 				void *cbarg)
3509 {
3510 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3511 
3512 	fcport->event_cbfn = cbfn;
3513 	fcport->event_cbarg = cbarg;
3514 }
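
/*
 * Typical usage (illustrative sketch only -- the handler name, helpers
 * and cbarg below are hypothetical, not part of this driver):
 *
 *	static void my_link_notify(void *cbarg,
 *				   enum bfa_port_linkstate event)
 *	{
 *		struct my_drv_s *drv = cbarg;
 *
 *		if (event == BFA_PORT_LINKUP)
 *			my_drv_online(drv);
 *		else
 *			my_drv_offline(drv);
 *	}
 *
 *	bfa_fcport_event_register(bfa, my_link_notify, drv);
 *
 * Only one callback is kept; a later registration replaces the
 * earlier one.
 */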
3515 
3516 bfa_status_t
3517 bfa_fcport_enable(struct bfa_s *bfa)
3518 {
3519 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3520 
3521 	if (bfa_fcport_is_pbcdisabled(bfa))
3522 		return BFA_STATUS_PBC;
3523 
3524 	if (bfa_ioc_is_disabled(&bfa->ioc))
3525 		return BFA_STATUS_IOC_DISABLED;
3526 
3527 	if (fcport->diag_busy)
3528 		return BFA_STATUS_DIAG_BUSY;
3529 
3530 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3531 	return BFA_STATUS_OK;
3532 }
3533 
3534 bfa_status_t
3535 bfa_fcport_disable(struct bfa_s *bfa)
3536 {
3537 	if (bfa_fcport_is_pbcdisabled(bfa))
3538 		return BFA_STATUS_PBC;
3539 
3540 	if (bfa_ioc_is_disabled(&bfa->ioc))
3541 		return BFA_STATUS_IOC_DISABLED;
3542 
3543 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3544 	return BFA_STATUS_OK;
3545 }
3546 
3547 /* If PBC is disabled on port, return error */
3548 bfa_status_t
3549 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3550 {
3551 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3552 	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3553 	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3554 
3555 	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3556 		bfa_trc(bfa, fcport->pwwn);
3557 		return BFA_STATUS_PBC;
3558 	}
3559 	return BFA_STATUS_OK;
3560 }
3561 
3562 /*
3563  * Configure port speed.
3564  */
3565 bfa_status_t
3566 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3567 {
3568 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3569 
3570 	bfa_trc(bfa, speed);
3571 
3572 	if (fcport->cfg.trunked == BFA_TRUE)
3573 		return BFA_STATUS_TRUNK_ENABLED;
3574 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3575 		bfa_trc(bfa, fcport->speed_sup);
3576 		return BFA_STATUS_UNSUPP_SPEED;
3577 	}
3578 
3579 	/* Port speed entered needs to be checked */
3580 	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3581 		/* For CT2, 1G is not supported */
3582 		if ((speed == BFA_PORT_SPEED_1GBPS) &&
3583 		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3584 			return BFA_STATUS_UNSUPP_SPEED;
3585 
3586 		/* Already checked for Auto Speed and Max Speed supp */
3587 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
3588 		      speed == BFA_PORT_SPEED_2GBPS ||
3589 		      speed == BFA_PORT_SPEED_4GBPS ||
3590 		      speed == BFA_PORT_SPEED_8GBPS ||
3591 		      speed == BFA_PORT_SPEED_16GBPS ||
3592 		      speed == BFA_PORT_SPEED_AUTO))
3593 			return BFA_STATUS_UNSUPP_SPEED;
3594 	} else {
3595 		if (speed != BFA_PORT_SPEED_10GBPS)
3596 			return BFA_STATUS_UNSUPP_SPEED;
3597 	}
3598 
3599 	fcport->cfg.speed = speed;
3600 
3601 	return BFA_STATUS_OK;
3602 }
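
/*
 * Editorial example: requesting a fixed 8G speed. The checks above mean
 * this fails on trunked ports, on adapters whose supported speed is
 * lower, and with BFA_STATUS_UNSUPP_SPEED for values outside the table.
 * The helper name is hypothetical.
 */
static bfa_status_t
example_set_fixed_speed(struct bfa_s *bfa)
{
	/* FCoE (non-FC IOC) ports accept only BFA_PORT_SPEED_10GBPS */
	return bfa_fcport_cfg_speed(bfa, BFA_PORT_SPEED_8GBPS);
}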
3603 
3604 /*
3605  * Get current speed.
3606  */
3607 enum bfa_port_speed
3608 bfa_fcport_get_speed(struct bfa_s *bfa)
3609 {
3610 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3611 
3612 	return fcport->speed;
3613 }
3614 
3615 /*
3616  * Configure port topology.
3617  */
3618 bfa_status_t
3619 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3620 {
3621 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3622 
3623 	bfa_trc(bfa, topology);
3624 	bfa_trc(bfa, fcport->cfg.topology);
3625 
3626 	switch (topology) {
3627 	case BFA_PORT_TOPOLOGY_P2P:
3628 	case BFA_PORT_TOPOLOGY_LOOP:
3629 	case BFA_PORT_TOPOLOGY_AUTO:
3630 		break;
3631 
3632 	default:
3633 		return BFA_STATUS_EINVAL;
3634 	}
3635 
3636 	fcport->cfg.topology = topology;
3637 	return BFA_STATUS_OK;
3638 }
3639 
3640 /*
3641  * Get current topology.
3642  */
3643 enum bfa_port_topology
3644 bfa_fcport_get_topology(struct bfa_s *bfa)
3645 {
3646 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3647 
3648 	return fcport->topology;
3649 }
3650 
3651 bfa_status_t
3652 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3653 {
3654 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3655 
3656 	bfa_trc(bfa, alpa);
3657 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3658 	bfa_trc(bfa, fcport->cfg.hardalpa);
3659 
3660 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3661 	fcport->cfg.hardalpa = alpa;
3662 
3663 	return BFA_STATUS_OK;
3664 }
3665 
3666 bfa_status_t
3667 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3668 {
3669 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3670 
3671 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3672 	bfa_trc(bfa, fcport->cfg.hardalpa);
3673 
3674 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3675 	return BFA_STATUS_OK;
3676 }
3677 
3678 bfa_boolean_t
3679 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3680 {
3681 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3682 
3683 	*alpa = fcport->cfg.hardalpa;
3684 	return fcport->cfg.cfg_hardalpa;
3685 }
3686 
3687 u8
3688 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3689 {
3690 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3691 
3692 	return fcport->myalpa;
3693 }
3694 
3695 bfa_status_t
3696 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3697 {
3698 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3699 
3700 	bfa_trc(bfa, maxfrsize);
3701 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3702 
3703 	/* must be within range */
3704 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3705 		return BFA_STATUS_INVLD_DFSZ;
3706 
3707 	/* must be a power of 2, unless it is the max frame size of 2112 */
3708 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3709 		return BFA_STATUS_INVLD_DFSZ;
3710 
3711 	fcport->cfg.maxfrsize = maxfrsize;
3712 	return BFA_STATUS_OK;
3713 }
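
/*
 * Editorial note: the validation above relies on the (x & (x - 1)) == 0
 * power-of-two idiom, with FC_MAX_PDUSZ (2112) as the one permitted
 * non-power-of-two value. A standalone illustration with a hypothetical
 * helper:
 */
static bfa_boolean_t
example_is_valid_frmsize(u16 sz)
{
	if (sz > FC_MAX_PDUSZ || sz < FC_MIN_PDUSZ)
		return BFA_FALSE;	/* out of range */

	/* e.g. 1024 & 1023 == 0 -> valid; 2000 & 1999 != 0 -> invalid */
	return (sz == FC_MAX_PDUSZ || !(sz & (sz - 1))) ?
		BFA_TRUE : BFA_FALSE;
}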
3714 
3715 u16
3716 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3717 {
3718 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719 
3720 	return fcport->cfg.maxfrsize;
3721 }
3722 
3723 u8
3724 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3725 {
3726 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3727 
3728 	return fcport->cfg.rx_bbcredit;
3729 }
3730 
3731 void
3732 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3733 {
3734 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3735 
3736 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3737 	fcport->cfg.bb_scn = bb_scn;
3738 	if (bb_scn)
3739 		fcport->bbsc_op_state = BFA_TRUE;
3740 }
3741 
3742 /*
3743  * Get port attributes.
3744  */
3745 
3746 wwn_t
3747 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3748 {
3749 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3750 	if (node)
3751 		return fcport->nwwn;
3752 	else
3753 		return fcport->pwwn;
3754 }
3755 
3756 void
3757 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3758 {
3759 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3760 
3761 	memset(attr, 0, sizeof(struct bfa_port_attr_s));
3762 
3763 	attr->nwwn = fcport->nwwn;
3764 	attr->pwwn = fcport->pwwn;
3765 
3766 	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
3767 	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
3768 
3769 	memcpy(&attr->pport_cfg, &fcport->cfg,
3770 		sizeof(struct bfa_port_cfg_s));
3771 	/* speed attributes */
3772 	attr->pport_cfg.speed = fcport->cfg.speed;
3773 	attr->speed_supported = fcport->speed_sup;
3774 	attr->speed = fcport->speed;
3775 	attr->cos_supported = FC_CLASS_3;
3776 
3777 	/* topology attributes */
3778 	attr->pport_cfg.topology = fcport->cfg.topology;
3779 	attr->topology = fcport->topology;
3780 	attr->pport_cfg.trunked = fcport->cfg.trunked;
3781 
3782 	/* beacon attributes */
3783 	attr->beacon = fcport->beacon;
3784 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
3785 
3786 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3787 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3788 	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3789 	attr->bbsc_op_status =  fcport->bbsc_op_state;
3790 
3791 	/* PBC Disabled State */
3792 	if (bfa_fcport_is_pbcdisabled(bfa))
3793 		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3794 	else {
3795 		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3796 			attr->port_state = BFA_PORT_ST_IOCDIS;
3797 		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3798 			attr->port_state = BFA_PORT_ST_FWMISMATCH;
3799 	}
3800 
3801 	/* FCoE vlan */
3802 	attr->fcoe_vlan = fcport->fcoe_vlan;
3803 }
3804 
3805 #define BFA_FCPORT_STATS_TOV	1000
3806 
3807 /*
3808  * Fetch port statistics (FCQoS or FCoE).
3809  */
3810 bfa_status_t
3811 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3812 {
3813 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3814 
3815 	if (bfa_ioc_is_disabled(&bfa->ioc))
3816 		return BFA_STATUS_IOC_DISABLED;
3817 
3818 	if (!list_empty(&fcport->statsclr_pending_q))
3819 		return BFA_STATUS_DEVBUSY;
3820 
3821 	if (list_empty(&fcport->stats_pending_q)) {
3822 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3823 		bfa_fcport_send_stats_get(fcport);
3824 		bfa_timer_start(bfa, &fcport->timer,
3825 				bfa_fcport_stats_get_timeout,
3826 				fcport, BFA_FCPORT_STATS_TOV);
3827 	} else
3828 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3829 
3830 	return BFA_STATUS_OK;
3831 }
3832 
3833 /*
3834  * Reset port statistics (FCQoS or FCoE).
3835  */
3836 bfa_status_t
3837 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3838 {
3839 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3840 
3841 	if (!list_empty(&fcport->stats_pending_q))
3842 		return BFA_STATUS_DEVBUSY;
3843 
3844 	if (list_empty(&fcport->statsclr_pending_q)) {
3845 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3846 		bfa_fcport_send_stats_clear(fcport);
3847 		bfa_timer_start(bfa, &fcport->timer,
3848 				bfa_fcport_stats_clr_timeout,
3849 				fcport, BFA_FCPORT_STATS_TOV);
3850 	} else
3851 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3852 
3853 	return BFA_STATUS_OK;
3854 }
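
/*
 * Editorial example: both stats calls are asynchronous. A fetch while a
 * clear is pending (or vice versa) fails with BFA_STATUS_DEVBUSY, while
 * a second request of the same kind simply joins the pending queue and
 * completes with the first. The callback shape below follows the generic
 * hcb-queue convention and is an assumption, as are the helper names.
 */
static void
example_stats_done(void *cbarg, bfa_boolean_t complete)
{
	/* runs once the firmware response arrives or the
	 * BFA_FCPORT_STATS_TOV timer pops (BFA_STATUS_ETIMER) */
}

static bfa_status_t
example_fetch_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	/* cb must be initialized by the caller with example_stats_done,
	 * its argument, and a result buffer before this call */
	return bfa_fcport_get_stats(bfa, cb);
}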
3855 
3856 /*
3857  * Check whether the port is disabled.
3858  */
3859 bfa_boolean_t
3860 bfa_fcport_is_disabled(struct bfa_s *bfa)
3861 {
3862 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3863 
3864 	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3865 		BFA_PORT_ST_DISABLED;
3866 
3867 }
3868 
3869 bfa_boolean_t
3870 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3871 {
3872 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3873 
3874 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3875 
3876 }
3877 
3878 /*
3879  *	Enable/Disable FAA feature in port config
3880  */
3881 void
3882 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3883 {
3884 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3885 
3886 	bfa_trc(bfa, state);
3887 	fcport->cfg.faa_state = state;
3888 }
3889 
3890 /*
3891  * Get default minimum ratelim speed
3892  */
3893 enum bfa_port_speed
3894 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3895 {
3896 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3897 
3898 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
3899 	return fcport->cfg.trl_def_speed;
3900 
3901 }
3902 
3903 void
3904 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3905 		  bfa_boolean_t link_e2e_beacon)
3906 {
3907 	struct bfa_s *bfa = dev;
3908 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3909 
3910 	bfa_trc(bfa, beacon);
3911 	bfa_trc(bfa, link_e2e_beacon);
3912 	bfa_trc(bfa, fcport->beacon);
3913 	bfa_trc(bfa, fcport->link_e2e_beacon);
3914 
3915 	fcport->beacon = beacon;
3916 	fcport->link_e2e_beacon = link_e2e_beacon;
3917 }
3918 
3919 bfa_boolean_t
3920 bfa_fcport_is_linkup(struct bfa_s *bfa)
3921 {
3922 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3923 
3924 	return	(!fcport->cfg.trunked &&
3925 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3926 		(fcport->cfg.trunked &&
3927 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3928 }
3929 
3930 bfa_boolean_t
3931 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3932 {
3933 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3934 
3935 	return fcport->cfg.qos_enabled;
3936 }
3937 
3938 bfa_boolean_t
3939 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3940 {
3941 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3942 
3943 	return fcport->cfg.trunked;
3944 }
3945 
3946 /*
3947  * Rport State machine functions
3948  */
3949 /*
3950  * Beginning state, only online event expected.
3951  */
3952 static void
3953 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3954 {
3955 	bfa_trc(rp->bfa, rp->rport_tag);
3956 	bfa_trc(rp->bfa, event);
3957 
3958 	switch (event) {
3959 	case BFA_RPORT_SM_CREATE:
3960 		bfa_stats(rp, sm_un_cr);
3961 		bfa_sm_set_state(rp, bfa_rport_sm_created);
3962 		break;
3963 
3964 	default:
3965 		bfa_stats(rp, sm_un_unexp);
3966 		bfa_sm_fault(rp->bfa, event);
3967 	}
3968 }
3969 
3970 static void
3971 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
3972 {
3973 	bfa_trc(rp->bfa, rp->rport_tag);
3974 	bfa_trc(rp->bfa, event);
3975 
3976 	switch (event) {
3977 	case BFA_RPORT_SM_ONLINE:
3978 		bfa_stats(rp, sm_cr_on);
3979 		if (bfa_rport_send_fwcreate(rp))
3980 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
3981 		else
3982 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
3983 		break;
3984 
3985 	case BFA_RPORT_SM_DELETE:
3986 		bfa_stats(rp, sm_cr_del);
3987 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
3988 		bfa_rport_free(rp);
3989 		break;
3990 
3991 	case BFA_RPORT_SM_HWFAIL:
3992 		bfa_stats(rp, sm_cr_hwf);
3993 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
3994 		break;
3995 
3996 	default:
3997 		bfa_stats(rp, sm_cr_unexp);
3998 		bfa_sm_fault(rp->bfa, event);
3999 	}
4000 }
4001 
4002 /*
4003  * Waiting for rport create response from firmware.
4004  */
4005 static void
4006 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4007 {
4008 	bfa_trc(rp->bfa, rp->rport_tag);
4009 	bfa_trc(rp->bfa, event);
4010 
4011 	switch (event) {
4012 	case BFA_RPORT_SM_FWRSP:
4013 		bfa_stats(rp, sm_fwc_rsp);
4014 		bfa_sm_set_state(rp, bfa_rport_sm_online);
4015 		bfa_rport_online_cb(rp);
4016 		break;
4017 
4018 	case BFA_RPORT_SM_DELETE:
4019 		bfa_stats(rp, sm_fwc_del);
4020 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4021 		break;
4022 
4023 	case BFA_RPORT_SM_OFFLINE:
4024 		bfa_stats(rp, sm_fwc_off);
4025 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4026 		break;
4027 
4028 	case BFA_RPORT_SM_HWFAIL:
4029 		bfa_stats(rp, sm_fwc_hwf);
4030 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4031 		break;
4032 
4033 	default:
4034 		bfa_stats(rp, sm_fwc_unexp);
4035 		bfa_sm_fault(rp->bfa, event);
4036 	}
4037 }
4038 
4039 /*
4040  * Request queue is full, awaiting queue resume to send create request.
4041  */
4042 static void
4043 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4044 {
4045 	bfa_trc(rp->bfa, rp->rport_tag);
4046 	bfa_trc(rp->bfa, event);
4047 
4048 	switch (event) {
4049 	case BFA_RPORT_SM_QRESUME:
4050 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4051 		bfa_rport_send_fwcreate(rp);
4052 		break;
4053 
4054 	case BFA_RPORT_SM_DELETE:
4055 		bfa_stats(rp, sm_fwc_del);
4056 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4057 		bfa_reqq_wcancel(&rp->reqq_wait);
4058 		bfa_rport_free(rp);
4059 		break;
4060 
4061 	case BFA_RPORT_SM_OFFLINE:
4062 		bfa_stats(rp, sm_fwc_off);
4063 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4064 		bfa_reqq_wcancel(&rp->reqq_wait);
4065 		bfa_rport_offline_cb(rp);
4066 		break;
4067 
4068 	case BFA_RPORT_SM_HWFAIL:
4069 		bfa_stats(rp, sm_fwc_hwf);
4070 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4071 		bfa_reqq_wcancel(&rp->reqq_wait);
4072 		break;
4073 
4074 	default:
4075 		bfa_stats(rp, sm_fwc_unexp);
4076 		bfa_sm_fault(rp->bfa, event);
4077 	}
4078 }
4079 
4080 /*
4081  * Online state - normal parking state.
4082  */
4083 static void
4084 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4085 {
4086 	struct bfi_rport_qos_scn_s *qos_scn;
4087 
4088 	bfa_trc(rp->bfa, rp->rport_tag);
4089 	bfa_trc(rp->bfa, event);
4090 
4091 	switch (event) {
4092 	case BFA_RPORT_SM_OFFLINE:
4093 		bfa_stats(rp, sm_on_off);
4094 		if (bfa_rport_send_fwdelete(rp))
4095 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4096 		else
4097 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4098 		break;
4099 
4100 	case BFA_RPORT_SM_DELETE:
4101 		bfa_stats(rp, sm_on_del);
4102 		if (bfa_rport_send_fwdelete(rp))
4103 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4104 		else
4105 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4106 		break;
4107 
4108 	case BFA_RPORT_SM_HWFAIL:
4109 		bfa_stats(rp, sm_on_hwf);
4110 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4111 		break;
4112 
4113 	case BFA_RPORT_SM_SET_SPEED:
4114 		bfa_rport_send_fwspeed(rp);
4115 		break;
4116 
4117 	case BFA_RPORT_SM_QOS_SCN:
4118 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4119 		rp->qos_attr = qos_scn->new_qos_attr;
4120 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4121 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4122 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4123 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4124 
4125 		qos_scn->old_qos_attr.qos_flow_id  =
4126 			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4127 		qos_scn->new_qos_attr.qos_flow_id  =
4128 			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4129 
4130 		if (qos_scn->old_qos_attr.qos_flow_id !=
4131 			qos_scn->new_qos_attr.qos_flow_id)
4132 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4133 						    qos_scn->old_qos_attr,
4134 						    qos_scn->new_qos_attr);
4135 		if (qos_scn->old_qos_attr.qos_priority !=
4136 			qos_scn->new_qos_attr.qos_priority)
4137 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4138 						  qos_scn->old_qos_attr,
4139 						  qos_scn->new_qos_attr);
4140 		break;
4141 
4142 	default:
4143 		bfa_stats(rp, sm_on_unexp);
4144 		bfa_sm_fault(rp->bfa, event);
4145 	}
4146 }
4147 
4148 /*
4149  * Firmware rport is being deleted - awaiting f/w response.
4150  */
4151 static void
4152 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4153 {
4154 	bfa_trc(rp->bfa, rp->rport_tag);
4155 	bfa_trc(rp->bfa, event);
4156 
4157 	switch (event) {
4158 	case BFA_RPORT_SM_FWRSP:
4159 		bfa_stats(rp, sm_fwd_rsp);
4160 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4161 		bfa_rport_offline_cb(rp);
4162 		break;
4163 
4164 	case BFA_RPORT_SM_DELETE:
4165 		bfa_stats(rp, sm_fwd_del);
4166 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4167 		break;
4168 
4169 	case BFA_RPORT_SM_HWFAIL:
4170 		bfa_stats(rp, sm_fwd_hwf);
4171 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4172 		bfa_rport_offline_cb(rp);
4173 		break;
4174 
4175 	default:
4176 		bfa_stats(rp, sm_fwd_unexp);
4177 		bfa_sm_fault(rp->bfa, event);
4178 	}
4179 }
4180 
4181 static void
4182 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4183 {
4184 	bfa_trc(rp->bfa, rp->rport_tag);
4185 	bfa_trc(rp->bfa, event);
4186 
4187 	switch (event) {
4188 	case BFA_RPORT_SM_QRESUME:
4189 		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4190 		bfa_rport_send_fwdelete(rp);
4191 		break;
4192 
4193 	case BFA_RPORT_SM_DELETE:
4194 		bfa_stats(rp, sm_fwd_del);
4195 		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4196 		break;
4197 
4198 	case BFA_RPORT_SM_HWFAIL:
4199 		bfa_stats(rp, sm_fwd_hwf);
4200 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4201 		bfa_reqq_wcancel(&rp->reqq_wait);
4202 		bfa_rport_offline_cb(rp);
4203 		break;
4204 
4205 	default:
4206 		bfa_stats(rp, sm_fwd_unexp);
4207 		bfa_sm_fault(rp->bfa, event);
4208 	}
4209 }
4210 
4211 /*
4212  * Offline state.
4213  */
4214 static void
4215 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4216 {
4217 	bfa_trc(rp->bfa, rp->rport_tag);
4218 	bfa_trc(rp->bfa, event);
4219 
4220 	switch (event) {
4221 	case BFA_RPORT_SM_DELETE:
4222 		bfa_stats(rp, sm_off_del);
4223 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4224 		bfa_rport_free(rp);
4225 		break;
4226 
4227 	case BFA_RPORT_SM_ONLINE:
4228 		bfa_stats(rp, sm_off_on);
4229 		if (bfa_rport_send_fwcreate(rp))
4230 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4231 		else
4232 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4233 		break;
4234 
4235 	case BFA_RPORT_SM_HWFAIL:
4236 		bfa_stats(rp, sm_off_hwf);
4237 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4238 		break;
4239 
4240 	default:
4241 		bfa_stats(rp, sm_off_unexp);
4242 		bfa_sm_fault(rp->bfa, event);
4243 	}
4244 }
4245 
4246 /*
4247  * Rport is deleted, waiting for firmware response to delete.
4248  */
4249 static void
4250 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4251 {
4252 	bfa_trc(rp->bfa, rp->rport_tag);
4253 	bfa_trc(rp->bfa, event);
4254 
4255 	switch (event) {
4256 	case BFA_RPORT_SM_FWRSP:
4257 		bfa_stats(rp, sm_del_fwrsp);
4258 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4259 		bfa_rport_free(rp);
4260 		break;
4261 
4262 	case BFA_RPORT_SM_HWFAIL:
4263 		bfa_stats(rp, sm_del_hwf);
4264 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4265 		bfa_rport_free(rp);
4266 		break;
4267 
4268 	default:
4269 		bfa_sm_fault(rp->bfa, event);
4270 	}
4271 }
4272 
4273 static void
4274 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4275 {
4276 	bfa_trc(rp->bfa, rp->rport_tag);
4277 	bfa_trc(rp->bfa, event);
4278 
4279 	switch (event) {
4280 	case BFA_RPORT_SM_QRESUME:
4281 		bfa_stats(rp, sm_del_fwrsp);
4282 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4283 		bfa_rport_send_fwdelete(rp);
4284 		break;
4285 
4286 	case BFA_RPORT_SM_HWFAIL:
4287 		bfa_stats(rp, sm_del_hwf);
4288 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4289 		bfa_reqq_wcancel(&rp->reqq_wait);
4290 		bfa_rport_free(rp);
4291 		break;
4292 
4293 	default:
4294 		bfa_sm_fault(rp->bfa, event);
4295 	}
4296 }
4297 
4298 /*
4299  * Waiting for rport create response from firmware. A delete is pending.
4300  */
4301 static void
4302 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4303 				enum bfa_rport_event event)
4304 {
4305 	bfa_trc(rp->bfa, rp->rport_tag);
4306 	bfa_trc(rp->bfa, event);
4307 
4308 	switch (event) {
4309 	case BFA_RPORT_SM_FWRSP:
4310 		bfa_stats(rp, sm_delp_fwrsp);
4311 		if (bfa_rport_send_fwdelete(rp))
4312 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4313 		else
4314 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4315 		break;
4316 
4317 	case BFA_RPORT_SM_HWFAIL:
4318 		bfa_stats(rp, sm_delp_hwf);
4319 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4320 		bfa_rport_free(rp);
4321 		break;
4322 
4323 	default:
4324 		bfa_stats(rp, sm_delp_unexp);
4325 		bfa_sm_fault(rp->bfa, event);
4326 	}
4327 }
4328 
4329 /*
4330  * Waiting for rport create response from firmware. Rport offline is pending.
4331  */
4332 static void
4333 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4334 				 enum bfa_rport_event event)
4335 {
4336 	bfa_trc(rp->bfa, rp->rport_tag);
4337 	bfa_trc(rp->bfa, event);
4338 
4339 	switch (event) {
4340 	case BFA_RPORT_SM_FWRSP:
4341 		bfa_stats(rp, sm_offp_fwrsp);
4342 		if (bfa_rport_send_fwdelete(rp))
4343 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4344 		else
4345 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4346 		break;
4347 
4348 	case BFA_RPORT_SM_DELETE:
4349 		bfa_stats(rp, sm_offp_del);
4350 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4351 		break;
4352 
4353 	case BFA_RPORT_SM_HWFAIL:
4354 		bfa_stats(rp, sm_offp_hwf);
4355 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4356 		break;
4357 
4358 	default:
4359 		bfa_stats(rp, sm_offp_unexp);
4360 		bfa_sm_fault(rp->bfa, event);
4361 	}
4362 }
4363 
4364 /*
4365  * IOC h/w failed.
4366  */
4367 static void
4368 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4369 {
4370 	bfa_trc(rp->bfa, rp->rport_tag);
4371 	bfa_trc(rp->bfa, event);
4372 
4373 	switch (event) {
4374 	case BFA_RPORT_SM_OFFLINE:
4375 		bfa_stats(rp, sm_iocd_off);
4376 		bfa_rport_offline_cb(rp);
4377 		break;
4378 
4379 	case BFA_RPORT_SM_DELETE:
4380 		bfa_stats(rp, sm_iocd_del);
4381 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4382 		bfa_rport_free(rp);
4383 		break;
4384 
4385 	case BFA_RPORT_SM_ONLINE:
4386 		bfa_stats(rp, sm_iocd_on);
4387 		if (bfa_rport_send_fwcreate(rp))
4388 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4389 		else
4390 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4391 		break;
4392 
4393 	case BFA_RPORT_SM_HWFAIL:
4394 		break;
4395 
4396 	default:
4397 		bfa_stats(rp, sm_iocd_unexp);
4398 		bfa_sm_fault(rp->bfa, event);
4399 	}
4400 }
4401 
4402 
4403 
4404 /*
4405  *  bfa_rport_private BFA rport private functions
4406  */
4407 
4408 static void
4409 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4410 {
4411 	struct bfa_rport_s *rp = cbarg;
4412 
4413 	if (complete)
4414 		bfa_cb_rport_online(rp->rport_drv);
4415 }
4416 
4417 static void
4418 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4419 {
4420 	struct bfa_rport_s *rp = cbarg;
4421 
4422 	if (complete)
4423 		bfa_cb_rport_offline(rp->rport_drv);
4424 }
4425 
4426 static void
4427 bfa_rport_qresume(void *cbarg)
4428 {
4429 	struct bfa_rport_s	*rp = cbarg;
4430 
4431 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4432 }
4433 
4434 static void
4435 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4436 		struct bfa_s *bfa)
4437 {
4438 	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4439 
4440 	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4441 		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4442 
4443 	/* kva memory */
4444 	bfa_mem_kva_setup(minfo, rport_kva,
4445 		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4446 }
4447 
4448 static void
4449 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4450 		struct bfa_pcidev_s *pcidev)
4451 {
4452 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4453 	struct bfa_rport_s *rp;
4454 	u16 i;
4455 
4456 	INIT_LIST_HEAD(&mod->rp_free_q);
4457 	INIT_LIST_HEAD(&mod->rp_active_q);
4458 	INIT_LIST_HEAD(&mod->rp_unused_q);
4459 
4460 	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4461 	mod->rps_list = rp;
4462 	mod->num_rports = cfg->fwcfg.num_rports;
4463 
4464 	WARN_ON(!mod->num_rports ||
4465 		   (mod->num_rports & (mod->num_rports - 1)));
4466 
4467 	for (i = 0; i < mod->num_rports; i++, rp++) {
4468 		memset(rp, 0, sizeof(struct bfa_rport_s));
4469 		rp->bfa = bfa;
4470 		rp->rport_tag = i;
4471 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4472 
4473 		/*
4474 		 * rport tag 0 is reserved and never put on the free list
4475 		 */
4476 		if (i)
4477 			list_add_tail(&rp->qe, &mod->rp_free_q);
4478 
4479 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4480 	}
4481 
4482 	/*
4483 	 * consume memory
4484 	 */
4485 	bfa_mem_kva_curp(mod) = (u8 *) rp;
4486 }
4487 
4488 static void
4489 bfa_rport_detach(struct bfa_s *bfa)
4490 {
4491 }
4492 
4493 static void
4494 bfa_rport_start(struct bfa_s *bfa)
4495 {
4496 }
4497 
4498 static void
4499 bfa_rport_stop(struct bfa_s *bfa)
4500 {
4501 }
4502 
4503 static void
4504 bfa_rport_iocdisable(struct bfa_s *bfa)
4505 {
4506 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4507 	struct bfa_rport_s *rport;
4508 	struct list_head *qe, *qen;
4509 
4510 	/* Enqueue unused rport resources to free_q */
4511 	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4512 
4513 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4514 		rport = (struct bfa_rport_s *) qe;
4515 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4516 	}
4517 }
4518 
4519 static struct bfa_rport_s *
4520 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4521 {
4522 	struct bfa_rport_s *rport;
4523 
4524 	bfa_q_deq(&mod->rp_free_q, &rport);
4525 	if (rport)
4526 		list_add_tail(&rport->qe, &mod->rp_active_q);
4527 
4528 	return rport;
4529 }
4530 
4531 static void
4532 bfa_rport_free(struct bfa_rport_s *rport)
4533 {
4534 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4535 
4536 	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4537 	list_del(&rport->qe);
4538 	list_add_tail(&rport->qe, &mod->rp_free_q);
4539 }
4540 
4541 static bfa_boolean_t
4542 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4543 {
4544 	struct bfi_rport_create_req_s *m;
4545 
4546 	/*
4547 	 * check for room in queue to send request now
4548 	 */
4549 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4550 	if (!m) {
4551 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4552 		return BFA_FALSE;
4553 	}
4554 
4555 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4556 			bfa_fn_lpu(rp->bfa));
4557 	m->bfa_handle = rp->rport_tag;
4558 	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4559 	m->pid = rp->rport_info.pid;
4560 	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4561 	m->local_pid = rp->rport_info.local_pid;
4562 	m->fc_class = rp->rport_info.fc_class;
4563 	m->vf_en = rp->rport_info.vf_en;
4564 	m->vf_id = rp->rport_info.vf_id;
4565 	m->cisc = rp->rport_info.cisc;
4566 
4567 	/*
4568 	 * queue I/O message to firmware
4569 	 */
4570 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4571 	return BFA_TRUE;
4572 }
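
/*
 * Editorial note: bfa_rport_send_fwcreate() is the canonical instance of
 * the request-queue idiom used throughout this file -- claim a CQ slot,
 * or park a wait element so the qresume callback can retry later. The
 * shape, compiled out, with the surrounding context assumed:
 */
#if 0
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);	/* claim a slot */
	if (!m) {
		/* queue full: wait; bfa_rport_qresume() fires QRESUME */
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;	/* caller enters a *_qfull state */
	}
	/* ... build the BFI message in place ... */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);	/* doorbell */
#endif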
4573 
4574 static bfa_boolean_t
4575 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4576 {
4577 	struct bfi_rport_delete_req_s *m;
4578 
4579 	/*
4580 	 * check for room in queue to send request now
4581 	 */
4582 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4583 	if (!m) {
4584 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4585 		return BFA_FALSE;
4586 	}
4587 
4588 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4589 			bfa_fn_lpu(rp->bfa));
4590 	m->fw_handle = rp->fw_handle;
4591 
4592 	/*
4593 	 * queue I/O message to firmware
4594 	 */
4595 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4596 	return BFA_TRUE;
4597 }
4598 
4599 static bfa_boolean_t
4600 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4601 {
4602 	struct bfa_rport_speed_req_s *m;
4603 
4604 	/*
4605 	 * check for room in queue to send request now
4606 	 */
4607 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4608 	if (!m) {
4609 		bfa_trc(rp->bfa, rp->rport_info.speed);
4610 		return BFA_FALSE;
4611 	}
4612 
4613 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4614 			bfa_fn_lpu(rp->bfa));
4615 	m->fw_handle = rp->fw_handle;
4616 	m->speed = (u8)rp->rport_info.speed;
4617 
4618 	/*
4619 	 * queue I/O message to firmware
4620 	 */
4621 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4622 	return BFA_TRUE;
4623 }
4624 
4625 
4626 
4627 /*
4628  *  bfa_rport_public
4629  */
4630 
4631 /*
4632  * Rport interrupt processing.
4633  */
4634 void
4635 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4636 {
4637 	union bfi_rport_i2h_msg_u msg;
4638 	struct bfa_rport_s *rp;
4639 
4640 	bfa_trc(bfa, m->mhdr.msg_id);
4641 
4642 	msg.msg = m;
4643 
4644 	switch (m->mhdr.msg_id) {
4645 	case BFI_RPORT_I2H_CREATE_RSP:
4646 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4647 		rp->fw_handle = msg.create_rsp->fw_handle;
4648 		rp->qos_attr = msg.create_rsp->qos_attr;
4649 		bfa_rport_set_lunmask(bfa, rp);
4650 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4651 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4652 		break;
4653 
4654 	case BFI_RPORT_I2H_DELETE_RSP:
4655 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4656 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4657 		bfa_rport_unset_lunmask(bfa, rp);
4658 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4659 		break;
4660 
4661 	case BFI_RPORT_I2H_QOS_SCN:
4662 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4663 		rp->event_arg.fw_msg = msg.qos_scn_evt;
4664 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4665 		break;
4666 
4667 	default:
4668 		bfa_trc(bfa, m->mhdr.msg_id);
4669 		WARN_ON(1);
4670 	}
4671 }
4672 
4673 void
4674 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4675 {
4676 	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
4677 	struct list_head	*qe;
4678 	int	i;
4679 
4680 	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4681 		bfa_q_deq_tail(&mod->rp_free_q, &qe);
4682 		list_add_tail(qe, &mod->rp_unused_q);
4683 	}
4684 }
4685 
4686 /*
4687  *  bfa_rport_api
4688  */
4689 
4690 struct bfa_rport_s *
4691 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4692 {
4693 	struct bfa_rport_s *rp;
4694 
4695 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4696 
4697 	if (rp == NULL)
4698 		return NULL;
4699 
4700 	rp->bfa = bfa;
4701 	rp->rport_drv = rport_drv;
4702 	memset(&rp->stats, 0, sizeof(rp->stats));
4703 
4704 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4705 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4706 
4707 	return rp;
4708 }
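
/*
 * Editorial example: the usual rport bring-up sequence driven by the FCS
 * layer. The helper is hypothetical; the bfa_rport_info_s fields named in
 * the comment are the ones consumed by bfa_rport_send_fwcreate() above.
 */
static struct bfa_rport_s *
example_rport_bringup(struct bfa_s *bfa, void *rport_drv,
			struct bfa_rport_info_s *info)
{
	struct bfa_rport_s *rp = bfa_rport_create(bfa, rport_drv);

	if (rp == NULL)
		return NULL;	/* rport free list exhausted */

	/* caller fills info->pid, info->lp_tag, info->max_frmsz, ...;
	 * a zero max_frmsz is patched to FC_MIN_PDUSZ by bfa_rport_online()
	 */
	bfa_rport_online(rp, info);
	return rp;
}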
4709 
4710 void
4711 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4712 {
4713 	WARN_ON(rport_info->max_frmsz == 0);
4714 
4715 	/*
4716 	 * Some JBODs do not set the PDU size correctly in their PLOGI
4717 	 * responses; default to the minimum size.
4718 	 */
4719 	if (rport_info->max_frmsz == 0) {
4720 		bfa_trc(rport->bfa, rport->rport_tag);
4721 		rport_info->max_frmsz = FC_MIN_PDUSZ;
4722 	}
4723 
4724 	rport->rport_info = *rport_info;
4725 	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4726 }
4727 
4728 void
4729 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4730 {
4731 	WARN_ON(speed == 0);
4732 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4733 
4734 	rport->rport_info.speed = speed;
4735 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4736 }
4737 
4738 /* Set Rport LUN Mask */
4739 void
4740 bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4741 {
4742 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
4743 	wwn_t	lp_wwn, rp_wwn;
4744 	u8 lp_tag = (u8)rp->rport_info.lp_tag;
4745 
4746 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4747 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4748 
4749 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4750 					rp->lun_mask = BFA_TRUE;
4751 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4752 }
4753 
4754 /* Unset Rport LUN mask */
4755 void
4756 bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4757 {
4758 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
4759 	wwn_t	lp_wwn, rp_wwn;
4760 
4761 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4762 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4763 
4764 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4765 				rp->lun_mask = BFA_FALSE;
4766 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4767 			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4768 }
4769 
4770 /*
4771  * SGPG related functions
4772  */
4773 
4774 /*
4775  * Compute and return memory needed by the SGPG module.
4776  */
4777 static void
4778 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4779 		struct bfa_s *bfa)
4780 {
4781 	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4782 	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4783 	struct bfa_mem_dma_s *seg_ptr;
4784 	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
4785 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
4786 
4787 	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4788 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4789 	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4790 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
4791 
4792 	num_sgpg = cfg->drvcfg.num_sgpgs;
4793 
4794 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4795 	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
4796 
4797 	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4798 		if (num_sgpg >= per_seg_sgpg) {
4799 			num_sgpg -= per_seg_sgpg;
4800 			bfa_mem_dma_setup(minfo, seg_ptr,
4801 					per_seg_sgpg * sgpg_sz);
4802 		} else
4803 			bfa_mem_dma_setup(minfo, seg_ptr,
4804 					num_sgpg * sgpg_sz);
4805 	}
4806 
4807 	/* kva memory */
4808 	bfa_mem_kva_setup(minfo, sgpg_kva,
4809 		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
4810 }
4811 
4812 static void
4813 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4814 		struct bfa_pcidev_s *pcidev)
4815 {
4816 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4817 	struct bfa_sgpg_s *hsgpg;
4818 	struct bfi_sgpg_s *sgpg;
4819 	u64 align_len;
4820 	struct bfa_mem_dma_s *seg_ptr;
4821 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
4822 	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;
4823 
4824 	union {
4825 		u64 pa;
4826 		union bfi_addr_u addr;
4827 	} sgpg_pa, sgpg_pa_tmp;
4828 
4829 	INIT_LIST_HEAD(&mod->sgpg_q);
4830 	INIT_LIST_HEAD(&mod->sgpg_wait_q);
4831 
4832 	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4833 
4834 	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4835 
4836 	num_sgpg = cfg->drvcfg.num_sgpgs;
4837 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4838 
4839 	/* dma/kva mem claim */
4840 	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
4841 
4842 	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
4843 
4844 		if (!bfa_mem_dma_virt(seg_ptr))
4845 			break;
4846 
4847 		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
4848 					     bfa_mem_dma_phys(seg_ptr);
4849 
4850 		sgpg = (struct bfi_sgpg_s *)
4851 			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
4852 		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
4853 		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
4854 
4855 		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
4856 
4857 		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
4858 			memset(hsgpg, 0, sizeof(*hsgpg));
4859 			memset(sgpg, 0, sizeof(*sgpg));
4860 
4861 			hsgpg->sgpg = sgpg;
4862 			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4863 			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4864 			list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4865 
4866 			sgpg++;
4867 			hsgpg++;
4868 			sgpg_pa.pa += sgpg_sz;
4869 		}
4870 	}
4871 
4872 	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
4873 }
4874 
4875 static void
4876 bfa_sgpg_detach(struct bfa_s *bfa)
4877 {
4878 }
4879 
4880 static void
4881 bfa_sgpg_start(struct bfa_s *bfa)
4882 {
4883 }
4884 
4885 static void
4886 bfa_sgpg_stop(struct bfa_s *bfa)
4887 {
4888 }
4889 
4890 static void
4891 bfa_sgpg_iocdisable(struct bfa_s *bfa)
4892 {
4893 }
4894 
4895 bfa_status_t
4896 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4897 {
4898 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4899 	struct bfa_sgpg_s *hsgpg;
4900 	int i;
4901 
4902 	if (mod->free_sgpgs < nsgpgs)
4903 		return BFA_STATUS_ENOMEM;
4904 
4905 	for (i = 0; i < nsgpgs; i++) {
4906 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
4907 		WARN_ON(!hsgpg);
4908 		list_add_tail(&hsgpg->qe, sgpg_q);
4909 	}
4910 
4911 	mod->free_sgpgs -= nsgpgs;
4912 	return BFA_STATUS_OK;
4913 }
4914 
4915 void
4916 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4917 {
4918 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4919 	struct bfa_sgpg_wqe_s *wqe;
4920 
4921 	mod->free_sgpgs += nsgpg;
4922 	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
4923 
4924 	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
4925 
4926 	if (list_empty(&mod->sgpg_wait_q))
4927 		return;
4928 
4929 	/*
4930 	 * satisfy as many waiting requests as possible
4931 	 */
4932 	do {
4933 		wqe = bfa_q_first(&mod->sgpg_wait_q);
4934 		if (mod->free_sgpgs < wqe->nsgpg)
4935 			nsgpg = mod->free_sgpgs;
4936 		else
4937 			nsgpg = wqe->nsgpg;
4938 		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4939 		wqe->nsgpg -= nsgpg;
4940 		if (wqe->nsgpg == 0) {
4941 			list_del(&wqe->qe);
4942 			wqe->cbfn(wqe->cbarg);
4943 		}
4944 	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
4945 }
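
/*
 * Editorial example: an I/O path either gets all of its scatter-gather
 * pages at once or parks on the wait queue; partial grants happen only
 * through the wait path, where pages accumulate on the wqe's own sgpg_q.
 * The callback and helper names are hypothetical.
 */
static void
example_sgpg_avail(void *cbarg)
{
	/* every requested SGPG now sits on the wqe's sgpg_q */
}

static void
example_get_sgpgs(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
			struct list_head *sgpg_q, int n)
{
	if (bfa_sgpg_malloc(bfa, sgpg_q, n) == BFA_STATUS_OK)
		return;		/* fast path: all n pages granted */

	/* slow path: take what is free now and wait for the remainder */
	bfa_sgpg_winit(wqe, example_sgpg_avail, NULL);
	bfa_sgpg_wait(bfa, wqe, n);
}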
4946 
4947 void
4948 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4949 {
4950 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4951 
4952 	WARN_ON(nsgpg <= 0);
4953 	WARN_ON(nsgpg <= mod->free_sgpgs);
4954 
4955 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4956 
4957 	/*
4958 	 * allocate any left to this one first
4959 	 */
4960 	if (mod->free_sgpgs) {
4961 		/*
4962 		 * no one else is waiting for SGPG
4963 		 */
4964 		WARN_ON(!list_empty(&mod->sgpg_wait_q));
4965 		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4966 		wqe->nsgpg -= mod->free_sgpgs;
4967 		mod->free_sgpgs = 0;
4968 	}
4969 
4970 	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
4971 }
4972 
4973 void
4974 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4975 {
4976 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4977 
4978 	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4979 	list_del(&wqe->qe);
4980 
4981 	if (wqe->nsgpg_total != wqe->nsgpg)
4982 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4983 				   wqe->nsgpg_total - wqe->nsgpg);
4984 }
4985 
4986 void
4987 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4988 		   void *cbarg)
4989 {
4990 	INIT_LIST_HEAD(&wqe->sgpg_q);
4991 	wqe->cbfn = cbfn;
4992 	wqe->cbarg = cbarg;
4993 }
4994 
4995 /*
4996  *  UF related functions
4997  */
4998 /*
4999  *****************************************************************************
5000  * Internal functions
5001  *****************************************************************************
5002  */
5003 static void
5004 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5005 {
5006 	struct bfa_uf_s   *uf = cbarg;
5007 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5008 
5009 	if (complete)
5010 		ufm->ufrecv(ufm->cbarg, uf);
5011 }
5012 
5013 static void
5014 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5015 {
5016 	struct bfi_uf_buf_post_s *uf_bp_msg;
5017 	u16 i;
5018 	u16 buf_len;
5019 
5020 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5021 	uf_bp_msg = ufm->uf_buf_posts;
5022 
5023 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5024 	     i++, uf_bp_msg++) {
5025 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5026 
5027 		uf_bp_msg->buf_tag = i;
5028 		buf_len = sizeof(struct bfa_uf_buf_s);
5029 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5030 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5031 			    bfa_fn_lpu(ufm->bfa));
5032 		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5033 	}
5034 
5035 	/*
5036 	 * advance pointer beyond consumed memory
5037 	 */
5038 	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5039 }
5040 
5041 static void
5042 claim_ufs(struct bfa_uf_mod_s *ufm)
5043 {
5044 	u16 i;
5045 	struct bfa_uf_s   *uf;
5046 
5047 	/*
5048 	 * Claim block of memory for UF list
5049 	 */
5050 	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5051 
5052 	/*
5053 	 * Initialize UFs and queue it in UF free queue
5054 	 */
5055 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5056 		memset(uf, 0, sizeof(struct bfa_uf_s));
5057 		uf->bfa = ufm->bfa;
5058 		uf->uf_tag = i;
5059 		uf->pb_len = BFA_PER_UF_DMA_SZ;
5060 		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5061 		uf->buf_pa = ufm_pbs_pa(ufm, i);
5062 		list_add_tail(&uf->qe, &ufm->uf_free_q);
5063 	}
5064 
5065 	/*
5066 	 * advance memory pointer
5067 	 */
5068 	bfa_mem_kva_curp(ufm) = (u8 *) uf;
5069 }
5070 
5071 static void
5072 uf_mem_claim(struct bfa_uf_mod_s *ufm)
5073 {
5074 	claim_ufs(ufm);
5075 	claim_uf_post_msgs(ufm);
5076 }
5077 
5078 static void
5079 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5080 		struct bfa_s *bfa)
5081 {
5082 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5083 	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5084 	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
5085 	struct bfa_mem_dma_s *seg_ptr;
5086 	u16	nsegs, idx, per_seg_uf = 0;
5087 
5088 	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5089 	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5090 
5091 	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5092 		if (num_ufs >= per_seg_uf) {
5093 			num_ufs -= per_seg_uf;
5094 			bfa_mem_dma_setup(minfo, seg_ptr,
5095 				per_seg_uf * BFA_PER_UF_DMA_SZ);
5096 		} else
5097 			bfa_mem_dma_setup(minfo, seg_ptr,
5098 				num_ufs * BFA_PER_UF_DMA_SZ);
5099 	}
5100 
5101 	/* kva memory */
5102 	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5103 		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5104 }
5105 
5106 static void
5107 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5108 		struct bfa_pcidev_s *pcidev)
5109 {
5110 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5111 
5112 	ufm->bfa = bfa;
5113 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5114 	INIT_LIST_HEAD(&ufm->uf_free_q);
5115 	INIT_LIST_HEAD(&ufm->uf_posted_q);
5116 	INIT_LIST_HEAD(&ufm->uf_unused_q);
5117 
5118 	uf_mem_claim(ufm);
5119 }
5120 
5121 static void
5122 bfa_uf_detach(struct bfa_s *bfa)
5123 {
5124 }
5125 
5126 static struct bfa_uf_s *
5127 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5128 {
5129 	struct bfa_uf_s   *uf;
5130 
5131 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5132 	return uf;
5133 }
5134 
5135 static void
5136 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5137 {
5138 	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5139 }
5140 
5141 static bfa_status_t
5142 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5143 {
5144 	struct bfi_uf_buf_post_s *uf_post_msg;
5145 
5146 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5147 	if (!uf_post_msg)
5148 		return BFA_STATUS_FAILED;
5149 
5150 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5151 		      sizeof(struct bfi_uf_buf_post_s));
5152 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5153 
5154 	bfa_trc(ufm->bfa, uf->uf_tag);
5155 
5156 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
5157 	return BFA_STATUS_OK;
5158 }
5159 
5160 static void
5161 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5162 {
5163 	struct bfa_uf_s   *uf;
5164 
5165 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5166 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5167 			break;
5168 	}
5169 }
5170 
5171 static void
5172 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5173 {
5174 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5175 	u16 uf_tag = m->buf_tag;
5176 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5177 	struct bfa_uf_buf_s *uf_buf;
5178 	uint8_t *buf;
5179 	struct fchs_s *fchs;
5180 
5181 	uf_buf = (struct bfa_uf_buf_s *)
5182 			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5183 	buf = &uf_buf->d[0];
5184 
5185 	m->frm_len = be16_to_cpu(m->frm_len);
5186 	m->xfr_len = be16_to_cpu(m->xfr_len);
5187 
5188 	fchs = (struct fchs_s *)uf_buf;
5189 
5190 	list_del(&uf->qe);	/* dequeue from posted queue */
5191 
5192 	uf->data_ptr = buf;
5193 	uf->data_len = m->xfr_len;
5194 
5195 	WARN_ON(uf->data_len < sizeof(struct fchs_s));
5196 
5197 	if (uf->data_len == sizeof(struct fchs_s)) {
5198 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5199 			       uf->data_len, (struct fchs_s *)buf);
5200 	} else {
5201 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5202 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5203 				      BFA_PL_EID_RX, uf->data_len,
5204 				      (struct fchs_s *)buf, pld_w0);
5205 	}
5206 
5207 	if (bfa->fcs)
5208 		__bfa_cb_uf_recv(uf, BFA_TRUE);
5209 	else
5210 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5211 }
5212 
5213 static void
5214 bfa_uf_stop(struct bfa_s *bfa)
5215 {
5216 }
5217 
5218 static void
5219 bfa_uf_iocdisable(struct bfa_s *bfa)
5220 {
5221 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5222 	struct bfa_uf_s *uf;
5223 	struct list_head *qe, *qen;
5224 
5225 	/* Enqueue unused uf resources to free_q */
5226 	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5227 
5228 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5229 		uf = (struct bfa_uf_s *) qe;
5230 		list_del(&uf->qe);
5231 		bfa_uf_put(ufm, uf);
5232 	}
5233 }
5234 
5235 static void
5236 bfa_uf_start(struct bfa_s *bfa)
5237 {
5238 	bfa_uf_post_all(BFA_UF_MOD(bfa));
5239 }
5240 
5241 /*
5242  * Register a handler for all unsolicited receive frames.
5243  *
5244  * @param[in]	bfa		BFA instance
5245  * @param[in]	ufrecv	receive handler function
5246  * @param[in]	cbarg	receive handler arg
5247  */
5248 void
5249 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5250 {
5251 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5252 
5253 	ufm->ufrecv = ufrecv;
5254 	ufm->cbarg = cbarg;
5255 }
5256 
5257 /*
5258  *	Free an unsolicited frame back to BFA.
5259  *
5260  * @param[in]		uf		unsolicited frame to be freed
5261  *
5262  * @return None
5263  */
5264 void
5265 bfa_uf_free(struct bfa_uf_s *uf)
5266 {
5267 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5268 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5269 }
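
/*
 * Editorial example: an unsolicited-frame consumer registers a receive
 * handler, parses the frame, and must hand the buffer back with
 * bfa_uf_free() so it is recycled and re-posted to firmware. The handler
 * name is hypothetical; data_ptr/data_len are the fields set by
 * uf_recv() above.
 */
static void
example_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct fchs_s *fchs = (struct fchs_s *) uf->data_ptr;

	(void) fchs;	/* inspect FC header, dispatch payload, ... */

	bfa_uf_free(uf);	/* mandatory: recycle and re-post */
}

/* at setup time: bfa_uf_recv_register(bfa, example_uf_recv, cbarg); */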
5270 
5271 
5272 
5273 /*
5274  *  uf_pub BFA uf module public functions
5275  */
5276 void
5277 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5278 {
5279 	bfa_trc(bfa, msg->mhdr.msg_id);
5280 
5281 	switch (msg->mhdr.msg_id) {
5282 	case BFI_UF_I2H_FRM_RCVD:
5283 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5284 		break;
5285 
5286 	default:
5287 		bfa_trc(bfa, msg->mhdr.msg_id);
5288 		WARN_ON(1);
5289 	}
5290 }
5291 
5292 void
5293 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5294 {
5295 	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5296 	struct list_head	*qe;
5297 	int	i;
5298 
5299 	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5300 		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5301 		list_add_tail(qe, &mod->uf_unused_q);
5302 	}
5303 }
5304 
5305 /*
5306  *	BFA fcdiag module
5307  */
5308 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5309 
5310 /*
5311  *	Set port status to busy
5312  */
5313 static void
5314 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5315 {
5316 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5317 
5318 	if (fcdiag->lb.lock)
5319 		fcport->diag_busy = BFA_TRUE;
5320 	else
5321 		fcport->diag_busy = BFA_FALSE;
5322 }
5323 
5324 static void
5325 bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5326 		struct bfa_s *bfa)
5327 {
5328 }
5329 
5330 static void
5331 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5332 		struct bfa_pcidev_s *pcidev)
5333 {
5334 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5335 	fcdiag->bfa = bfa;
5336 	fcdiag->trcmod = bfa->trcmod;
5337 	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
5338 }
5339 
5340 static void
5341 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5342 {
5343 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5344 	bfa_trc(fcdiag, fcdiag->lb.lock);
5345 	if (fcdiag->lb.lock) {
5346 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5347 		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5348 		fcdiag->lb.lock = 0;
5349 		bfa_fcdiag_set_busy_status(fcdiag);
5350 	}
5351 }
5352 
5353 static void
5354 bfa_fcdiag_detach(struct bfa_s *bfa)
5355 {
5356 }
5357 
5358 static void
5359 bfa_fcdiag_start(struct bfa_s *bfa)
5360 {
5361 }
5362 
5363 static void
5364 bfa_fcdiag_stop(struct bfa_s *bfa)
5365 {
5366 }
5367 
5368 static void
5369 bfa_fcdiag_queuetest_timeout(void *cbarg)
5370 {
5371 	struct bfa_fcdiag_s       *fcdiag = cbarg;
5372 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5373 
5374 	bfa_trc(fcdiag, fcdiag->qtest.all);
5375 	bfa_trc(fcdiag, fcdiag->qtest.count);
5376 
5377 	fcdiag->qtest.timer_active = 0;
5378 
5379 	res->status = BFA_STATUS_ETIMER;
5380 	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5381 	if (fcdiag->qtest.all)
5382 		res->queue  = fcdiag->qtest.all;
5383 
5384 	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5385 	fcdiag->qtest.status = BFA_STATUS_ETIMER;
5386 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5387 	fcdiag->qtest.lock = 0;
5388 }
5389 
5390 static bfa_status_t
5391 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5392 {
5393 	u32	i;
5394 	struct bfi_diag_qtest_req_s *req;
5395 
5396 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5397 	if (!req)
5398 		return BFA_STATUS_DEVBUSY;
5399 
5400 	/* build host command */
5401 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5402 		bfa_fn_lpu(fcdiag->bfa));
5403 
5404 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5405 		req->data[i] = QTEST_PAT_DEFAULT;
5406 
5407 	bfa_trc(fcdiag, fcdiag->qtest.queue);
5408 	/* ring door bell */
5409 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5410 	return BFA_STATUS_OK;
5411 }
5412 
5413 static void
5414 bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5415 			bfi_diag_qtest_rsp_t *rsp)
5416 {
5417 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5418 	bfa_status_t status = BFA_STATUS_OK;
5419 	int i;
5420 
5421 	/* Check the timer; it should still be active */
5422 	if (!fcdiag->qtest.timer_active) {
5423 		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5424 		return;
5425 	}
5426 
5427 	/* update count */
5428 	fcdiag->qtest.count--;
5429 
5430 	/* Check result */
5431 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5432 		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5433 			res->status = BFA_STATUS_DATACORRUPTED;
5434 			break;
5435 		}
5436 	}
5437 
5438 	if (res->status == BFA_STATUS_OK) {
5439 		if (fcdiag->qtest.count > 0) {
5440 			status = bfa_fcdiag_queuetest_send(fcdiag);
5441 			if (status == BFA_STATUS_OK)
5442 				return;
5443 			else
5444 				res->status = status;
5445 		} else if (fcdiag->qtest.all > 0 &&
5446 			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5447 			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5448 			fcdiag->qtest.queue++;
5449 			status = bfa_fcdiag_queuetest_send(fcdiag);
5450 			if (status == BFA_STATUS_OK)
5451 				return;
5452 			else
5453 				res->status = status;
5454 		}
5455 	}
5456 
5457 	/* Stop the timer once all queues have been tested */
5458 	if (fcdiag->qtest.timer_active) {
5459 		bfa_timer_stop(&fcdiag->qtest.timer);
5460 		fcdiag->qtest.timer_active = 0;
5461 	}
5462 	res->queue = fcdiag->qtest.queue;
5463 	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5464 	bfa_trc(fcdiag, res->count);
5465 	bfa_trc(fcdiag, res->status);
5466 	fcdiag->qtest.status = res->status;
5467 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5468 	fcdiag->qtest.lock = 0;
5469 }
5470 
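/*
 * Loopback completion: copy the big-endian frame counters from the
 * firmware response into the caller's result, then release the loopback
 * lock and clear the diag-busy state.
 */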
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}

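/*
 * Build a BFI_DIAG_H2I_LOOPBACK request from the caller's loopback
 * parameters and post it on the DIAG request queue.
 */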
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring the doorbell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}

/*
 *	CPE/RME interrupt handler: dispatch diag responses from firmware
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	default:
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}

/*
 *	Loopback test
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] opmode          - port operation mode
 *   @param[in] speed           - port speed
 *   @param[in] lpcnt           - loop count
 *   @param[in] pat             - pattern with which to build packets
 *   @param[in] *result         - pointer to bfa_diag_loopback_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] cbarg           - callback function arg
 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if ((speed == BFA_PORT_SPEED_1GBPS) &&
			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
				return BFA_STATUS_UNSUPP_SPEED;
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}

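/*
 * Illustrative call (hypothetical caller and example argument values,
 * not taken from this file): an internal loopback at auto-negotiated
 * speed for 10 frames might be requested as
 *
 *	bfa_fcdiag_loopback(bfa, BFA_PORT_OPMODE_LB_INT, BFA_PORT_SPEED_AUTO,
 *			    10, 0xA5A5A5A5, &lb_result,
 *			    drv_diag_cbfn, drv_cbarg);
 */
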
/*
 *	DIAG queue test command
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] force           - 1: skip the IOC operational-state check
 *   @param[in] queue           - queue number to test
 *   @param[in] *result         - pointer to bfa_diag_qtest_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/* send */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}

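/*
 * Illustrative call (hypothetical caller and example argument values,
 * not taken from this file): passing a queue number that is not below
 * BFI_IOC_MAX_CQS selects the "test all queues" path, e.g.
 *
 *	bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS, &qt_result,
 *			     drv_diag_cbfn, drv_cbarg);
 */
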
/*
 * Check whether a diag loopback (PLB) test is running
 *
 *   @param[in] *bfa    - bfa data struct
 */
bfa_status_t
bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
}