/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"

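/*
 * An ethport may come up only when its administrative state, Rx path and
 * physical port state agree. For a regular enet the physical port must be
 * enabled; for loopback enets the opposite is required, since loopback
 * traffic must not reach the wire.
 */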
static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up

enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,
	ETHPORT_E_FWRESP_DOWN = 7,
	ETHPORT_E_FWRESP_UP_FAIL = 8,
};

enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,
	ENET_E_MTU_CFG = 5,
	ENET_E_FWRESP_PAUSE = 6,
	ENET_E_CHLD_STOPPED = 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};

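/*
 * Copies one hardware stats block from the DMA-able area to the host-order
 * copy, converting each 64-bit counter from big endian. Expects the caller
 * to provide count, i, stats_src and stats_dst as locals.
 */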
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)

/*
 * FW response handlers
 */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}

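/*
 * Firmware returns all statistics in one big-endian DMA block: the fixed
 * MAC/BPC/RAD/FC sections first, then one rxf/txf block for each enet that
 * was named in the request masks. The copy below walks that layout and
 * scatters each per-function block into its slot in the host-order stats.
 */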
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}

static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}

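/*
 * Single demux point for all BFI_MC_ENET messages coming back from
 * firmware. Responses carry the enet id of the object they answer for;
 * Tx/Rx responses are routed to the matching bna_tx/bna_rx (if it still
 * exists), everything else goes to the singleton ethport, enet, ioceth
 * and stats objects.
 */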
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}

/* ETHPORT */

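/*
 * Ethport state machine: stopped is the idle state; START moves it to
 * down, and UP/DOWN events (driven by admin state, Rx startup and PORT
 * ENABLE/DISABLE AENs) bounce it between down and up through the
 * *_resp_wait states, which exist only to keep a single admin/loopback
 * request outstanding in the mailbox at a time.
 */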
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)

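/*
 * The four helpers below build a BFI request in the ethport's command
 * area and post it on the message queue. Admin up/down share one request
 * (BFI_ENET_H2I_PORT_ADMIN_UP_REQ) and differ only in the enable field;
 * the loopback variants use the diag loopback request instead.
 */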
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}

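/*
 * For each state, bfa_fsm_state_decl() declares the pair of functions
 * the FSM framework expects: bna_ethport_sm_<state>_entry(), run on
 * entry to the state, and bna_ethport_sm_<state>(), the per-state event
 * handler that bfa_fsm_send_event() dispatches to. Both are defined
 * below.
 */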
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);

static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/* ENET */

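/*
 * The enet object ties the ethport, Tx and Rx modules together. It starts
 * them as a group once the initial pause configuration has been accepted
 * by firmware, and stops them as a group using the chld_stop wait counter
 * so that ENET_E_CHLD_STOPPED fires only after all children have stopped.
 */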
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

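/*
 * One bfa_wc_up() per child keeps the wait counter from reaching zero
 * before all of the stop requests have been issued; each child drops its
 * reference through bna_enet_cb_*_stopped(), and the closing
 * bfa_wc_wait() releases the reference taken by bfa_wc_init(), after
 * which bna_enet_cb_chld_stopped() runs.
 */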
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);

static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

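/*
 * Builds the pause request from the currently cached pause_config and
 * posts it; firmware answers with BFI_ENET_I2H_SET_PAUSE_RSP, which
 * feeds ENET_E_FWRESP_PAUSE back into the enet state machine.
 */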
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}

/* IOCETH */

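/*
 * The ioceth object wraps the shared IOC. Enabling it walks
 * stopped -> ioc_ready_wait -> enet_attr_wait -> ready: the IOC is
 * brought up, the enet attributes are fetched from firmware, and only
 * then is the enet allowed to start. IOC failure from any state lands
 * in failed, from where an IOC reset restarts the ready sequence.
 */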
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);

static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}

/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};

static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}

static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}

static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}

void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}

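/*
 * The UCAM/MCAM modules carve their bna_mac (and bna_mcam_handle) arrays
 * out of KVA handed over through res_info. Each MAC array holds twice
 * num_ucmac/num_mcmac entries: the first half seeds free_q, the second
 * half seeds del_q, the separate queue that lets a list of MACs be
 * deleted synchronously while new ones are being added.
 */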
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &ucam_mod->del_q)
		i++;

	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->del_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q)
		i++;

	mcam_mod->bna = NULL;
}

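/*
 * Kicks off an asynchronous stats snapshot: the request names every
 * active Tx/Rx enet via the rid masks and points firmware at the
 * hw_stats DMA buffer. stats_get_busy stays set until
 * bna_bfi_stats_get_rsp() sees the completion.
 */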
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}

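/*
 * Describe the driver-level (non per-module) resources: one DMA block
 * shared by CEE, flash and the message queue, a DMA block for IOC
 * attributes, kernel memory for the firmware trace buffer, and a DMA
 * block for the hardware statistics that bna_bfi_stats_get() targets.
 */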
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
				      PAGE_SIZE);
}

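/*
 * Describe the per-module KVA resources, sized from the attributes the
 * firmware reported. Note the factor of two on the RxQ array (two
 * queues per Rx path) and on the unicast/multicast MAC arrays (each
 * CAM module keeps a free half and a deletion half, as set up in
 * bna_ucam_mod_init()/bna_mcam_mod_init() above).
 */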
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}

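/*
 * First-stage init: record the bnad/PCI context, latch the KVA and DMA
 * address of the statistics buffer, then bring up the register map and
 * the ioceth/enet/ethport state machines. The second stage,
 * bna_mod_init(), must wait until the firmware has reported its
 * attributes, since the per-module arrays are sized from them.
 */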
void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}

void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

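/*
 * Tear down in the reverse order of bna_init()/bna_mod_init(); the
 * module uninits run only if bna_mod_init() completed.
 */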
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}

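/*
 * Let the driver trim the number of TxQs/RxPs it will actually use; the
 * request succeeds only after the firmware attribute query has
 * completed, and only if it does not exceed the reported maximum.
 */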
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

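/*
 * The CAM helpers below are simple free-list allocators over the queues
 * seeded in bna_ucam_mod_init()/bna_mcam_mod_init(). A minimal usage
 * sketch (illustrative only; the real callers live in the Rx path of
 * this driver):
 *
 *	struct bna_mac *mac;
 *
 *	mac = bna_cam_mod_mac_get(&bna->ucam_mod.free_q);
 *	if (mac) {
 *		... fill in the MAC address and issue the fw request ...
 *		bna_cam_mod_mac_put(&bna->ucam_mod.free_q, mac);
 *	}
 */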
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct list_head *qe;

	if (list_empty(head))
		return NULL;

	bfa_q_deq(head, &qe);
	return (struct bna_mac *)qe;
}

void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}

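/*
 * Entry point for a statistics snapshot. Failure (IOC not ready) and
 * busy (a previous request still in flight) are reported immediately
 * through bnad_cb_stats_get(); on the success path the request is
 * posted to firmware and presumably completes through the same
 * callback from the firmware response handler.
 */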
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}
