/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	((__sm) == BFI_IOC_INITING) ||		\
	((__sm) == BFI_IOC_HWINIT) ||		\
	((__sm) == BFI_IOC_DISABLED) ||		\
	((__sm) == BFI_IOC_FAIL) ||		\
	((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
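
/*
 * Note: the macro above treats a mailbox command as pending both while a
 * request sits on the driver-side cmd_q and while a previously issued
 * command is still in the hfn_mbox_cmd doorbell register (i.e. not yet
 * consumed by firmware).
 */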

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
				struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
				struct bfa_ioc_s *ioc,
				struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
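
/*
 * Each bfa_fsm_state_decl() above forward-declares a state handler pair.
 * Assuming the bfa_cs.h definition, it expands roughly to:
 *
 *	static void bfa_ioc_sm_<state>_entry(struct bfa_ioc_s *ioc);
 *	static void bfa_ioc_sm_<state>(struct bfa_ioc_s *ioc,
 *					enum ioc_event event);
 *
 * The *_entry() hook runs once on state entry; the second function
 * dispatches events received while in that state.
 */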

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
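
/*
 * ioc_sm_table maps each internal state handler to the externally visible
 * enum bfa_ioc_state value. A minimal usage sketch, assuming the
 * bfa_sm_to_state() helper from bfa_cs.h:
 *
 *	enum bfa_ioc_state st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
 */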

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. The IOC moves to the disabled state
		 * after the iocpf sm completes its failure processing.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
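
/*
 * Note on the entry action above: the init semaphore (polled in 20 usec
 * steps) serializes this one-time cleanup across PCI functions. The
 * cleanup path -- clearing the smem f/w header and forcing both fwstates
 * back to BFI_IOC_UNINIT -- appears intended for the case where a
 * previously booted non-NORMAL image (e.g. a BIOS/flash boot) was left
 * behind, so that a fresh firmware download is forced.
 */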

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
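
/*
 * The busy-wait above is bounded by BFA_SEM_SPINCNT * 2 usec, i.e.
 * roughly 6 msec, after which BFA_FALSE is returned.
 */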

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
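
/*
 * A read of all ones (~0) above is treated as a dead PCI read (e.g. the
 * device dropped off the bus), hence the IOCPF_E_SEM_ERROR escalation.
 * When the semaphore is merely busy, the sem timer retries the
 * acquisition after BFA_IOC_HWSEM_TOV msecs.
 */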

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/*
	 * If smem is incompatible or old, driver should not work with it.
	 */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return BFA_FALSE;
	}

	/*
	 * If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with smem (smem f/w is neither
	 * old nor incompatible at this point).
	 * If flash is old or incompatible, work with smem only if
	 * smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
		return BFA_FALSE;
	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
		return BFA_TRUE;
	} else {
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			BFA_TRUE : BFA_FALSE;
	}
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
				struct bfi_ioc_image_hdr_s *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Returns TRUE if major, minor and maintenance versions are the same.
 * If the patch versions are also the same, the MD5 checksums are checked
 * for a match as well.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
	}

	return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return BFA_FALSE;

	return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
			fwhdr->fwver.build == 0)
		return BFA_TRUE;

	return BFA_FALSE;
}

/*
 * Compares the patch level of two compatible images: reports whether
 * fwhdr_to_cmp is better than, older than, or the same as base_fwhdr.
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;

	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */

	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * All Version Numbers are equal.
	 * Md5 check to be done as a part of compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
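
/*
 * Net precedence applied above: patch number first, then GA vs. internal
 * build within the same patch stream, then phase, then build number.
 * If every field is equal the images report BFI_IOC_IMG_VER_SAME; an MD5
 * mismatch would already have been caught by the compatibility check.
 */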

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
				u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
			struct bfi_ioc_image_hdr_s *base_fwhdr)
{
	struct bfi_ioc_image_hdr_s *flash_fwhdr;
	bfa_status_t status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}


/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{

	u32	pgnum, pgoff;
	u32	loff = 0;
	enum bfi_ioc_state ioc_fwstate;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (!bfa_ioc_state_disabled(ioc_fwstate))
		return BFA_STATUS_ADAPTER_ENABLED;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);

	return BFA_STATUS_OK;
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
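
/*
 * The trailing readl() above is presumably a flush of the posted doorbell
 * write before returning; the message words themselves go out in
 * little-endian form via cpu_to_le32(), with the unused tail of the
 * mailbox zeroed.
 */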

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}
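
/*
 * Heartbeat scheme: firmware keeps incrementing the heartbeat register,
 * and the driver samples it every BFA_IOC_HB_TOV msecs. An unchanged
 * count is taken as a dead IOC and triggers bfa_ioc_recover(); the same
 * tick also opportunistically kicks the mailbox poller.
 */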

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

/*
 * Initiate a full firmware download.
 */
static bfa_status_t
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	bfa_status_t status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	bfa_trc(ioc, fwimg_size);


	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);

			if (boot_env == BFI_FWBOOT_ENV_OS &&
				boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
	return BFA_STATUS_OK;
}
1968
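/*
 * Worked example of the chunk walk above (editor's sketch; the 2048-word
 * chunk size is hypothetical): for a word index i,
 * BFA_IOC_FLASH_CHUNK_NO(i) selects which chunk to fetch,
 * BFA_IOC_FLASH_CHUNK_ADDR() turns that chunk number back into a word
 * address, and BFA_IOC_FLASH_OFFSET_IN_CHUNK(i) indexes into the fetched
 * buffer. With 2048-word chunks, i = 2049 reads word 1 of chunk 1, whose
 * chunk address is 2048.
 */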
1969
1970 /*
1971 * Update BFA configuration from firmware configuration.
1972 */
1973 static void
1974 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1975 {
1976 struct bfi_ioc_attr_s *attr = ioc->attr;
1977
1978 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1979 attr->card_type = be32_to_cpu(attr->card_type);
1980 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1981 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1982 attr->mfg_year = be16_to_cpu(attr->mfg_year);
1983
1984 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1985 }
1986
1987 /*
1988 * Attach time initialization of mbox logic.
1989 */
1990 static void
1991 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1992 {
1993 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1994 int mc;
1995
1996 INIT_LIST_HEAD(&mod->cmd_q);
1997 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1998 mod->mbhdlr[mc].cbfn = NULL;
1999 mod->mbhdlr[mc].cbarg = ioc->bfa;
2000 }
2001 }
2002
2003 /*
2004 * Mbox poll timer -- restarts any pending mailbox requests.
2005 */
2006 static void
2007 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
2008 {
2009 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2010 struct bfa_mbox_cmd_s *cmd;
2011 u32 stat;
2012
2013 /*
2014 * If no command pending, do nothing
2015 */
2016 if (list_empty(&mod->cmd_q))
2017 return;
2018
2019 /*
2020 * If previous command is not yet fetched by firmware, do nothing
2021 */
2022 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2023 if (stat)
2024 return;
2025
2026 /*
2027 * Enqueue command to firmware.
2028 */
2029 bfa_q_deq(&mod->cmd_q, &cmd);
2030 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2031 }
2032
2033 /*
2034 * Cleanup any pending requests.
2035 */
2036 static void
2037 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2038 {
2039 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2040 struct bfa_mbox_cmd_s *cmd;
2041
2042 while (!list_empty(&mod->cmd_q))
2043 bfa_q_deq(&mod->cmd_q, &cmd);
2044 }
2045
2046 /*
2047 * Read data from SMEM to host through PCI memmap
2048 *
2049 * @param[in] ioc memory for IOC
2050 * @param[in] tbuf app memory to store data from smem
2051 * @param[in] soff smem offset
2052 * @param[in] sz number of bytes to read
2053 */
2054 static bfa_status_t
2055 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2056 {
2057 u32 pgnum, loff;
2058 __be32 r32;
2059 int i, len;
2060 u32 *buf = tbuf;
2061
2062 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2063 loff = PSS_SMEM_PGOFF(soff);
2064 bfa_trc(ioc, pgnum);
2065 bfa_trc(ioc, loff);
2066 bfa_trc(ioc, sz);
2067
2068 /*
2069 * Hold semaphore to serialize pll init and fwtrc.
2070 */
2071 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2072 bfa_trc(ioc, 0);
2073 return BFA_STATUS_FAILED;
2074 }
2075
2076 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2077
2078 len = sz/sizeof(u32);
2079 bfa_trc(ioc, len);
2080 for (i = 0; i < len; i++) {
2081 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2082 buf[i] = swab32(r32);
2083 loff += sizeof(u32);
2084
2085 /*
2086 * handle page offset wrap around
2087 */
2088 loff = PSS_SMEM_PGOFF(loff);
2089 if (loff == 0) {
2090 pgnum++;
2091 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2092 }
2093 }
2094 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2095 ioc->ioc_regs.host_page_num_fn);
2096 /*
2097 * release semaphore.
2098 */
2099 readl(ioc->ioc_regs.ioc_init_sem_reg);
2100 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2101
2102 bfa_trc(ioc, pgnum);
2103 return BFA_STATUS_OK;
2104 }
2105
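/*
 * Editor's sketch of the paging walk used by bfa_ioc_smem_read() above
 * and bfa_ioc_smem_clr() below (the helper name is hypothetical): SMEM
 * is reached through a sliding PCI window, so the linear offset advances
 * one word at a time and the page register is re-programmed whenever the
 * in-page offset wraps to zero.
 */
static inline u32
smem_next_word(struct bfa_ioc_s *ioc, u32 *pgnum, u32 loff)
{
        loff += sizeof(u32);
        loff = PSS_SMEM_PGOFF(loff);    /* offset within the current page */
        if (loff == 0) {                /* crossed a page boundary */
                (*pgnum)++;
                writel(*pgnum, ioc->ioc_regs.host_page_num_fn);
        }
        return loff;
}
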
2106 /*
2107 * Clear SMEM data from host through PCI memmap
2108 *
2109 * @param[in] ioc memory for IOC
2110 * @param[in] soff smem offset
2111 * @param[in] sz number of bytes to clear
2112 */
2113 static bfa_status_t
2114 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2115 {
2116 int i, len;
2117 u32 pgnum, loff;
2118
2119 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2120 loff = PSS_SMEM_PGOFF(soff);
2121 bfa_trc(ioc, pgnum);
2122 bfa_trc(ioc, loff);
2123 bfa_trc(ioc, sz);
2124
2125 /*
2126 * Hold semaphore to serialize pll init and fwtrc.
2127 */
2128 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2129 bfa_trc(ioc, 0);
2130 return BFA_STATUS_FAILED;
2131 }
2132
2133 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2134
2135 len = sz/sizeof(u32); /* len in words */
2136 bfa_trc(ioc, len);
2137 for (i = 0; i < len; i++) {
2138 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2139 loff += sizeof(u32);
2140
2141 /*
2142 * handle page offset wrap around
2143 */
2144 loff = PSS_SMEM_PGOFF(loff);
2145 if (loff == 0) {
2146 pgnum++;
2147 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2148 }
2149 }
2150 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2151 ioc->ioc_regs.host_page_num_fn);
2152
2153 /*
2154 * release semaphore.
2155 */
2156 readl(ioc->ioc_regs.ioc_init_sem_reg);
2157 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2158 bfa_trc(ioc, pgnum);
2159 return BFA_STATUS_OK;
2160 }
2161
2162 static void
2163 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2164 {
2165 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2166
2167 /*
2168 * Notify driver and common modules registered for notification.
2169 */
2170 ioc->cbfn->hbfail_cbfn(ioc->bfa);
2171 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2172
2173 bfa_ioc_debug_save_ftrc(ioc);
2174
2175 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2176 "Heart Beat of IOC has failed\n");
2177 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2178
2179 }
2180
2181 static void
2182 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2183 {
2184 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2185 /*
2186 * Provide enable completion callback.
2187 */
2188 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2189 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2190 "Running firmware version is incompatible "
2191 "with the driver version\n");
2192 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2193 }
2194
2195 bfa_status_t
2196 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2197 {
2198
2199 /*
2200 * Hold semaphore so that nobody can access the chip during init.
2201 */
2202 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2203
2204 bfa_ioc_pll_init_asic(ioc);
2205
2206 ioc->pllinit = BFA_TRUE;
2207
2208 /*
2209 * Initialize LMEM
2210 */
2211 bfa_ioc_lmem_init(ioc);
2212
2213 /*
2214 * release semaphore.
2215 */
2216 readl(ioc->ioc_regs.ioc_init_sem_reg);
2217 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2218
2219 return BFA_STATUS_OK;
2220 }
2221
2222 /*
2223 * Interface used by diag module to do firmware boot with memory test
2224 * as the entry vector.
2225 */
2226 bfa_status_t
2227 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2228 {
2229 struct bfi_ioc_image_hdr_s *drv_fwhdr;
2230 bfa_status_t status;
2231 bfa_ioc_stats(ioc, ioc_boots);
2232
2233 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2234 return BFA_STATUS_FAILED;
2235
2236 if (boot_env == BFI_FWBOOT_ENV_OS &&
2237 boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2238
2239 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2240 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2241
2242 /*
2243 * Work with Flash iff flash f/w is better than driver f/w.
2244 * Otherwise push the driver's firmware.
2245 */
2246 if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2247 BFI_IOC_IMG_VER_BETTER)
2248 boot_type = BFI_FWBOOT_TYPE_FLASH;
2249 }
2250
2251 /*
2252 * Initialize IOC state of all functions on a chip reset.
2253 */
2254 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2255 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2256 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2257 } else {
2258 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2259 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2260 }
2261
2262 bfa_ioc_msgflush(ioc);
2263 status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2264 if (status == BFA_STATUS_OK)
2265 bfa_ioc_lpu_start(ioc);
2266 else {
2267 WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2268 bfa_iocpf_timeout(ioc);
2269 }
2270 return status;
2271 }
2272
2273 /*
2274 * Enable/disable IOC failure auto recovery.
2275 */
2276 void
2277 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2278 {
2279 bfa_auto_recover = auto_recover;
2280 }
2281
2282
2283
2284 bfa_boolean_t
2285 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2286 {
2287 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2288 }
2289
2290 bfa_boolean_t
2291 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2292 {
2293 u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2294
2295 return ((r32 != BFI_IOC_UNINIT) &&
2296 (r32 != BFI_IOC_INITING) &&
2297 (r32 != BFI_IOC_MEMTEST));
2298 }
2299
2300 bfa_boolean_t
2301 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2302 {
2303 __be32 *msgp = mbmsg;
2304 u32 r32;
2305 int i;
2306
2307 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2308 if ((r32 & 1) == 0)
2309 return BFA_FALSE;
2310
2311 /*
2312 * read the MBOX msg
2313 */
2314 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2315 i++) {
2316 r32 = readl(ioc->ioc_regs.lpu_mbox +
2317 i * sizeof(u32));
2318 msgp[i] = cpu_to_be32(r32);
2319 }
2320
2321 /*
2322 * turn off mailbox interrupt by clearing mailbox status
2323 */
2324 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2325 readl(ioc->ioc_regs.lpu_mbox_cmd);
2326
2327 return BFA_TRUE;
2328 }
2329
2330 void
2331 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2332 {
2333 union bfi_ioc_i2h_msg_u *msg;
2334 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2335
2336 msg = (union bfi_ioc_i2h_msg_u *) m;
2337
2338 bfa_ioc_stats(ioc, ioc_isrs);
2339
2340 switch (msg->mh.msg_id) {
2341 case BFI_IOC_I2H_HBEAT:
2342 break;
2343
2344 case BFI_IOC_I2H_ENABLE_REPLY:
2345 ioc->port_mode = ioc->port_mode_cfg =
2346 (enum bfa_mode_s)msg->fw_event.port_mode;
2347 ioc->ad_cap_bm = msg->fw_event.cap_bm;
2348 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2349 break;
2350
2351 case BFI_IOC_I2H_DISABLE_REPLY:
2352 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2353 break;
2354
2355 case BFI_IOC_I2H_GETATTR_REPLY:
2356 bfa_ioc_getattr_reply(ioc);
2357 break;
2358
2359 default:
2360 bfa_trc(ioc, msg->mh.msg_id);
2361 WARN_ON(1);
2362 }
2363 }
2364
2365 /*
2366 * IOC attach time initialization and setup.
2367 *
2368 * @param[in] ioc memory for IOC
2369 * @param[in] bfa driver instance structure
2370 */
2371 void
2372 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2373 struct bfa_timer_mod_s *timer_mod)
2374 {
2375 ioc->bfa = bfa;
2376 ioc->cbfn = cbfn;
2377 ioc->timer_mod = timer_mod;
2378 ioc->fcmode = BFA_FALSE;
2379 ioc->pllinit = BFA_FALSE;
2380 ioc->dbg_fwsave_once = BFA_TRUE;
2381 ioc->iocpf.ioc = ioc;
2382
2383 bfa_ioc_mbox_attach(ioc);
2384 INIT_LIST_HEAD(&ioc->notify_q);
2385
2386 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2387 bfa_fsm_send_event(ioc, IOC_E_RESET);
2388 }
2389
2390 /*
2391 * Driver detach time IOC cleanup.
2392 */
2393 void
2394 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2395 {
2396 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2397 INIT_LIST_HEAD(&ioc->notify_q);
2398 }
2399
2400 /*
2401 * Setup IOC PCI properties.
2402 *
2403 * @param[in] pcidev PCI device information for this IOC
2404 */
2405 void
2406 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2407 enum bfi_pcifn_class clscode)
2408 {
2409 ioc->clscode = clscode;
2410 ioc->pcidev = *pcidev;
2411
2412 /*
2413 * Initialize IOC and device personality
2414 */
2415 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2416 ioc->asic_mode = BFI_ASIC_MODE_FC;
2417
2418 switch (pcidev->device_id) {
2419 case BFA_PCI_DEVICE_ID_FC_8G1P:
2420 case BFA_PCI_DEVICE_ID_FC_8G2P:
2421 ioc->asic_gen = BFI_ASIC_GEN_CB;
2422 ioc->fcmode = BFA_TRUE;
2423 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2424 ioc->ad_cap_bm = BFA_CM_HBA;
2425 break;
2426
2427 case BFA_PCI_DEVICE_ID_CT:
2428 ioc->asic_gen = BFI_ASIC_GEN_CT;
2429 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2430 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2431 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2432 ioc->ad_cap_bm = BFA_CM_CNA;
2433 break;
2434
2435 case BFA_PCI_DEVICE_ID_CT_FC:
2436 ioc->asic_gen = BFI_ASIC_GEN_CT;
2437 ioc->fcmode = BFA_TRUE;
2438 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2439 ioc->ad_cap_bm = BFA_CM_HBA;
2440 break;
2441
2442 case BFA_PCI_DEVICE_ID_CT2:
2443 case BFA_PCI_DEVICE_ID_CT2_QUAD:
2444 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2445 if (clscode == BFI_PCIFN_CLASS_FC &&
2446 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2447 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2448 ioc->fcmode = BFA_TRUE;
2449 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2450 ioc->ad_cap_bm = BFA_CM_HBA;
2451 } else {
2452 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2453 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2454 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2455 ioc->port_mode =
2456 ioc->port_mode_cfg = BFA_MODE_CNA;
2457 ioc->ad_cap_bm = BFA_CM_CNA;
2458 } else {
2459 ioc->port_mode =
2460 ioc->port_mode_cfg = BFA_MODE_NIC;
2461 ioc->ad_cap_bm = BFA_CM_NIC;
2462 }
2463 }
2464 break;
2465
2466 default:
2467 WARN_ON(1);
2468 }
2469
2470 /*
2471 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2472 */
2473 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2474 bfa_ioc_set_cb_hwif(ioc);
2475 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2476 bfa_ioc_set_ct_hwif(ioc);
2477 else {
2478 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2479 bfa_ioc_set_ct2_hwif(ioc);
2480 bfa_ioc_ct2_poweron(ioc);
2481 }
2482
2483 bfa_ioc_map_port(ioc);
2484 bfa_ioc_reg_init(ioc);
2485 }
2486
2487 /*
2488 * Initialize IOC dma memory
2489 *
2490 * @param[in] dm_kva kernel virtual address of IOC dma memory
2491 * @param[in] dm_pa physical address of IOC dma memory
2492 */
2493 void
2494 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2495 {
2496 /*
2497 * dma memory for firmware attribute
2498 */
2499 ioc->attr_dma.kva = dm_kva;
2500 ioc->attr_dma.pa = dm_pa;
2501 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2502 }
2503
2504 void
2505 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2506 {
2507 bfa_ioc_stats(ioc, ioc_enables);
2508 ioc->dbg_fwsave_once = BFA_TRUE;
2509
2510 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2511 }
2512
2513 void
2514 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2515 {
2516 bfa_ioc_stats(ioc, ioc_disables);
2517 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2518 }
2519
2520 void
2521 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2522 {
2523 ioc->dbg_fwsave_once = BFA_TRUE;
2524 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2525 }
2526
2527 /*
2528 * Initialize memory for saving firmware trace. Driver must initialize
2529 * trace memory before calling bfa_ioc_enable().
2530 */
2531 void
2532 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2533 {
2534 ioc->dbg_fwsave = dbg_fwsave;
2535 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2536 }
2537
2538 /*
2539 * Register mailbox message handler functions
2540 *
2541 * @param[in] ioc IOC instance
2542 * @param[in] mcfuncs message class handler functions
2543 */
2544 void
2545 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2546 {
2547 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2548 int mc;
2549
2550 for (mc = 0; mc < BFI_MC_MAX; mc++)
2551 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2552 }
2553
2554 /*
2555 * Register mailbox message handler function, to be called by common modules
2556 */
2557 void
2558 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2559 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2560 {
2561 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2562
2563 mod->mbhdlr[mc].cbfn = cbfn;
2564 mod->mbhdlr[mc].cbarg = cbarg;
2565 }
2566
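/*
 * Usage sketch (editor's illustration; the my_mod_* names are
 * hypothetical): a common module claims one message class so that
 * bfa_ioc_mbox_isr() below routes every received message of that class
 * to it, exactly as bfa_ablk_attach() further down does for BFI_MC_ABLK.
 */
static void
my_mod_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
        /* decode msg->mh.msg_id and complete the pending request */
}

static void
my_mod_attach(struct bfa_ioc_s *ioc, void *mod_arg)
{
        bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, my_mod_isr, mod_arg);
}
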
2567 /*
2568 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2569 * It is the caller's responsibility to serialize.
2570 *
2571 * @param[in] ioc IOC instance
2572 * @param[in] cmd Mailbox command
2573 */
2574 void
2575 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2576 {
2577 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2578 u32 stat;
2579
2580 /*
2581 * If a previous command is pending, queue new command
2582 */
2583 if (!list_empty(&mod->cmd_q)) {
2584 list_add_tail(&cmd->qe, &mod->cmd_q);
2585 return;
2586 }
2587
2588 /*
2589 * If mailbox is busy, queue command for poll timer
2590 */
2591 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2592 if (stat) {
2593 list_add_tail(&cmd->qe, &mod->cmd_q);
2594 return;
2595 }
2596
2597 /*
2598 * mailbox is free -- queue command to firmware
2599 */
2600 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2601 }
2602
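/*
 * Usage sketch (editor's illustration, modeled on bfa_ioc_send_fwsync()
 * further down): build the request in the command's message area, then
 * queue it. Note that @cmd may be linked onto the pending queue, so it
 * must stay allocated until the mailbox picks it up.
 */
static void
my_mod_send_dbg_sync(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
        struct bfi_ioc_ctrl_req_s *req =
                        (struct bfi_ioc_ctrl_req_s *) cmd->msg;

        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_queue(ioc, cmd);
}
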
2603 /*
2604 * Handle mailbox interrupts
2605 */
2606 void
2607 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2608 {
2609 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2610 struct bfi_mbmsg_s m;
2611 int mc;
2612
2613 if (bfa_ioc_msgget(ioc, &m)) {
2614 /*
2615 * Treat IOC message class as special.
2616 */
2617 mc = m.mh.msg_class;
2618 if (mc == BFI_MC_IOC) {
2619 bfa_ioc_isr(ioc, &m);
2620 return;
2621 }
2622
2623 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2624 return;
2625
2626 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2627 }
2628
2629 bfa_ioc_lpu_read_stat(ioc);
2630
2631 /*
2632 * Try to send pending mailbox commands
2633 */
2634 bfa_ioc_mbox_poll(ioc);
2635 }
2636
2637 void
2638 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2639 {
2640 bfa_ioc_stats(ioc, ioc_hbfails);
2641 ioc->stats.hb_count = ioc->hb_count;
2642 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2643 }
2644
2645 /*
2646 * return true if IOC is disabled
2647 */
2648 bfa_boolean_t
2649 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2650 {
2651 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2652 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2653 }
2654
2655 /*
2656 * return true if IOC firmware is different.
2657 */
2658 bfa_boolean_t
2659 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2660 {
2661 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2662 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2663 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2664 }
2665
2666 /*
2667 * Check if adapter is disabled -- both IOCs should be in a disabled
2668 * state.
2669 */
2670 bfa_boolean_t
2671 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2672 {
2673 u32 ioc_state;
2674
2675 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2676 return BFA_FALSE;
2677
2678 ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2679 if (!bfa_ioc_state_disabled(ioc_state))
2680 return BFA_FALSE;
2681
2682 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2683 ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2684 if (!bfa_ioc_state_disabled(ioc_state))
2685 return BFA_FALSE;
2686 }
2687
2688 return BFA_TRUE;
2689 }
2690
2691 /*
2692 * Reset IOC fwstate registers.
2693 */
2694 void
2695 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2696 {
2697 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2698 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2699 }
2700
2701 #define BFA_MFG_NAME "QLogic"
2702 void
2703 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2704 struct bfa_adapter_attr_s *ad_attr)
2705 {
2706 struct bfi_ioc_attr_s *ioc_attr;
2707
2708 ioc_attr = ioc->attr;
2709
2710 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2711 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2712 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2713 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2714 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2715 sizeof(struct bfa_mfg_vpd_s));
2716
2717 ad_attr->nports = bfa_ioc_get_nports(ioc);
2718 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2719
2720 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2721 /* For now, model descr uses same model string */
2722 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2723
2724 ad_attr->card_type = ioc_attr->card_type;
2725 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2726
2727 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2728 ad_attr->prototype = 1;
2729 else
2730 ad_attr->prototype = 0;
2731
2732 ad_attr->pwwn = ioc->attr->pwwn;
2733 ad_attr->mac = bfa_ioc_get_mac(ioc);
2734
2735 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2736 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2737 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2738 ad_attr->asic_rev = ioc_attr->asic_rev;
2739
2740 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2741
2742 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2743 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2744 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2745 ad_attr->mfg_day = ioc_attr->mfg_day;
2746 ad_attr->mfg_month = ioc_attr->mfg_month;
2747 ad_attr->mfg_year = ioc_attr->mfg_year;
2748 memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2749 }
2750
2751 enum bfa_ioc_type_e
2752 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2753 {
2754 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2755 return BFA_IOC_TYPE_LL;
2756
2757 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2758
2759 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2760 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2761 }
2762
2763 void
2764 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2765 {
2766 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2767 memcpy((void *)serial_num,
2768 (void *)ioc->attr->brcd_serialnum,
2769 BFA_ADAPTER_SERIAL_NUM_LEN);
2770 }
2771
2772 void
2773 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2774 {
2775 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2776 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2777 }
2778
2779 void
2780 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2781 {
2782 WARN_ON(!chip_rev);
2783
2784 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2785
2786 chip_rev[0] = 'R';
2787 chip_rev[1] = 'e';
2788 chip_rev[2] = 'v';
2789 chip_rev[3] = '-';
2790 chip_rev[4] = ioc->attr->asic_rev;
2791 chip_rev[5] = '\0';
2792 }
2793
2794 void
2795 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2796 {
2797 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2798 memcpy(optrom_ver, ioc->attr->optrom_version,
2799 BFA_VERSION_LEN);
2800 }
2801
2802 void
2803 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2804 {
2805 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2806 strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2807 }
2808
2809 void
2810 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2811 {
2812 struct bfi_ioc_attr_s *ioc_attr;
2813 u8 nports = bfa_ioc_get_nports(ioc);
2814
2815 WARN_ON(!model);
2816 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2817
2818 ioc_attr = ioc->attr;
2819
2820 if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2821 (!bfa_mfg_is_mezz(ioc_attr->card_type)))
2822 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2823 BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2824 else
2825 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2826 BFA_MFG_NAME, ioc_attr->card_type);
2827 }
2828
2829 enum bfa_ioc_state
2830 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2831 {
2832 enum bfa_iocpf_state iocpf_st;
2833 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2834
2835 if (ioc_st == BFA_IOC_ENABLING ||
2836 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2837
2838 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2839
2840 switch (iocpf_st) {
2841 case BFA_IOCPF_SEMWAIT:
2842 ioc_st = BFA_IOC_SEMWAIT;
2843 break;
2844
2845 case BFA_IOCPF_HWINIT:
2846 ioc_st = BFA_IOC_HWINIT;
2847 break;
2848
2849 case BFA_IOCPF_FWMISMATCH:
2850 ioc_st = BFA_IOC_FWMISMATCH;
2851 break;
2852
2853 case BFA_IOCPF_FAIL:
2854 ioc_st = BFA_IOC_FAIL;
2855 break;
2856
2857 case BFA_IOCPF_INITFAIL:
2858 ioc_st = BFA_IOC_INITFAIL;
2859 break;
2860
2861 default:
2862 break;
2863 }
2864 }
2865
2866 return ioc_st;
2867 }
2868
2869 void
2870 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2871 {
2872 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2873
2874 ioc_attr->state = bfa_ioc_get_state(ioc);
2875 ioc_attr->port_id = bfa_ioc_portid(ioc);
2876 ioc_attr->port_mode = ioc->port_mode;
2877 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2878 ioc_attr->cap_bm = ioc->ad_cap_bm;
2879
2880 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2881
2882 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2883
2884 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2885 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2886 ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2887 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2888 }
2889
2890 mac_t
2891 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2892 {
2893 /*
2894 * Check the IOC type and return the appropriate MAC
2895 */
2896 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2897 return ioc->attr->fcoe_mac;
2898 else
2899 return ioc->attr->mac;
2900 }
2901
2902 mac_t
2903 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2904 {
2905 mac_t m;
2906
2907 m = ioc->attr->mfg_mac;
2908 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2909 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2910 else
2911 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2912 bfa_ioc_pcifn(ioc));
2913
2914 return m;
2915 }
2916
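/*
 * Worked example (editor's note, values hypothetical): with a
 * manufactured base MAC of 00:05:1e:00:00:10, PCI function 2 ends up
 * with 00:05:1e:00:00:12 -- the per-function address is the base MAC
 * plus the PCI function number, applied to the low-order bytes.
 */
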
2917 /*
2918 * Send AEN notification
2919 */
2920 void
2921 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2922 {
2923 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2924 struct bfa_aen_entry_s *aen_entry;
2925 enum bfa_ioc_type_e ioc_type;
2926
2927 bfad_get_aen_entry(bfad, aen_entry);
2928 if (!aen_entry)
2929 return;
2930
2931 ioc_type = bfa_ioc_get_type(ioc);
2932 switch (ioc_type) {
2933 case BFA_IOC_TYPE_FC:
2934 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2935 break;
2936 case BFA_IOC_TYPE_FCoE:
2937 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2938 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2939 break;
2940 case BFA_IOC_TYPE_LL:
2941 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2942 break;
2943 default:
2944 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2945 break;
2946 }
2947
2948 /* Send the AEN notification */
2949 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2950 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2951 BFA_AEN_CAT_IOC, event);
2952 }
2953
2954 /*
2955 * Retrieve saved firmware trace from a prior IOC failure.
2956 */
2957 bfa_status_t
2958 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2959 {
2960 int tlen;
2961
2962 if (ioc->dbg_fwsave_len == 0)
2963 return BFA_STATUS_ENOFSAVE;
2964
2965 tlen = *trclen;
2966 if (tlen > ioc->dbg_fwsave_len)
2967 tlen = ioc->dbg_fwsave_len;
2968
2969 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2970 *trclen = tlen;
2971 return BFA_STATUS_OK;
2972 }
2973
2974
2975 /*
2976 * Retrieve the running firmware trace directly from IOC SMEM.
2977 */
2978 bfa_status_t
2979 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2980 {
2981 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2982 int tlen;
2983 bfa_status_t status;
2984
2985 bfa_trc(ioc, *trclen);
2986
2987 tlen = *trclen;
2988 if (tlen > BFA_DBG_FWTRC_LEN)
2989 tlen = BFA_DBG_FWTRC_LEN;
2990
2991 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2992 *trclen = tlen;
2993 return status;
2994 }
2995
2996 static void
2997 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2998 {
2999 struct bfa_mbox_cmd_s cmd;
3000 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
3001
3002 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
3003 bfa_ioc_portid(ioc));
3004 req->clscode = cpu_to_be16(ioc->clscode);
3005 bfa_ioc_mbox_queue(ioc, &cmd);
3006 }
3007
3008 static void
3009 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
3010 {
3011 u32 fwsync_iter = 1000;
3012
3013 bfa_ioc_send_fwsync(ioc);
3014
3015 /*
3016 * After sending a fw sync mbox command wait for it to
3017 * take effect. We will not wait for a response because
3018 * 1. fw_sync mbox cmd doesn't have a response.
3019 * 2. Even if we implement that, interrupts might not
3020 * be enabled when we call this function.
3021 * So, just keep checking if any mbox cmd is pending, and
3022 * after waiting for a reasonable amount of time, go ahead.
3023 * It is possible that fw has crashed and the mbox command
3024 * is never acknowledged.
3025 */
3026 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3027 fwsync_iter--;
3028 }
3029
3030 /*
3031 * Dump firmware smem
3032 */
3033 bfa_status_t
3034 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3035 u32 *offset, int *buflen)
3036 {
3037 u32 loff;
3038 int dlen;
3039 bfa_status_t status;
3040 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3041
3042 if (*offset >= smem_len) {
3043 *offset = *buflen = 0;
3044 return BFA_STATUS_EINVAL;
3045 }
3046
3047 loff = *offset;
3048 dlen = *buflen;
3049
3050 /*
3051 * First smem read, sync smem before proceeding
3052 * No need to sync before reading every chunk.
3053 */
3054 if (loff == 0)
3055 bfa_ioc_fwsync(ioc);
3056
3057 if ((loff + dlen) >= smem_len)
3058 dlen = smem_len - loff;
3059
3060 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3061
3062 if (status != BFA_STATUS_OK) {
3063 *offset = *buflen = 0;
3064 return status;
3065 }
3066
3067 *offset += dlen;
3068
3069 if (*offset >= smem_len)
3070 *offset = 0;
3071
3072 *buflen = dlen;
3073
3074 return status;
3075 }
3076
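/*
 * Usage sketch (editor's illustration; the name and buffer handling are
 * hypothetical): drain the whole firmware memory by calling
 * bfa_ioc_debug_fwcore() in a loop -- the cursor advances by the number
 * of bytes returned and wraps to zero once the end of SMEM is reached.
 */
static bfa_status_t
my_dump_fwcore(struct bfa_ioc_s *ioc, void *buf, int buflen)
{
        u32 off = 0;
        int len;
        bfa_status_t status;

        do {
                len = buflen;
                status = bfa_ioc_debug_fwcore(ioc, buf, &off, &len);
                if (status != BFA_STATUS_OK)
                        return status;
                /* consume len bytes from buf before the next chunk */
        } while (off != 0);

        return BFA_STATUS_OK;
}
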
3077 /*
3078 * Firmware statistics
3079 */
3080 bfa_status_t
3081 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3082 {
3083 u32 loff = BFI_IOC_FWSTATS_OFF + \
3084 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3085 int tlen;
3086 bfa_status_t status;
3087
3088 if (ioc->stats_busy) {
3089 bfa_trc(ioc, ioc->stats_busy);
3090 return BFA_STATUS_DEVBUSY;
3091 }
3092 ioc->stats_busy = BFA_TRUE;
3093
3094 tlen = sizeof(struct bfa_fw_stats_s);
3095 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3096
3097 ioc->stats_busy = BFA_FALSE;
3098 return status;
3099 }
3100
3101 bfa_status_t
3102 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3103 {
3104 u32 loff = BFI_IOC_FWSTATS_OFF + \
3105 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3106 int tlen;
3107 bfa_status_t status;
3108
3109 if (ioc->stats_busy) {
3110 bfa_trc(ioc, ioc->stats_busy);
3111 return BFA_STATUS_DEVBUSY;
3112 }
3113 ioc->stats_busy = BFA_TRUE;
3114
3115 tlen = sizeof(struct bfa_fw_stats_s);
3116 status = bfa_ioc_smem_clr(ioc, loff, tlen);
3117
3118 ioc->stats_busy = BFA_FALSE;
3119 return status;
3120 }
3121
3122 /*
3123 * Save firmware trace if configured.
3124 */
3125 void
3126 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3127 {
3128 int tlen;
3129
3130 if (ioc->dbg_fwsave_once) {
3131 ioc->dbg_fwsave_once = BFA_FALSE;
3132 if (ioc->dbg_fwsave_len) {
3133 tlen = ioc->dbg_fwsave_len;
3134 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3135 }
3136 }
3137 }
3138
3139 /*
3140 * Firmware failure detected. Start recovery actions.
3141 */
3142 static void
3143 bfa_ioc_recover(struct bfa_ioc_s *ioc)
3144 {
3145 bfa_ioc_stats(ioc, ioc_hbfails);
3146 ioc->stats.hb_count = ioc->hb_count;
3147 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3148 }
3149
3150 /*
3151 * BFA IOC PF private functions
3152 */
3153 static void
3154 bfa_iocpf_timeout(void *ioc_arg)
3155 {
3156 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3157
3158 bfa_trc(ioc, 0);
3159 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3160 }
3161
3162 static void
3163 bfa_iocpf_sem_timeout(void *ioc_arg)
3164 {
3165 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3166
3167 bfa_ioc_hw_sem_get(ioc);
3168 }
3169
3170 static void
3171 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3172 {
3173 u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3174
3175 bfa_trc(ioc, fwstate);
3176
3177 if (fwstate == BFI_IOC_DISABLED) {
3178 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3179 return;
3180 }
3181
3182 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3183 bfa_iocpf_timeout(ioc);
3184 else {
3185 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3186 bfa_iocpf_poll_timer_start(ioc);
3187 }
3188 }
3189
3190 static void
3191 bfa_iocpf_poll_timeout(void *ioc_arg)
3192 {
3193 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3194
3195 bfa_ioc_poll_fwinit(ioc);
3196 }
3197
3198 /*
3199 * BFA timer tick -- advances all armed timers by one period.
3200 */
3201 void
3202 bfa_timer_beat(struct bfa_timer_mod_s *mod)
3203 {
3204 struct list_head *qh = &mod->timer_q;
3205 struct list_head *qe, *qe_next;
3206 struct bfa_timer_s *elem;
3207 struct list_head timedout_q;
3208
3209 INIT_LIST_HEAD(&timedout_q);
3210
3211 qe = bfa_q_next(qh);
3212
3213 while (qe != qh) {
3214 qe_next = bfa_q_next(qe);
3215
3216 elem = (struct bfa_timer_s *) qe;
3217 if (elem->timeout <= BFA_TIMER_FREQ) {
3218 elem->timeout = 0;
3219 list_del(&elem->qe);
3220 list_add_tail(&elem->qe, &timedout_q);
3221 } else {
3222 elem->timeout -= BFA_TIMER_FREQ;
3223 }
3224
3225 qe = qe_next; /* go to next elem */
3226 }
3227
3228 /*
3229 * Pop all the timeout entries
3230 */
3231 while (!list_empty(&timedout_q)) {
3232 bfa_q_deq(&timedout_q, &elem);
3233 elem->timercb(elem->arg);
3234 }
3235 }
3236
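/*
 * Usage sketch (editor's illustration; the my_* names are hypothetical):
 * callers arm a logical timer with bfa_timer_begin() and the driver
 * drives expiry by invoking bfa_timer_beat() once per timer period
 * (BFA_TIMER_FREQ), under the lock that protects the timer queue.
 */
static void
my_timeout(void *arg)
{
        /* handle expiry; call bfa_timer_begin() again if periodic */
}

static void
my_arm_timer(struct bfa_timer_mod_s *mod, struct bfa_timer_s *tmr,
                void *arg)
{
        bfa_timer_begin(mod, tmr, my_timeout, arg, 2 * BFA_TIMER_FREQ);
}
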
3237 /*
3238 * Should be called with lock protection
3239 */
3240 void
3241 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3242 void (*timercb) (void *), void *arg, unsigned int timeout)
3243 {
3244
3245 WARN_ON(timercb == NULL);
3246 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3247
3248 timer->timeout = timeout;
3249 timer->timercb = timercb;
3250 timer->arg = arg;
3251
3252 list_add_tail(&timer->qe, &mod->timer_q);
3253 }
3254
3255 /*
3256 * Should be called with lock protection
3257 */
3258 void
3259 bfa_timer_stop(struct bfa_timer_s *timer)
3260 {
3261 WARN_ON(list_empty(&timer->qe));
3262
3263 list_del(&timer->qe);
3264 }
3265
3266 /*
3267 * ASIC block related
3268 */
3269 static void
3270 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3271 {
3272 struct bfa_ablk_cfg_inst_s *cfg_inst;
3273 int i, j;
3274 u16 be16;
3275
3276 for (i = 0; i < BFA_ABLK_MAX; i++) {
3277 cfg_inst = &cfg->inst[i];
3278 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3279 be16 = cfg_inst->pf_cfg[j].pers;
3280 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3281 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3282 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3283 be16 = cfg_inst->pf_cfg[j].num_vectors;
3284 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3285 be16 = cfg_inst->pf_cfg[j].bw_min;
3286 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3287 be16 = cfg_inst->pf_cfg[j].bw_max;
3288 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3289 }
3290 }
3291 }
3292
3293 static void
3294 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3295 {
3296 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3297 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3298 bfa_ablk_cbfn_t cbfn;
3299
3300 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3301 bfa_trc(ablk->ioc, msg->mh.msg_id);
3302
3303 switch (msg->mh.msg_id) {
3304 case BFI_ABLK_I2H_QUERY:
3305 if (rsp->status == BFA_STATUS_OK) {
3306 memcpy(ablk->cfg, ablk->dma_addr.kva,
3307 sizeof(struct bfa_ablk_cfg_s));
3308 bfa_ablk_config_swap(ablk->cfg);
3309 ablk->cfg = NULL;
3310 }
3311 break;
3312
3313 case BFI_ABLK_I2H_ADPT_CONFIG:
3314 case BFI_ABLK_I2H_PORT_CONFIG:
3315 /* update config port mode */
3316 ablk->ioc->port_mode_cfg = rsp->port_mode;
3317 /* fall through */
3318 case BFI_ABLK_I2H_PF_DELETE:
3319 case BFI_ABLK_I2H_PF_UPDATE:
3320 case BFI_ABLK_I2H_OPTROM_ENABLE:
3321 case BFI_ABLK_I2H_OPTROM_DISABLE:
3322 /* No-op */
3323 break;
3324
3325 case BFI_ABLK_I2H_PF_CREATE:
3326 *(ablk->pcifn) = rsp->pcifn;
3327 ablk->pcifn = NULL;
3328 break;
3329
3330 default:
3331 WARN_ON(1);
3332 }
3333
3334 ablk->busy = BFA_FALSE;
3335 if (ablk->cbfn) {
3336 cbfn = ablk->cbfn;
3337 ablk->cbfn = NULL;
3338 cbfn(ablk->cbarg, rsp->status);
3339 }
3340 }
3341
3342 static void
3343 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3344 {
3345 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3346
3347 bfa_trc(ablk->ioc, event);
3348
3349 switch (event) {
3350 case BFA_IOC_E_ENABLED:
3351 WARN_ON(ablk->busy != BFA_FALSE);
3352 break;
3353
3354 case BFA_IOC_E_DISABLED:
3355 case BFA_IOC_E_FAILED:
3356 /* Fail any pending requests */
3357 ablk->pcifn = NULL;
3358 if (ablk->busy) {
3359 if (ablk->cbfn)
3360 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3361 ablk->cbfn = NULL;
3362 ablk->busy = BFA_FALSE;
3363 }
3364 break;
3365
3366 default:
3367 WARN_ON(1);
3368 break;
3369 }
3370 }
3371
3372 u32
3373 bfa_ablk_meminfo(void)
3374 {
3375 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3376 }
3377
3378 void
3379 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3380 {
3381 ablk->dma_addr.kva = dma_kva;
3382 ablk->dma_addr.pa = dma_pa;
3383 }
3384
3385 void
3386 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3387 {
3388 ablk->ioc = ioc;
3389
3390 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3391 bfa_q_qe_init(&ablk->ioc_notify);
3392 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3393 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3394 }
3395
3396 bfa_status_t
3397 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3398 bfa_ablk_cbfn_t cbfn, void *cbarg)
3399 {
3400 struct bfi_ablk_h2i_query_s *m;
3401
3402 WARN_ON(!ablk_cfg);
3403
3404 if (!bfa_ioc_is_operational(ablk->ioc)) {
3405 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3406 return BFA_STATUS_IOC_FAILURE;
3407 }
3408
3409 if (ablk->busy) {
3410 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3411 return BFA_STATUS_DEVBUSY;
3412 }
3413
3414 ablk->cfg = ablk_cfg;
3415 ablk->cbfn = cbfn;
3416 ablk->cbarg = cbarg;
3417 ablk->busy = BFA_TRUE;
3418
3419 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3420 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3421 bfa_ioc_portid(ablk->ioc));
3422 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3423 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3424
3425 return BFA_STATUS_OK;
3426 }
3427
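/*
 * Usage sketch (editor's illustration; the my_* names are hypothetical
 * and the callback prototype is assumed from how bfa_ablk_isr() invokes
 * it): query the ASIC block configuration asynchronously -- the
 * DMA-backed result is byte-swapped into the caller's cfg before the
 * callback runs.
 */
static void
my_ablk_query_done(void *cbarg, bfa_status_t status)
{
        /* cbarg is the cfg passed below; valid when status == BFA_STATUS_OK */
}

static bfa_status_t
my_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *cfg)
{
        return bfa_ablk_query(ablk, cfg, my_ablk_query_done, cfg);
}
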
3428 bfa_status_t
3429 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3430 u8 port, enum bfi_pcifn_class personality,
3431 u16 bw_min, u16 bw_max,
3432 bfa_ablk_cbfn_t cbfn, void *cbarg)
3433 {
3434 struct bfi_ablk_h2i_pf_req_s *m;
3435
3436 if (!bfa_ioc_is_operational(ablk->ioc)) {
3437 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3438 return BFA_STATUS_IOC_FAILURE;
3439 }
3440
3441 if (ablk->busy) {
3442 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3443 return BFA_STATUS_DEVBUSY;
3444 }
3445
3446 ablk->pcifn = pcifn;
3447 ablk->cbfn = cbfn;
3448 ablk->cbarg = cbarg;
3449 ablk->busy = BFA_TRUE;
3450
3451 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3452 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3453 bfa_ioc_portid(ablk->ioc));
3454 m->pers = cpu_to_be16((u16)personality);
3455 m->bw_min = cpu_to_be16(bw_min);
3456 m->bw_max = cpu_to_be16(bw_max);
3457 m->port = port;
3458 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3459
3460 return BFA_STATUS_OK;
3461 }
3462
3463 bfa_status_t
3464 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3465 bfa_ablk_cbfn_t cbfn, void *cbarg)
3466 {
3467 struct bfi_ablk_h2i_pf_req_s *m;
3468
3469 if (!bfa_ioc_is_operational(ablk->ioc)) {
3470 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3471 return BFA_STATUS_IOC_FAILURE;
3472 }
3473
3474 if (ablk->busy) {
3475 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3476 return BFA_STATUS_DEVBUSY;
3477 }
3478
3479 ablk->cbfn = cbfn;
3480 ablk->cbarg = cbarg;
3481 ablk->busy = BFA_TRUE;
3482
3483 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3484 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3485 bfa_ioc_portid(ablk->ioc));
3486 m->pcifn = (u8)pcifn;
3487 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3488
3489 return BFA_STATUS_OK;
3490 }
3491
3492 bfa_status_t
3493 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3494 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3495 {
3496 struct bfi_ablk_h2i_cfg_req_s *m;
3497
3498 if (!bfa_ioc_is_operational(ablk->ioc)) {
3499 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3500 return BFA_STATUS_IOC_FAILURE;
3501 }
3502
3503 if (ablk->busy) {
3504 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3505 return BFA_STATUS_DEVBUSY;
3506 }
3507
3508 ablk->cbfn = cbfn;
3509 ablk->cbarg = cbarg;
3510 ablk->busy = BFA_TRUE;
3511
3512 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3513 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3514 bfa_ioc_portid(ablk->ioc));
3515 m->mode = (u8)mode;
3516 m->max_pf = (u8)max_pf;
3517 m->max_vf = (u8)max_vf;
3518 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3519
3520 return BFA_STATUS_OK;
3521 }
3522
3523 bfa_status_t
3524 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3525 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3526 {
3527 struct bfi_ablk_h2i_cfg_req_s *m;
3528
3529 if (!bfa_ioc_is_operational(ablk->ioc)) {
3530 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3531 return BFA_STATUS_IOC_FAILURE;
3532 }
3533
3534 if (ablk->busy) {
3535 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3536 return BFA_STATUS_DEVBUSY;
3537 }
3538
3539 ablk->cbfn = cbfn;
3540 ablk->cbarg = cbarg;
3541 ablk->busy = BFA_TRUE;
3542
3543 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3544 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3545 bfa_ioc_portid(ablk->ioc));
3546 m->port = (u8)port;
3547 m->mode = (u8)mode;
3548 m->max_pf = (u8)max_pf;
3549 m->max_vf = (u8)max_vf;
3550 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3551
3552 return BFA_STATUS_OK;
3553 }
3554
3555 bfa_status_t
3556 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3557 u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3558 {
3559 struct bfi_ablk_h2i_pf_req_s *m;
3560
3561 if (!bfa_ioc_is_operational(ablk->ioc)) {
3562 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3563 return BFA_STATUS_IOC_FAILURE;
3564 }
3565
3566 if (ablk->busy) {
3567 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3568 return BFA_STATUS_DEVBUSY;
3569 }
3570
3571 ablk->cbfn = cbfn;
3572 ablk->cbarg = cbarg;
3573 ablk->busy = BFA_TRUE;
3574
3575 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3576 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3577 bfa_ioc_portid(ablk->ioc));
3578 m->pcifn = (u8)pcifn;
3579 m->bw_min = cpu_to_be16(bw_min);
3580 m->bw_max = cpu_to_be16(bw_max);
3581 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3582
3583 return BFA_STATUS_OK;
3584 }
3585
3586 bfa_status_t
3587 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3588 {
3589 struct bfi_ablk_h2i_optrom_s *m;
3590
3591 if (!bfa_ioc_is_operational(ablk->ioc)) {
3592 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3593 return BFA_STATUS_IOC_FAILURE;
3594 }
3595
3596 if (ablk->busy) {
3597 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3598 return BFA_STATUS_DEVBUSY;
3599 }
3600
3601 ablk->cbfn = cbfn;
3602 ablk->cbarg = cbarg;
3603 ablk->busy = BFA_TRUE;
3604
3605 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3606 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3607 bfa_ioc_portid(ablk->ioc));
3608 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3609
3610 return BFA_STATUS_OK;
3611 }
3612
3613 bfa_status_t
3614 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3615 {
3616 struct bfi_ablk_h2i_optrom_s *m;
3617
3618 if (!bfa_ioc_is_operational(ablk->ioc)) {
3619 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3620 return BFA_STATUS_IOC_FAILURE;
3621 }
3622
3623 if (ablk->busy) {
3624 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3625 return BFA_STATUS_DEVBUSY;
3626 }
3627
3628 ablk->cbfn = cbfn;
3629 ablk->cbarg = cbarg;
3630 ablk->busy = BFA_TRUE;
3631
3632 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3633 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3634 bfa_ioc_portid(ablk->ioc));
3635 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3636
3637 return BFA_STATUS_OK;
3638 }
3639
3640 /*
3641 * SFP module specific
3642 */
3643
3644 /* forward declarations */
3645 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3646 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3647 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3648 enum bfa_port_speed portspeed);
3649
3650 static void
3651 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3652 {
3653 bfa_trc(sfp, sfp->lock);
3654 if (sfp->cbfn)
3655 sfp->cbfn(sfp->cbarg, sfp->status);
3656 sfp->lock = 0;
3657 sfp->cbfn = NULL;
3658 }
3659
3660 static void
3661 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3662 {
3663 bfa_trc(sfp, sfp->portspeed);
3664 if (sfp->media) {
3665 bfa_sfp_media_get(sfp);
3666 if (sfp->state_query_cbfn)
3667 sfp->state_query_cbfn(sfp->state_query_cbarg,
3668 sfp->status);
3669 sfp->media = NULL;
3670 }
3671
3672 if (sfp->portspeed) {
3673 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3674 if (sfp->state_query_cbfn)
3675 sfp->state_query_cbfn(sfp->state_query_cbarg,
3676 sfp->status);
3677 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3678 }
3679
3680 sfp->state_query_lock = 0;
3681 sfp->state_query_cbfn = NULL;
3682 }
3683
3684 /*
3685 * IOC event handler.
3686 */
3687 static void
3688 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3689 {
3690 struct bfa_sfp_s *sfp = sfp_arg;
3691
3692 bfa_trc(sfp, event);
3693 bfa_trc(sfp, sfp->lock);
3694 bfa_trc(sfp, sfp->state_query_lock);
3695
3696 switch (event) {
3697 case BFA_IOC_E_DISABLED:
3698 case BFA_IOC_E_FAILED:
3699 if (sfp->lock) {
3700 sfp->status = BFA_STATUS_IOC_FAILURE;
3701 bfa_cb_sfp_show(sfp);
3702 }
3703
3704 if (sfp->state_query_lock) {
3705 sfp->status = BFA_STATUS_IOC_FAILURE;
3706 bfa_cb_sfp_state_query(sfp);
3707 }
3708 break;
3709
3710 default:
3711 break;
3712 }
3713 }
3714
3715 /*
3716 * Post SFP State Change Notification (SCN) to AEN.
3717 */
3718 static void
3719 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3720 {
3721 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3722 struct bfa_aen_entry_s *aen_entry;
3723 enum bfa_port_aen_event aen_evt = 0;
3724
3725 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3726 ((u64)rsp->event));
3727
3728 bfad_get_aen_entry(bfad, aen_entry);
3729 if (!aen_entry)
3730 return;
3731
3732 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3733 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3734 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3735
3736 switch (rsp->event) {
3737 case BFA_SFP_SCN_INSERTED:
3738 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3739 break;
3740 case BFA_SFP_SCN_REMOVED:
3741 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3742 break;
3743 case BFA_SFP_SCN_FAILED:
3744 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3745 break;
3746 case BFA_SFP_SCN_UNSUPPORT:
3747 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3748 break;
3749 case BFA_SFP_SCN_POM:
3750 aen_evt = BFA_PORT_AEN_SFP_POM;
3751 aen_entry->aen_data.port.level = rsp->pomlvl;
3752 break;
3753 default:
3754 bfa_trc(sfp, rsp->event);
3755 WARN_ON(1);
3756 }
3757
3758 /* Send the AEN notification */
3759 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3760 BFA_AEN_CAT_PORT, aen_evt);
3761 }
3762
3763 /*
3764 * Send SFP get-data request to firmware.
3765 */
3766 static void
3767 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3768 {
3769 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3770
3771 bfa_trc(sfp, req->memtype);
3772
3773 /* build host command */
3774 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3775 bfa_ioc_portid(sfp->ioc));
3776
3777 /* send mbox cmd */
3778 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3779 }
3780
3781 /*
3782 * SFP is valid, read sfp data
3783 */
3784 static void
3785 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3786 {
3787 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3788
3789 WARN_ON(sfp->lock != 0);
3790 bfa_trc(sfp, sfp->state);
3791
3792 sfp->lock = 1;
3793 sfp->memtype = memtype;
3794 req->memtype = memtype;
3795
3796 /* Setup SG list */
3797 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3798
3799 bfa_sfp_getdata_send(sfp);
3800 }
3801
3802 /*
3803 * SFP scn handler
3804 */
3805 static void
3806 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3807 {
3808 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3809
3810 switch (rsp->event) {
3811 case BFA_SFP_SCN_INSERTED:
3812 sfp->state = BFA_SFP_STATE_INSERTED;
3813 sfp->data_valid = 0;
3814 bfa_sfp_scn_aen_post(sfp, rsp);
3815 break;
3816 case BFA_SFP_SCN_REMOVED:
3817 sfp->state = BFA_SFP_STATE_REMOVED;
3818 sfp->data_valid = 0;
3819 bfa_sfp_scn_aen_post(sfp, rsp);
3820 break;
3821 case BFA_SFP_SCN_FAILED:
3822 sfp->state = BFA_SFP_STATE_FAILED;
3823 sfp->data_valid = 0;
3824 bfa_sfp_scn_aen_post(sfp, rsp);
3825 break;
3826 case BFA_SFP_SCN_UNSUPPORT:
3827 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3828 bfa_sfp_scn_aen_post(sfp, rsp);
3829 if (!sfp->lock)
3830 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3831 break;
3832 case BFA_SFP_SCN_POM:
3833 bfa_sfp_scn_aen_post(sfp, rsp);
3834 break;
3835 case BFA_SFP_SCN_VALID:
3836 sfp->state = BFA_SFP_STATE_VALID;
3837 if (!sfp->lock)
3838 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3839 break;
3840 default:
3841 bfa_trc(sfp, rsp->event);
3842 WARN_ON(1);
3843 }
3844 }
3845
3846 /*
3847 * SFP show complete
3848 */
3849 static void
3850 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3851 {
3852 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3853
3854 if (!sfp->lock) {
3855 /*
3856 * receiving response after ioc failure
3857 */
3858 bfa_trc(sfp, sfp->lock);
3859 return;
3860 }
3861
3862 bfa_trc(sfp, rsp->status);
3863 if (rsp->status == BFA_STATUS_OK) {
3864 sfp->data_valid = 1;
3865 if (sfp->state == BFA_SFP_STATE_VALID)
3866 sfp->status = BFA_STATUS_OK;
3867 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3868 sfp->status = BFA_STATUS_SFP_UNSUPP;
3869 else
3870 bfa_trc(sfp, sfp->state);
3871 } else {
3872 sfp->data_valid = 0;
3873 sfp->status = rsp->status;
3874 /* sfpshow shouldn't change sfp state */
3875 }
3876
3877 bfa_trc(sfp, sfp->memtype);
3878 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3879 bfa_trc(sfp, sfp->data_valid);
3880 if (sfp->data_valid) {
3881 u32 size = sizeof(struct sfp_mem_s);
3882 u8 *des = (u8 *)(sfp->sfpmem);
3883 memcpy(des, sfp->dbuf_kva, size);
3884 }
3885 /*
3886 * Queue completion callback.
3887 */
3888 bfa_cb_sfp_show(sfp);
3889 } else
3890 sfp->lock = 0;
3891
3892 bfa_trc(sfp, sfp->state_query_lock);
3893 if (sfp->state_query_lock) {
3894 sfp->state = rsp->state;
3895 /* Complete callback */
3896 bfa_cb_sfp_state_query(sfp);
3897 }
3898 }
3899
3900 /*
3901 * Query firmware for the SFP state.
3902 */
3903 static void
3904 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3905 {
3906 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3907
3908 /* Should not be doing query if not in _INIT state */
3909 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3910 WARN_ON(sfp->state_query_lock != 0);
3911 bfa_trc(sfp, sfp->state);
3912
3913 sfp->state_query_lock = 1;
3914 req->memtype = 0;
3915
3916 if (!sfp->lock)
3917 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3918 }
3919
3920 static void
3921 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3922 {
3923 enum bfa_defs_sfp_media_e *media = sfp->media;
3924
3925 *media = BFA_SFP_MEDIA_UNKNOWN;
3926
3927 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3928 *media = BFA_SFP_MEDIA_UNSUPPORT;
3929 else if (sfp->state == BFA_SFP_STATE_VALID) {
3930 union sfp_xcvr_e10g_code_u e10g;
3931 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3932 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3933 (sfpmem->srlid_base.xcvr[5] >> 1);
3934
3935 e10g.b = sfpmem->srlid_base.xcvr[0];
3936 bfa_trc(sfp, e10g.b);
3937 bfa_trc(sfp, xmtr_tech);
3938 /* check fc transmitter tech */
3939 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3940 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3941 (xmtr_tech & SFP_XMTR_TECH_CA))
3942 *media = BFA_SFP_MEDIA_CU;
3943 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3944 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3945 *media = BFA_SFP_MEDIA_EL;
3946 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3947 (xmtr_tech & SFP_XMTR_TECH_LC))
3948 *media = BFA_SFP_MEDIA_LW;
3949 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3950 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3951 (xmtr_tech & SFP_XMTR_TECH_SA))
3952 *media = BFA_SFP_MEDIA_SW;
3953 		/* Check 10G Ethernet Compliance code */
3954 else if (e10g.r.e10g_sr)
3955 *media = BFA_SFP_MEDIA_SW;
3956 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3957 *media = BFA_SFP_MEDIA_LW;
3958 else if (e10g.r.e10g_unall)
3959 *media = BFA_SFP_MEDIA_UNKNOWN;
3960 else
3961 bfa_trc(sfp, 0);
3962 } else
3963 bfa_trc(sfp, sfp->state);
3964 }
3965
3966 static bfa_status_t
3967 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3968 {
3969 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3970 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3971 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3972 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3973
3974 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3975 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3976 return BFA_STATUS_OK;
3977 else {
3978 bfa_trc(sfp, e10g.b);
3979 return BFA_STATUS_UNSUPP_SPEED;
3980 }
3981 }
3982 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3983 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3984 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3985 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3986 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3987 return BFA_STATUS_OK;
3988 else {
3989 bfa_trc(sfp, portspeed);
3990 bfa_trc(sfp, fc3.b);
3991 bfa_trc(sfp, e10g.b);
3992 return BFA_STATUS_UNSUPP_SPEED;
3993 }
3994 }
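/*
 * Worked example for the check above (hedged to the bits this file
 * actually tests): a request for BFA_PORT_SPEED_8GBPS succeeds only if
 * the FC3 compliance bit mb800 is set in the transceiver serial ID data;
 * otherwise the requested speed and both compliance bytes are traced and
 * BFA_STATUS_UNSUPP_SPEED is returned. 10G requests are special-cased on
 * the e10g_sr/e10g_lr compliance bits instead.
 */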
3995
3996 /*
3997 * SFP hmbox handler
3998 */
3999 void
4000 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
4001 {
4002 struct bfa_sfp_s *sfp = sfparg;
4003
4004 switch (msg->mh.msg_id) {
4005 case BFI_SFP_I2H_SHOW:
4006 bfa_sfp_show_comp(sfp, msg);
4007 break;
4008
4009 case BFI_SFP_I2H_SCN:
4010 bfa_sfp_scn(sfp, msg);
4011 break;
4012
4013 default:
4014 bfa_trc(sfp, msg->mh.msg_id);
4015 WARN_ON(1);
4016 }
4017 }
4018
4019 /*
4020 * Return DMA memory needed by sfp module.
4021 */
4022 u32
4023 bfa_sfp_meminfo(void)
4024 {
4025 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4026 }
4027
4028 /*
4029 * Attach virtual and physical memory for SFP.
4030 */
4031 void
4032 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4033 struct bfa_trc_mod_s *trcmod)
4034 {
4035 sfp->dev = dev;
4036 sfp->ioc = ioc;
4037 sfp->trcmod = trcmod;
4038
4039 sfp->cbfn = NULL;
4040 sfp->cbarg = NULL;
4041 sfp->sfpmem = NULL;
4042 sfp->lock = 0;
4043 sfp->data_valid = 0;
4044 sfp->state = BFA_SFP_STATE_INIT;
4045 sfp->state_query_lock = 0;
4046 sfp->state_query_cbfn = NULL;
4047 sfp->state_query_cbarg = NULL;
4048 sfp->media = NULL;
4049 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4050 sfp->is_elb = BFA_FALSE;
4051
4052 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4053 bfa_q_qe_init(&sfp->ioc_notify);
4054 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4055 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4056 }
4057
4058 /*
4059 * Claim Memory for SFP
4060 */
4061 void
4062 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4063 {
4064 sfp->dbuf_kva = dm_kva;
4065 sfp->dbuf_pa = dm_pa;
4066 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4067
4068 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4069 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4070 }
4071
4072 /*
4073 * Show SFP eeprom content
4074 *
4075 * @param[in] sfp - bfa sfp module
4076 *
4077 * @param[out] sfpmem - sfp eeprom data
4078 *
4079 */
4080 bfa_status_t
4081 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4082 bfa_cb_sfp_t cbfn, void *cbarg)
4083 {
4084
4085 if (!bfa_ioc_is_operational(sfp->ioc)) {
4086 bfa_trc(sfp, 0);
4087 return BFA_STATUS_IOC_NON_OP;
4088 }
4089
4090 if (sfp->lock) {
4091 bfa_trc(sfp, 0);
4092 return BFA_STATUS_DEVBUSY;
4093 }
4094
4095 sfp->cbfn = cbfn;
4096 sfp->cbarg = cbarg;
4097 sfp->sfpmem = sfpmem;
4098
4099 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4100 return BFA_STATUS_OK;
4101 }
4102
4103 /*
4104 * Return SFP Media type
4105 *
4106 * @param[in] sfp - bfa sfp module
4107 *
4108  * @param[out] media - SFP media type of the inserted module
4109 *
4110 */
4111 bfa_status_t
4112 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4113 bfa_cb_sfp_t cbfn, void *cbarg)
4114 {
4115 if (!bfa_ioc_is_operational(sfp->ioc)) {
4116 bfa_trc(sfp, 0);
4117 return BFA_STATUS_IOC_NON_OP;
4118 }
4119
4120 sfp->media = media;
4121 if (sfp->state == BFA_SFP_STATE_INIT) {
4122 if (sfp->state_query_lock) {
4123 bfa_trc(sfp, 0);
4124 return BFA_STATUS_DEVBUSY;
4125 } else {
4126 sfp->state_query_cbfn = cbfn;
4127 sfp->state_query_cbarg = cbarg;
4128 bfa_sfp_state_query(sfp);
4129 return BFA_STATUS_SFP_NOT_READY;
4130 }
4131 }
4132
4133 bfa_sfp_media_get(sfp);
4134 return BFA_STATUS_OK;
4135 }
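/*
 * Usage sketch (caller names are hypothetical, for illustration only):
 *
 *	enum bfa_defs_sfp_media_e media;
 *	bfa_status_t rc;
 *
 *	rc = bfa_sfp_media(sfp, &media, my_cbfn, my_cbarg);
 *	if (rc == BFA_STATUS_SFP_NOT_READY)
 *		;	... 'media' is filled in later, when my_cbfn runs
 *	else if (rc == BFA_STATUS_OK)
 *		;	... 'media' is already valid here
 */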
4136
4137 /*
4138 * Check if user set port speed is allowed by the SFP
4139 *
4140 * @param[in] sfp - bfa sfp module
4141 * @param[in] portspeed - port speed from user
4142 *
4143 */
4144 bfa_status_t
4145 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4146 bfa_cb_sfp_t cbfn, void *cbarg)
4147 {
4148 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4149
4150 if (!bfa_ioc_is_operational(sfp->ioc))
4151 return BFA_STATUS_IOC_NON_OP;
4152
4153 	/* For Mezz cards, all speeds are allowed */
4154 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4155 return BFA_STATUS_OK;
4156
4157 /* Check SFP state */
4158 sfp->portspeed = portspeed;
4159 if (sfp->state == BFA_SFP_STATE_INIT) {
4160 if (sfp->state_query_lock) {
4161 bfa_trc(sfp, 0);
4162 return BFA_STATUS_DEVBUSY;
4163 } else {
4164 sfp->state_query_cbfn = cbfn;
4165 sfp->state_query_cbarg = cbarg;
4166 bfa_sfp_state_query(sfp);
4167 return BFA_STATUS_SFP_NOT_READY;
4168 }
4169 }
4170
4171 if (sfp->state == BFA_SFP_STATE_REMOVED ||
4172 sfp->state == BFA_SFP_STATE_FAILED) {
4173 bfa_trc(sfp, sfp->state);
4174 return BFA_STATUS_NO_SFP_DEV;
4175 }
4176
4177 if (sfp->state == BFA_SFP_STATE_INSERTED) {
4178 bfa_trc(sfp, sfp->state);
4179 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
4180 }
4181
4182 	/* For eloopback, all speeds are allowed */
4183 if (sfp->is_elb)
4184 return BFA_STATUS_OK;
4185
4186 return bfa_sfp_speed_valid(sfp, portspeed);
4187 }
4188
4189 /*
4190 * Flash module specific
4191 */
4192
4193 /*
4194  * The FLASH DMA buffer should be big enough to hold both the MFG block
4195  * and the ASIC block (64KB) at the same time, and should be 2KB aligned
4196  * to keep a write segment from crossing a sector boundary.
4197 */
4198 #define BFA_FLASH_SEG_SZ 2048
4199 #define BFA_FLASH_DMA_BUF_SZ \
4200 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
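/*
 * Sizing example (the sizeof(struct bfa_mfg_block_s) used below is a
 * hypothetical value, for illustration only): with a 256-byte MFG block,
 * BFA_FLASH_DMA_BUF_SZ = BFA_ROUNDUP(0x10000 + 256, 2048)
 *		       = BFA_ROUNDUP(0x10100, 0x800) = 0x10800 (66KB),
 * i.e. the 64KB ASIC block plus the MFG block, rounded up to the 2KB
 * segment size.
 */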
4201
4202 static void
4203 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4204 int inst, int type)
4205 {
4206 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4207 struct bfa_aen_entry_s *aen_entry;
4208
4209 bfad_get_aen_entry(bfad, aen_entry);
4210 if (!aen_entry)
4211 return;
4212
4213 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4214 aen_entry->aen_data.audit.partition_inst = inst;
4215 aen_entry->aen_data.audit.partition_type = type;
4216
4217 /* Send the AEN notification */
4218 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4219 BFA_AEN_CAT_AUDIT, event);
4220 }
4221
4222 static void
4223 bfa_flash_cb(struct bfa_flash_s *flash)
4224 {
4225 flash->op_busy = 0;
4226 if (flash->cbfn)
4227 flash->cbfn(flash->cbarg, flash->status);
4228 }
4229
4230 static void
4231 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4232 {
4233 struct bfa_flash_s *flash = cbarg;
4234
4235 bfa_trc(flash, event);
4236 switch (event) {
4237 case BFA_IOC_E_DISABLED:
4238 case BFA_IOC_E_FAILED:
4239 if (flash->op_busy) {
4240 flash->status = BFA_STATUS_IOC_FAILURE;
4241 flash->cbfn(flash->cbarg, flash->status);
4242 flash->op_busy = 0;
4243 }
4244 break;
4245
4246 default:
4247 break;
4248 }
4249 }
4250
4251 /*
4252 * Send flash attribute query request.
4253 *
4254 * @param[in] cbarg - callback argument
4255 */
4256 static void
4257 bfa_flash_query_send(void *cbarg)
4258 {
4259 struct bfa_flash_s *flash = cbarg;
4260 struct bfi_flash_query_req_s *msg =
4261 (struct bfi_flash_query_req_s *) flash->mb.msg;
4262
4263 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4264 bfa_ioc_portid(flash->ioc));
4265 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4266 flash->dbuf_pa);
4267 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4268 }
4269
4270 /*
4271 * Send flash write request.
4272 *
4273 * @param[in] cbarg - callback argument
4274 */
4275 static void
4276 bfa_flash_write_send(struct bfa_flash_s *flash)
4277 {
4278 struct bfi_flash_write_req_s *msg =
4279 (struct bfi_flash_write_req_s *) flash->mb.msg;
4280 u32 len;
4281
4282 msg->type = be32_to_cpu(flash->type);
4283 msg->instance = flash->instance;
4284 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4285 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4286 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4287 msg->length = be32_to_cpu(len);
4288
4289 /* indicate if it's the last msg of the whole write operation */
4290 msg->last = (len == flash->residue) ? 1 : 0;
4291
4292 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4293 bfa_ioc_portid(flash->ioc));
4294 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4295 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4296 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4297
4298 flash->residue -= len;
4299 flash->offset += len;
4300 }
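/*
 * Chunking example (illustrative numbers, using the hypothetical 0x10800
 * buffer size from the sizing example above): a 150000-byte update goes
 * out as 67584 + 67584 + 14832 bytes; 'last' is set only on the third
 * message, when the remaining residue fits within one DMA buffer.
 */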
4301
4302 /*
4303 * Send flash read request.
4304 *
4305 * @param[in] cbarg - callback argument
4306 */
4307 static void
4308 bfa_flash_read_send(void *cbarg)
4309 {
4310 struct bfa_flash_s *flash = cbarg;
4311 struct bfi_flash_read_req_s *msg =
4312 (struct bfi_flash_read_req_s *) flash->mb.msg;
4313 u32 len;
4314
4315 msg->type = be32_to_cpu(flash->type);
4316 msg->instance = flash->instance;
4317 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4318 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4319 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4320 msg->length = be32_to_cpu(len);
4321 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4322 bfa_ioc_portid(flash->ioc));
4323 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4324 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4325 }
4326
4327 /*
4328 * Send flash erase request.
4329 *
4330 * @param[in] cbarg - callback argument
4331 */
4332 static void
4333 bfa_flash_erase_send(void *cbarg)
4334 {
4335 struct bfa_flash_s *flash = cbarg;
4336 struct bfi_flash_erase_req_s *msg =
4337 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4338
4339 msg->type = be32_to_cpu(flash->type);
4340 msg->instance = flash->instance;
4341 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4342 bfa_ioc_portid(flash->ioc));
4343 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4344 }
4345
4346 /*
4347 * Process flash response messages upon receiving interrupts.
4348 *
4349 * @param[in] flasharg - flash structure
4350 * @param[in] msg - message structure
4351 */
4352 static void
4353 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4354 {
4355 struct bfa_flash_s *flash = flasharg;
4356 u32 status;
4357
4358 union {
4359 struct bfi_flash_query_rsp_s *query;
4360 struct bfi_flash_erase_rsp_s *erase;
4361 struct bfi_flash_write_rsp_s *write;
4362 struct bfi_flash_read_rsp_s *read;
4363 struct bfi_flash_event_s *event;
4364 struct bfi_mbmsg_s *msg;
4365 } m;
4366
4367 m.msg = msg;
4368 bfa_trc(flash, msg->mh.msg_id);
4369
4370 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4371 /* receiving response after ioc failure */
4372 bfa_trc(flash, 0x9999);
4373 return;
4374 }
4375
4376 switch (msg->mh.msg_id) {
4377 case BFI_FLASH_I2H_QUERY_RSP:
4378 status = be32_to_cpu(m.query->status);
4379 bfa_trc(flash, status);
4380 if (status == BFA_STATUS_OK) {
4381 u32 i;
4382 struct bfa_flash_attr_s *attr, *f;
4383
4384 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4385 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4386 attr->status = be32_to_cpu(f->status);
4387 attr->npart = be32_to_cpu(f->npart);
4388 bfa_trc(flash, attr->status);
4389 bfa_trc(flash, attr->npart);
4390 for (i = 0; i < attr->npart; i++) {
4391 attr->part[i].part_type =
4392 be32_to_cpu(f->part[i].part_type);
4393 attr->part[i].part_instance =
4394 be32_to_cpu(f->part[i].part_instance);
4395 attr->part[i].part_off =
4396 be32_to_cpu(f->part[i].part_off);
4397 attr->part[i].part_size =
4398 be32_to_cpu(f->part[i].part_size);
4399 attr->part[i].part_len =
4400 be32_to_cpu(f->part[i].part_len);
4401 attr->part[i].part_status =
4402 be32_to_cpu(f->part[i].part_status);
4403 }
4404 }
4405 flash->status = status;
4406 bfa_flash_cb(flash);
4407 break;
4408 case BFI_FLASH_I2H_ERASE_RSP:
4409 status = be32_to_cpu(m.erase->status);
4410 bfa_trc(flash, status);
4411 flash->status = status;
4412 bfa_flash_cb(flash);
4413 break;
4414 case BFI_FLASH_I2H_WRITE_RSP:
4415 status = be32_to_cpu(m.write->status);
4416 bfa_trc(flash, status);
4417 if (status != BFA_STATUS_OK || flash->residue == 0) {
4418 flash->status = status;
4419 bfa_flash_cb(flash);
4420 } else {
4421 bfa_trc(flash, flash->offset);
4422 bfa_flash_write_send(flash);
4423 }
4424 break;
4425 case BFI_FLASH_I2H_READ_RSP:
4426 status = be32_to_cpu(m.read->status);
4427 bfa_trc(flash, status);
4428 if (status != BFA_STATUS_OK) {
4429 flash->status = status;
4430 bfa_flash_cb(flash);
4431 } else {
4432 u32 len = be32_to_cpu(m.read->length);
4433 bfa_trc(flash, flash->offset);
4434 bfa_trc(flash, len);
4435 memcpy(flash->ubuf + flash->offset,
4436 flash->dbuf_kva, len);
4437 flash->residue -= len;
4438 flash->offset += len;
4439 if (flash->residue == 0) {
4440 flash->status = status;
4441 bfa_flash_cb(flash);
4442 } else
4443 bfa_flash_read_send(flash);
4444 }
4445 break;
4446 case BFI_FLASH_I2H_BOOT_VER_RSP:
4447 break;
4448 case BFI_FLASH_I2H_EVENT:
4449 status = be32_to_cpu(m.event->status);
4450 bfa_trc(flash, status);
4451 if (status == BFA_STATUS_BAD_FWCFG)
4452 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4453 else if (status == BFA_STATUS_INVALID_VENDOR) {
4454 u32 param;
4455 param = be32_to_cpu(m.event->param);
4456 bfa_trc(flash, param);
4457 bfa_ioc_aen_post(flash->ioc,
4458 BFA_IOC_AEN_INVALID_VENDOR);
4459 }
4460 break;
4461
4462 default:
4463 WARN_ON(1);
4464 }
4465 }
4466
4467 /*
4468 * Flash memory info API.
4469 *
4470 * @param[in] mincfg - minimal cfg variable
4471 */
4472 u32
4473 bfa_flash_meminfo(bfa_boolean_t mincfg)
4474 {
4475 /* min driver doesn't need flash */
4476 if (mincfg)
4477 return 0;
4478 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4479 }
4480
4481 /*
4482 * Flash attach API.
4483 *
4484 * @param[in] flash - flash structure
4485 * @param[in] ioc - ioc structure
4486 * @param[in] dev - device structure
4487 * @param[in] trcmod - trace module
4488  * @param[in] mincfg - minimal cfg variable
4489 */
4490 void
4491 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4492 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4493 {
4494 flash->ioc = ioc;
4495 flash->trcmod = trcmod;
4496 flash->cbfn = NULL;
4497 flash->cbarg = NULL;
4498 flash->op_busy = 0;
4499
4500 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4501 bfa_q_qe_init(&flash->ioc_notify);
4502 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4503 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4504
4505 /* min driver doesn't need flash */
4506 if (mincfg) {
4507 flash->dbuf_kva = NULL;
4508 flash->dbuf_pa = 0;
4509 }
4510 }
4511
4512 /*
4513 * Claim memory for flash
4514 *
4515 * @param[in] flash - flash structure
4516 * @param[in] dm_kva - pointer to virtual memory address
4517 * @param[in] dm_pa - physical memory address
4518 * @param[in] mincfg - minimal cfg variable
4519 */
4520 void
4521 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4522 bfa_boolean_t mincfg)
4523 {
4524 if (mincfg)
4525 return;
4526
4527 flash->dbuf_kva = dm_kva;
4528 flash->dbuf_pa = dm_pa;
4529 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4530 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4531 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4532 }
4533
4534 /*
4535 * Get flash attribute.
4536 *
4537 * @param[in] flash - flash structure
4538 * @param[in] attr - flash attribute structure
4539 * @param[in] cbfn - callback function
4540 * @param[in] cbarg - callback argument
4541 *
4542 * Return status.
4543 */
4544 bfa_status_t
4545 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4546 bfa_cb_flash_t cbfn, void *cbarg)
4547 {
4548 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4549
4550 if (!bfa_ioc_is_operational(flash->ioc))
4551 return BFA_STATUS_IOC_NON_OP;
4552
4553 if (flash->op_busy) {
4554 bfa_trc(flash, flash->op_busy);
4555 return BFA_STATUS_DEVBUSY;
4556 }
4557
4558 flash->op_busy = 1;
4559 flash->cbfn = cbfn;
4560 flash->cbarg = cbarg;
4561 flash->ubuf = (u8 *) attr;
4562 bfa_flash_query_send(flash);
4563
4564 return BFA_STATUS_OK;
4565 }
4566
4567 /*
4568 * Erase flash partition.
4569 *
4570 * @param[in] flash - flash structure
4571 * @param[in] type - flash partition type
4572 * @param[in] instance - flash partition instance
4573 * @param[in] cbfn - callback function
4574 * @param[in] cbarg - callback argument
4575 *
4576 * Return status.
4577 */
4578 bfa_status_t
4579 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4580 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4581 {
4582 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4583 bfa_trc(flash, type);
4584 bfa_trc(flash, instance);
4585
4586 if (!bfa_ioc_is_operational(flash->ioc))
4587 return BFA_STATUS_IOC_NON_OP;
4588
4589 if (flash->op_busy) {
4590 bfa_trc(flash, flash->op_busy);
4591 return BFA_STATUS_DEVBUSY;
4592 }
4593
4594 flash->op_busy = 1;
4595 flash->cbfn = cbfn;
4596 flash->cbarg = cbarg;
4597 flash->type = type;
4598 flash->instance = instance;
4599
4600 bfa_flash_erase_send(flash);
4601 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4602 instance, type);
4603 return BFA_STATUS_OK;
4604 }
4605
4606 /*
4607 * Update flash partition.
4608 *
4609 * @param[in] flash - flash structure
4610 * @param[in] type - flash partition type
4611 * @param[in] instance - flash partition instance
4612 * @param[in] buf - update data buffer
4613 * @param[in] len - data buffer length
4614 * @param[in] offset - offset relative to the partition starting address
4615 * @param[in] cbfn - callback function
4616 * @param[in] cbarg - callback argument
4617 *
4618 * Return status.
4619 */
4620 bfa_status_t
4621 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4622 u8 instance, void *buf, u32 len, u32 offset,
4623 bfa_cb_flash_t cbfn, void *cbarg)
4624 {
4625 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4626 bfa_trc(flash, type);
4627 bfa_trc(flash, instance);
4628 bfa_trc(flash, len);
4629 bfa_trc(flash, offset);
4630
4631 if (!bfa_ioc_is_operational(flash->ioc))
4632 return BFA_STATUS_IOC_NON_OP;
4633
4634 /*
4635 	 * 'len' must be on a word (4-byte) boundary
4636 	 * 'offset' must be on a sector (16KB) boundary
4637 */
4638 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4639 return BFA_STATUS_FLASH_BAD_LEN;
4640
4641 if (type == BFA_FLASH_PART_MFG)
4642 return BFA_STATUS_EINVAL;
4643
4644 if (flash->op_busy) {
4645 bfa_trc(flash, flash->op_busy);
4646 return BFA_STATUS_DEVBUSY;
4647 }
4648
4649 flash->op_busy = 1;
4650 flash->cbfn = cbfn;
4651 flash->cbarg = cbarg;
4652 flash->type = type;
4653 flash->instance = instance;
4654 flash->residue = len;
4655 flash->offset = 0;
4656 flash->addr_off = offset;
4657 flash->ubuf = buf;
4658
4659 bfa_flash_write_send(flash);
4660 return BFA_STATUS_OK;
4661 }
4662
4663 /*
4664 * Read flash partition.
4665 *
4666 * @param[in] flash - flash structure
4667 * @param[in] type - flash partition type
4668 * @param[in] instance - flash partition instance
4669 * @param[in] buf - read data buffer
4670 * @param[in] len - data buffer length
4671 * @param[in] offset - offset relative to the partition starting address
4672 * @param[in] cbfn - callback function
4673 * @param[in] cbarg - callback argument
4674 *
4675 * Return status.
4676 */
4677 bfa_status_t
4678 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4679 u8 instance, void *buf, u32 len, u32 offset,
4680 bfa_cb_flash_t cbfn, void *cbarg)
4681 {
4682 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4683 bfa_trc(flash, type);
4684 bfa_trc(flash, instance);
4685 bfa_trc(flash, len);
4686 bfa_trc(flash, offset);
4687
4688 if (!bfa_ioc_is_operational(flash->ioc))
4689 return BFA_STATUS_IOC_NON_OP;
4690
4691 /*
4692 	 * 'len' must be on a word (4-byte) boundary
4693 	 * 'offset' must be on a sector (16KB) boundary
4694 */
4695 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4696 return BFA_STATUS_FLASH_BAD_LEN;
4697
4698 if (flash->op_busy) {
4699 bfa_trc(flash, flash->op_busy);
4700 return BFA_STATUS_DEVBUSY;
4701 }
4702
4703 flash->op_busy = 1;
4704 flash->cbfn = cbfn;
4705 flash->cbarg = cbarg;
4706 flash->type = type;
4707 flash->instance = instance;
4708 flash->residue = len;
4709 flash->offset = 0;
4710 flash->addr_off = offset;
4711 flash->ubuf = buf;
4712 bfa_flash_read_send(flash);
4713
4714 return BFA_STATUS_OK;
4715 }
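/*
 * Usage sketch (callback names are hypothetical): read the first 8KB of
 * the second DRV partition into 'buf'.
 *
 *	rc = bfa_flash_read_part(flash, BFA_FLASH_PART_DRV, 1, buf,
 *				 0x2000, 0, my_read_cb, my_arg);
 *
 * len = 0x2000 satisfies the 4-byte rule and offset = 0 satisfies the
 * 16KB sector rule; my_read_cb fires once every chunk has been copied.
 */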
4716
4717 /*
4718 * DIAG module specific
4719 */
4720
4721 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4722 #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
4723
4724 /* IOC event handler */
4725 static void
4726 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4727 {
4728 struct bfa_diag_s *diag = diag_arg;
4729
4730 bfa_trc(diag, event);
4731 bfa_trc(diag, diag->block);
4732 bfa_trc(diag, diag->fwping.lock);
4733 bfa_trc(diag, diag->tsensor.lock);
4734
4735 switch (event) {
4736 case BFA_IOC_E_DISABLED:
4737 case BFA_IOC_E_FAILED:
4738 if (diag->fwping.lock) {
4739 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4740 diag->fwping.cbfn(diag->fwping.cbarg,
4741 diag->fwping.status);
4742 diag->fwping.lock = 0;
4743 }
4744
4745 if (diag->tsensor.lock) {
4746 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4747 diag->tsensor.cbfn(diag->tsensor.cbarg,
4748 diag->tsensor.status);
4749 diag->tsensor.lock = 0;
4750 }
4751
4752 if (diag->block) {
4753 if (diag->timer_active) {
4754 bfa_timer_stop(&diag->timer);
4755 diag->timer_active = 0;
4756 }
4757
4758 diag->status = BFA_STATUS_IOC_FAILURE;
4759 diag->cbfn(diag->cbarg, diag->status);
4760 diag->block = 0;
4761 }
4762 break;
4763
4764 default:
4765 break;
4766 }
4767 }
4768
4769 static void
4770 bfa_diag_memtest_done(void *cbarg)
4771 {
4772 struct bfa_diag_s *diag = cbarg;
4773 struct bfa_ioc_s *ioc = diag->ioc;
4774 struct bfa_diag_memtest_result *res = diag->result;
4775 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4776 u32 pgnum, pgoff, i;
4777
4778 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4779 pgoff = PSS_SMEM_PGOFF(loff);
4780
4781 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4782
4783 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4784 sizeof(u32)); i++) {
4785 /* read test result from smem */
4786 *((u32 *) res + i) =
4787 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4788 loff += sizeof(u32);
4789 }
4790
4791 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4792 bfa_ioc_reset_fwstate(ioc);
4793
4794 res->status = swab32(res->status);
4795 bfa_trc(diag, res->status);
4796
4797 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4798 diag->status = BFA_STATUS_OK;
4799 else {
4800 diag->status = BFA_STATUS_MEMTEST_FAILED;
4801 res->addr = swab32(res->addr);
4802 res->exp = swab32(res->exp);
4803 res->act = swab32(res->act);
4804 res->err_status = swab32(res->err_status);
4805 res->err_status1 = swab32(res->err_status1);
4806 res->err_addr = swab32(res->err_addr);
4807 bfa_trc(diag, res->addr);
4808 bfa_trc(diag, res->exp);
4809 bfa_trc(diag, res->act);
4810 bfa_trc(diag, res->err_status);
4811 bfa_trc(diag, res->err_status1);
4812 bfa_trc(diag, res->err_addr);
4813 }
4814 diag->timer_active = 0;
4815 diag->cbfn(diag->cbarg, diag->status);
4816 diag->block = 0;
4817 }
4818
4819 /*
4820 * Firmware ping
4821 */
4822
4823 /*
4824 * Perform DMA test directly
4825 */
4826 static void
4827 diag_fwping_send(struct bfa_diag_s *diag)
4828 {
4829 struct bfi_diag_fwping_req_s *fwping_req;
4830 u32 i;
4831
4832 bfa_trc(diag, diag->fwping.dbuf_pa);
4833
4834 /* fill DMA area with pattern */
4835 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4836 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4837
4838 /* Fill mbox msg */
4839 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4840
4841 /* Setup SG list */
4842 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4843 diag->fwping.dbuf_pa);
4844 /* Set up dma count */
4845 fwping_req->count = cpu_to_be32(diag->fwping.count);
4846 /* Set up data pattern */
4847 fwping_req->data = diag->fwping.data;
4848
4849 /* build host command */
4850 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4851 bfa_ioc_portid(diag->ioc));
4852
4853 /* send mbox cmd */
4854 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4855 }
4856
4857 static void
4858 diag_fwping_comp(struct bfa_diag_s *diag,
4859 struct bfi_diag_fwping_rsp_s *diag_rsp)
4860 {
4861 u32 rsp_data = diag_rsp->data;
4862 u8 rsp_dma_status = diag_rsp->dma_status;
4863
4864 bfa_trc(diag, rsp_data);
4865 bfa_trc(diag, rsp_dma_status);
4866
4867 if (rsp_dma_status == BFA_STATUS_OK) {
4868 u32 i, pat;
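		/*
		 * The check below assumes the firmware complements the
		 * pattern on each ping: after an odd number of pings the
		 * DMA area should hold ~data, after an even number, data.
		 */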
4869 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4870 diag->fwping.data;
4871 /* Check mbox data */
4872 if (diag->fwping.data != rsp_data) {
4873 bfa_trc(diag, rsp_data);
4874 diag->fwping.result->dmastatus =
4875 BFA_STATUS_DATACORRUPTED;
4876 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4877 diag->fwping.cbfn(diag->fwping.cbarg,
4878 diag->fwping.status);
4879 diag->fwping.lock = 0;
4880 return;
4881 }
4882 /* Check dma pattern */
4883 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4884 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4885 bfa_trc(diag, i);
4886 bfa_trc(diag, pat);
4887 bfa_trc(diag,
4888 *((u32 *)diag->fwping.dbuf_kva + i));
4889 diag->fwping.result->dmastatus =
4890 BFA_STATUS_DATACORRUPTED;
4891 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4892 diag->fwping.cbfn(diag->fwping.cbarg,
4893 diag->fwping.status);
4894 diag->fwping.lock = 0;
4895 return;
4896 }
4897 }
4898 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4899 diag->fwping.status = BFA_STATUS_OK;
4900 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4901 diag->fwping.lock = 0;
4902 } else {
4903 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4904 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4905 diag->fwping.lock = 0;
4906 }
4907 }
4908
4909 /*
4910 * Temperature Sensor
4911 */
4912
4913 static void
4914 diag_tempsensor_send(struct bfa_diag_s *diag)
4915 {
4916 struct bfi_diag_ts_req_s *msg;
4917
4918 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4919 bfa_trc(diag, msg->temp);
4920 /* build host command */
4921 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4922 bfa_ioc_portid(diag->ioc));
4923 /* send mbox cmd */
4924 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4925 }
4926
4927 static void
4928 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4929 {
4930 if (!diag->tsensor.lock) {
4931 /* receiving response after ioc failure */
4932 bfa_trc(diag, diag->tsensor.lock);
4933 return;
4934 }
4935
4936 /*
4937 	 * The ASIC junction tempsensor is a register read operation,
4938 	 * so it will always return OK
4939 */
4940 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4941 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4942 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4943
4944 if (rsp->ts_brd) {
4945 /* tsensor.temp->status is brd_temp status */
4946 diag->tsensor.temp->status = rsp->status;
4947 if (rsp->status == BFA_STATUS_OK) {
4948 diag->tsensor.temp->brd_temp =
4949 be16_to_cpu(rsp->brd_temp);
4950 } else
4951 diag->tsensor.temp->brd_temp = 0;
4952 }
4953
4954 bfa_trc(diag, rsp->status);
4955 bfa_trc(diag, rsp->ts_junc);
4956 bfa_trc(diag, rsp->temp);
4957 bfa_trc(diag, rsp->ts_brd);
4958 bfa_trc(diag, rsp->brd_temp);
4959
4960 	/* tsensor status is always good because we always have the junction temp */
4961 diag->tsensor.status = BFA_STATUS_OK;
4962 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4963 diag->tsensor.lock = 0;
4964 }
4965
4966 /*
4967 * LED Test command
4968 */
4969 static void
4970 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4971 {
4972 struct bfi_diag_ledtest_req_s *msg;
4973
4974 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4975 /* build host command */
4976 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4977 bfa_ioc_portid(diag->ioc));
4978
4979 /*
4980 	 * convert the freq from N blinks per 10 sec to the
4981 	 * crossbow on-time value. We do it here because division is needed
4982 */
4983 if (ledtest->freq)
4984 ledtest->freq = 500 / ledtest->freq;
4985
4986 if (ledtest->freq == 0)
4987 ledtest->freq = 1;
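	/*
	 * Example: 10 blinks per 10 sec becomes 500 / 10 = 50 on-time
	 * units; anything above 500 divides down to 0 and is clamped to 1,
	 * presumably the fastest rate the hardware encoding allows.
	 */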
4988
4989 bfa_trc(diag, ledtest->freq);
4990 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4991 msg->cmd = (u8) ledtest->cmd;
4992 msg->color = (u8) ledtest->color;
4993 msg->portid = bfa_ioc_portid(diag->ioc);
4994 msg->led = ledtest->led;
4995 msg->freq = cpu_to_be16(ledtest->freq);
4996
4997 /* send mbox cmd */
4998 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4999 }
5000
5001 static void
5002 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
5003 {
5004 bfa_trc(diag, diag->ledtest.lock);
5005 diag->ledtest.lock = BFA_FALSE;
5006 /* no bfa_cb_queue is needed because driver is not waiting */
5007 }
5008
5009 /*
5010 * Port beaconing
5011 */
5012 static void
5013 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
5014 {
5015 struct bfi_diag_portbeacon_req_s *msg;
5016
5017 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5018 /* build host command */
5019 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5020 bfa_ioc_portid(diag->ioc));
5021 msg->beacon = beacon;
5022 msg->period = cpu_to_be32(sec);
5023 /* send mbox cmd */
5024 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5025 }
5026
5027 static void
5028 diag_portbeacon_comp(struct bfa_diag_s *diag)
5029 {
5030 bfa_trc(diag, diag->beacon.state);
5031 diag->beacon.state = BFA_FALSE;
5032 if (diag->cbfn_beacon)
5033 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5034 }
5035
5036 /*
5037 * Diag hmbox handler
5038 */
5039 void
5040 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5041 {
5042 struct bfa_diag_s *diag = diagarg;
5043
5044 switch (msg->mh.msg_id) {
5045 case BFI_DIAG_I2H_PORTBEACON:
5046 diag_portbeacon_comp(diag);
5047 break;
5048 case BFI_DIAG_I2H_FWPING:
5049 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5050 break;
5051 case BFI_DIAG_I2H_TEMPSENSOR:
5052 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5053 break;
5054 case BFI_DIAG_I2H_LEDTEST:
5055 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5056 break;
5057 default:
5058 bfa_trc(diag, msg->mh.msg_id);
5059 WARN_ON(1);
5060 }
5061 }
5062
5063 /*
5064 * Gen RAM Test
5065 *
5066 * @param[in] *diag - diag data struct
5067 * @param[in] *memtest - mem test params input from upper layer,
5068 * @param[in] pattern - mem test pattern
5069 * @param[in] *result - mem test result
5070  * @param[in] cbfn - mem test callback function
5071  * @param[in] cbarg - callback function arg
5072 *
5073 * @param[out]
5074 */
5075 bfa_status_t
5076 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5077 u32 pattern, struct bfa_diag_memtest_result *result,
5078 bfa_cb_diag_t cbfn, void *cbarg)
5079 {
5080 u32 memtest_tov;
5081
5082 bfa_trc(diag, pattern);
5083
5084 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5085 return BFA_STATUS_ADAPTER_ENABLED;
5086
5087 /* check to see if there is another destructive diag cmd running */
5088 if (diag->block) {
5089 bfa_trc(diag, diag->block);
5090 return BFA_STATUS_DEVBUSY;
5091 } else
5092 diag->block = 1;
5093
5094 diag->result = result;
5095 diag->cbfn = cbfn;
5096 diag->cbarg = cbarg;
5097
5098 /* download memtest code and take LPU0 out of reset */
5099 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5100
5101 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5102 CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5103 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5104 bfa_diag_memtest_done, diag, memtest_tov);
5105 diag->timer_active = 1;
5106 return BFA_STATUS_OK;
5107 }
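/*
 * Usage note (a sketch of the contract visible above): memtest is
 * destructive, so the adapter must already be disabled or the call fails
 * with BFA_STATUS_ADAPTER_ENABLED. Completion is reported through cbfn
 * from bfa_diag_memtest_done() when the memtest firmware finishes within
 * memtest_tov, or with BFA_STATUS_IOC_FAILURE from bfa_diag_notify() if
 * the IOC goes down first.
 */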
5108
5109 /*
5110 * DIAG firmware ping command
5111 *
5112 * @param[in] *diag - diag data struct
5113 * @param[in] cnt - dma loop count for testing PCIE
5114 * @param[in] data - data pattern to pass in fw
5115  * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
5116  * @param[in] cbfn - callback function
5117  * @param[in] *cbarg - callback function arg
5118 *
5119 * @param[out]
5120 */
5121 bfa_status_t
5122 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5123 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5124 void *cbarg)
5125 {
5126 bfa_trc(diag, cnt);
5127 bfa_trc(diag, data);
5128
5129 if (!bfa_ioc_is_operational(diag->ioc))
5130 return BFA_STATUS_IOC_NON_OP;
5131
5132 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5133 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5134 return BFA_STATUS_CMD_NOTSUPP;
5135
5136 /* check to see if there is another destructive diag cmd running */
5137 if (diag->block || diag->fwping.lock) {
5138 bfa_trc(diag, diag->block);
5139 bfa_trc(diag, diag->fwping.lock);
5140 return BFA_STATUS_DEVBUSY;
5141 }
5142
5143 /* Initialization */
5144 diag->fwping.lock = 1;
5145 diag->fwping.cbfn = cbfn;
5146 diag->fwping.cbarg = cbarg;
5147 diag->fwping.result = result;
5148 diag->fwping.data = data;
5149 diag->fwping.count = cnt;
5150
5151 /* Init test results */
5152 diag->fwping.result->data = 0;
5153 diag->fwping.result->status = BFA_STATUS_OK;
5154
5155 /* kick off the first ping */
5156 diag_fwping_send(diag);
5157 return BFA_STATUS_OK;
5158 }
5159
5160 /*
5161 * Read Temperature Sensor
5162 *
5163 * @param[in] *diag - diag data struct
5164  * @param[in] *result - pointer to bfa_diag_temp_t data struct
5165  * @param[in] cbfn - callback function
5166  * @param[in] *cbarg - callback function arg
5167 *
5168 * @param[out]
5169 */
5170 bfa_status_t
5171 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5172 struct bfa_diag_results_tempsensor_s *result,
5173 bfa_cb_diag_t cbfn, void *cbarg)
5174 {
5175 /* check to see if there is a destructive diag cmd running */
5176 if (diag->block || diag->tsensor.lock) {
5177 bfa_trc(diag, diag->block);
5178 bfa_trc(diag, diag->tsensor.lock);
5179 return BFA_STATUS_DEVBUSY;
5180 }
5181
5182 if (!bfa_ioc_is_operational(diag->ioc))
5183 return BFA_STATUS_IOC_NON_OP;
5184
5185 /* Init diag mod params */
5186 diag->tsensor.lock = 1;
5187 diag->tsensor.temp = result;
5188 diag->tsensor.cbfn = cbfn;
5189 diag->tsensor.cbarg = cbarg;
5190 diag->tsensor.status = BFA_STATUS_OK;
5191
5192 /* Send msg to fw */
5193 diag_tempsensor_send(diag);
5194
5195 return BFA_STATUS_OK;
5196 }
5197
5198 /*
5199 * LED Test command
5200 *
5201 * @param[in] *diag - diag data struct
5202  * @param[in] *ledtest - pointer to ledtest data structure
5203 *
5204 * @param[out]
5205 */
5206 bfa_status_t
5207 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5208 {
5209 bfa_trc(diag, ledtest->cmd);
5210
5211 if (!bfa_ioc_is_operational(diag->ioc))
5212 return BFA_STATUS_IOC_NON_OP;
5213
5214 if (diag->beacon.state)
5215 return BFA_STATUS_BEACON_ON;
5216
5217 if (diag->ledtest.lock)
5218 return BFA_STATUS_LEDTEST_OP;
5219
5220 /* Send msg to fw */
5221 diag->ledtest.lock = BFA_TRUE;
5222 diag_ledtest_send(diag, ledtest);
5223
5224 return BFA_STATUS_OK;
5225 }
5226
5227 /*
5228 * Port beaconing command
5229 *
5230 * @param[in] *diag - diag data struct
5231 * @param[in] beacon - port beaconing 1:ON 0:OFF
5232 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5233 * @param[in] sec - beaconing duration in seconds
5234 *
5235 * @param[out]
5236 */
5237 bfa_status_t
5238 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5239 bfa_boolean_t link_e2e_beacon, uint32_t sec)
5240 {
5241 bfa_trc(diag, beacon);
5242 bfa_trc(diag, link_e2e_beacon);
5243 bfa_trc(diag, sec);
5244
5245 if (!bfa_ioc_is_operational(diag->ioc))
5246 return BFA_STATUS_IOC_NON_OP;
5247
5248 if (diag->ledtest.lock)
5249 return BFA_STATUS_LEDTEST_OP;
5250
5251 	if (diag->beacon.state && beacon)	/* beacon already on */
5252 return BFA_STATUS_BEACON_ON;
5253
5254 diag->beacon.state = beacon;
5255 diag->beacon.link_e2e = link_e2e_beacon;
5256 if (diag->cbfn_beacon)
5257 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5258
5259 /* Send msg to fw */
5260 diag_portbeacon_send(diag, beacon, sec);
5261
5262 return BFA_STATUS_OK;
5263 }
5264
5265 /*
5266 * Return DMA memory needed by diag module.
5267 */
5268 u32
5269 bfa_diag_meminfo(void)
5270 {
5271 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5272 }
5273
5274 /*
5275 * Attach virtual and physical memory for Diag.
5276 */
5277 void
5278 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5279 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5280 {
5281 diag->dev = dev;
5282 diag->ioc = ioc;
5283 diag->trcmod = trcmod;
5284
5285 diag->block = 0;
5286 diag->cbfn = NULL;
5287 diag->cbarg = NULL;
5288 diag->result = NULL;
5289 diag->cbfn_beacon = cbfn_beacon;
5290
5291 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5292 bfa_q_qe_init(&diag->ioc_notify);
5293 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5294 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5295 }
5296
5297 void
5298 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5299 {
5300 diag->fwping.dbuf_kva = dm_kva;
5301 diag->fwping.dbuf_pa = dm_pa;
5302 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5303 }
5304
5305 /*
5306 * PHY module specific
5307 */
5308 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5309 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5310
5311 static void
5312 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5313 {
5314 int i, m = sz >> 2;
5315
5316 for (i = 0; i < m; i++)
5317 obuf[i] = be32_to_cpu(ibuf[i]);
5318 }
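/*
 * Example: the QUERY/STATS response handlers below invoke this as
 *
 *	bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
 *		       sizeof(struct bfa_phy_attr_s));
 *
 * converting sz bytes (sz >> 2 big-endian words) from the DMA buffer to
 * host order; trailing bytes beyond a 4-byte multiple are ignored.
 */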
5319
5320 static bfa_boolean_t
5321 bfa_phy_present(struct bfa_phy_s *phy)
5322 {
5323 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5324 }
5325
5326 static void
5327 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5328 {
5329 struct bfa_phy_s *phy = cbarg;
5330
5331 bfa_trc(phy, event);
5332
5333 switch (event) {
5334 case BFA_IOC_E_DISABLED:
5335 case BFA_IOC_E_FAILED:
5336 if (phy->op_busy) {
5337 phy->status = BFA_STATUS_IOC_FAILURE;
5338 phy->cbfn(phy->cbarg, phy->status);
5339 phy->op_busy = 0;
5340 }
5341 break;
5342
5343 default:
5344 break;
5345 }
5346 }
5347
5348 /*
5349 * Send phy attribute query request.
5350 *
5351 * @param[in] cbarg - callback argument
5352 */
5353 static void
5354 bfa_phy_query_send(void *cbarg)
5355 {
5356 struct bfa_phy_s *phy = cbarg;
5357 struct bfi_phy_query_req_s *msg =
5358 (struct bfi_phy_query_req_s *) phy->mb.msg;
5359
5360 msg->instance = phy->instance;
5361 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5362 bfa_ioc_portid(phy->ioc));
5363 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5364 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5365 }
5366
5367 /*
5368 * Send phy write request.
5369 *
5370 * @param[in] cbarg - callback argument
5371 */
5372 static void
5373 bfa_phy_write_send(void *cbarg)
5374 {
5375 struct bfa_phy_s *phy = cbarg;
5376 struct bfi_phy_write_req_s *msg =
5377 (struct bfi_phy_write_req_s *) phy->mb.msg;
5378 u32 len;
5379 u16 *buf, *dbuf;
5380 int i, sz;
5381
5382 msg->instance = phy->instance;
5383 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5384 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5385 phy->residue : BFA_PHY_DMA_BUF_SZ;
5386 msg->length = cpu_to_be32(len);
5387
5388 /* indicate if it's the last msg of the whole write operation */
5389 msg->last = (len == phy->residue) ? 1 : 0;
5390
5391 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5392 bfa_ioc_portid(phy->ioc));
5393 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5394
5395 buf = (u16 *) (phy->ubuf + phy->offset);
5396 dbuf = (u16 *)phy->dbuf_kva;
5397 sz = len >> 1;
5398 	for (i = 0; i < sz; i++)
5399 		dbuf[i] = cpu_to_be16(buf[i]);	/* stage user data into DMA buf */
5400
5401 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5402
5403 phy->residue -= len;
5404 phy->offset += len;
5405 }
5406
5407 /*
5408 * Send phy read request.
5409 *
5410 * @param[in] cbarg - callback argument
5411 */
5412 static void
5413 bfa_phy_read_send(void *cbarg)
5414 {
5415 struct bfa_phy_s *phy = cbarg;
5416 struct bfi_phy_read_req_s *msg =
5417 (struct bfi_phy_read_req_s *) phy->mb.msg;
5418 u32 len;
5419
5420 msg->instance = phy->instance;
5421 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5422 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5423 phy->residue : BFA_PHY_DMA_BUF_SZ;
5424 msg->length = cpu_to_be32(len);
5425 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5426 bfa_ioc_portid(phy->ioc));
5427 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5428 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5429 }
5430
5431 /*
5432 * Send phy stats request.
5433 *
5434 * @param[in] cbarg - callback argument
5435 */
5436 static void
5437 bfa_phy_stats_send(void *cbarg)
5438 {
5439 struct bfa_phy_s *phy = cbarg;
5440 struct bfi_phy_stats_req_s *msg =
5441 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5442
5443 msg->instance = phy->instance;
5444 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5445 bfa_ioc_portid(phy->ioc));
5446 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5447 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5448 }
5449
5450 /*
5451  * Phy memory info API.
5452 *
5453 * @param[in] mincfg - minimal cfg variable
5454 */
5455 u32
5456 bfa_phy_meminfo(bfa_boolean_t mincfg)
5457 {
5458 /* min driver doesn't need phy */
5459 if (mincfg)
5460 return 0;
5461
5462 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5463 }
5464
5465 /*
5466  * Phy attach API.
5467 *
5468 * @param[in] phy - phy structure
5469 * @param[in] ioc - ioc structure
5470 * @param[in] dev - device structure
5471 * @param[in] trcmod - trace module
5472  * @param[in] mincfg - minimal cfg variable
5473 */
5474 void
5475 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5476 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5477 {
5478 phy->ioc = ioc;
5479 phy->trcmod = trcmod;
5480 phy->cbfn = NULL;
5481 phy->cbarg = NULL;
5482 phy->op_busy = 0;
5483
5484 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5485 bfa_q_qe_init(&phy->ioc_notify);
5486 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5487 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5488
5489 /* min driver doesn't need phy */
5490 if (mincfg) {
5491 phy->dbuf_kva = NULL;
5492 phy->dbuf_pa = 0;
5493 }
5494 }
5495
5496 /*
5497 * Claim memory for phy
5498 *
5499 * @param[in] phy - phy structure
5500 * @param[in] dm_kva - pointer to virtual memory address
5501 * @param[in] dm_pa - physical memory address
5502 * @param[in] mincfg - minimal cfg variable
5503 */
5504 void
5505 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5506 bfa_boolean_t mincfg)
5507 {
5508 if (mincfg)
5509 return;
5510
5511 phy->dbuf_kva = dm_kva;
5512 phy->dbuf_pa = dm_pa;
5513 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5514 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5515 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5516 }
5517
5518 bfa_boolean_t
5519 bfa_phy_busy(struct bfa_ioc_s *ioc)
5520 {
5521 void __iomem *rb;
5522
5523 rb = bfa_ioc_bar0(ioc);
5524 return readl(rb + BFA_PHY_LOCK_STATUS);
5525 }
5526
5527 /*
5528 * Get phy attribute.
5529 *
5530 * @param[in] phy - phy structure
5531 * @param[in] attr - phy attribute structure
5532 * @param[in] cbfn - callback function
5533 * @param[in] cbarg - callback argument
5534 *
5535 * Return status.
5536 */
5537 bfa_status_t
5538 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5539 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5540 {
5541 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5542 bfa_trc(phy, instance);
5543
5544 if (!bfa_phy_present(phy))
5545 return BFA_STATUS_PHY_NOT_PRESENT;
5546
5547 if (!bfa_ioc_is_operational(phy->ioc))
5548 return BFA_STATUS_IOC_NON_OP;
5549
5550 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5551 bfa_trc(phy, phy->op_busy);
5552 return BFA_STATUS_DEVBUSY;
5553 }
5554
5555 phy->op_busy = 1;
5556 phy->cbfn = cbfn;
5557 phy->cbarg = cbarg;
5558 phy->instance = instance;
5559 phy->ubuf = (uint8_t *) attr;
5560 bfa_phy_query_send(phy);
5561
5562 return BFA_STATUS_OK;
5563 }
5564
5565 /*
5566 * Get phy stats.
5567 *
5568 * @param[in] phy - phy structure
5569 * @param[in] instance - phy image instance
5570 * @param[in] stats - pointer to phy stats
5571 * @param[in] cbfn - callback function
5572 * @param[in] cbarg - callback argument
5573 *
5574 * Return status.
5575 */
5576 bfa_status_t
5577 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5578 struct bfa_phy_stats_s *stats,
5579 bfa_cb_phy_t cbfn, void *cbarg)
5580 {
5581 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5582 bfa_trc(phy, instance);
5583
5584 if (!bfa_phy_present(phy))
5585 return BFA_STATUS_PHY_NOT_PRESENT;
5586
5587 if (!bfa_ioc_is_operational(phy->ioc))
5588 return BFA_STATUS_IOC_NON_OP;
5589
5590 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5591 bfa_trc(phy, phy->op_busy);
5592 return BFA_STATUS_DEVBUSY;
5593 }
5594
5595 phy->op_busy = 1;
5596 phy->cbfn = cbfn;
5597 phy->cbarg = cbarg;
5598 phy->instance = instance;
5599 phy->ubuf = (u8 *) stats;
5600 bfa_phy_stats_send(phy);
5601
5602 return BFA_STATUS_OK;
5603 }
5604
5605 /*
5606 * Update phy image.
5607 *
5608 * @param[in] phy - phy structure
5609 * @param[in] instance - phy image instance
5610 * @param[in] buf - update data buffer
5611 * @param[in] len - data buffer length
5612 * @param[in] offset - offset relative to starting address
5613 * @param[in] cbfn - callback function
5614 * @param[in] cbarg - callback argument
5615 *
5616 * Return status.
5617 */
5618 bfa_status_t
5619 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5620 void *buf, u32 len, u32 offset,
5621 bfa_cb_phy_t cbfn, void *cbarg)
5622 {
5623 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5624 bfa_trc(phy, instance);
5625 bfa_trc(phy, len);
5626 bfa_trc(phy, offset);
5627
5628 if (!bfa_phy_present(phy))
5629 return BFA_STATUS_PHY_NOT_PRESENT;
5630
5631 if (!bfa_ioc_is_operational(phy->ioc))
5632 return BFA_STATUS_IOC_NON_OP;
5633
5634 	/* 'len' must be on a word (4-byte) boundary */
5635 if (!len || (len & 0x03))
5636 return BFA_STATUS_FAILED;
5637
5638 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5639 bfa_trc(phy, phy->op_busy);
5640 return BFA_STATUS_DEVBUSY;
5641 }
5642
5643 phy->op_busy = 1;
5644 phy->cbfn = cbfn;
5645 phy->cbarg = cbarg;
5646 phy->instance = instance;
5647 phy->residue = len;
5648 phy->offset = 0;
5649 phy->addr_off = offset;
5650 phy->ubuf = buf;
5651
5652 bfa_phy_write_send(phy);
5653 return BFA_STATUS_OK;
5654 }
5655
5656 /*
5657 * Read phy image.
5658 *
5659 * @param[in] phy - phy structure
5660 * @param[in] instance - phy image instance
5661 * @param[in] buf - read data buffer
5662 * @param[in] len - data buffer length
5663 * @param[in] offset - offset relative to starting address
5664 * @param[in] cbfn - callback function
5665 * @param[in] cbarg - callback argument
5666 *
5667 * Return status.
5668 */
5669 bfa_status_t
5670 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5671 void *buf, u32 len, u32 offset,
5672 bfa_cb_phy_t cbfn, void *cbarg)
5673 {
5674 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5675 bfa_trc(phy, instance);
5676 bfa_trc(phy, len);
5677 bfa_trc(phy, offset);
5678
5679 if (!bfa_phy_present(phy))
5680 return BFA_STATUS_PHY_NOT_PRESENT;
5681
5682 if (!bfa_ioc_is_operational(phy->ioc))
5683 return BFA_STATUS_IOC_NON_OP;
5684
5685 	/* 'len' must be on a word (4-byte) boundary */
5686 if (!len || (len & 0x03))
5687 return BFA_STATUS_FAILED;
5688
5689 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5690 bfa_trc(phy, phy->op_busy);
5691 return BFA_STATUS_DEVBUSY;
5692 }
5693
5694 phy->op_busy = 1;
5695 phy->cbfn = cbfn;
5696 phy->cbarg = cbarg;
5697 phy->instance = instance;
5698 phy->residue = len;
5699 phy->offset = 0;
5700 phy->addr_off = offset;
5701 phy->ubuf = buf;
5702 bfa_phy_read_send(phy);
5703
5704 return BFA_STATUS_OK;
5705 }
5706
5707 /*
5708 * Process phy response messages upon receiving interrupts.
5709 *
5710 * @param[in] phyarg - phy structure
5711 * @param[in] msg - message structure
5712 */
5713 void
5714 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5715 {
5716 struct bfa_phy_s *phy = phyarg;
5717 u32 status;
5718
5719 union {
5720 struct bfi_phy_query_rsp_s *query;
5721 struct bfi_phy_stats_rsp_s *stats;
5722 struct bfi_phy_write_rsp_s *write;
5723 struct bfi_phy_read_rsp_s *read;
5724 struct bfi_mbmsg_s *msg;
5725 } m;
5726
5727 m.msg = msg;
5728 bfa_trc(phy, msg->mh.msg_id);
5729
5730 if (!phy->op_busy) {
5731 /* receiving response after ioc failure */
5732 bfa_trc(phy, 0x9999);
5733 return;
5734 }
5735
5736 switch (msg->mh.msg_id) {
5737 case BFI_PHY_I2H_QUERY_RSP:
5738 status = be32_to_cpu(m.query->status);
5739 bfa_trc(phy, status);
5740
5741 if (status == BFA_STATUS_OK) {
5742 struct bfa_phy_attr_s *attr =
5743 (struct bfa_phy_attr_s *) phy->ubuf;
5744 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5745 sizeof(struct bfa_phy_attr_s));
5746 bfa_trc(phy, attr->status);
5747 bfa_trc(phy, attr->length);
5748 }
5749
5750 phy->status = status;
5751 phy->op_busy = 0;
5752 if (phy->cbfn)
5753 phy->cbfn(phy->cbarg, phy->status);
5754 break;
5755 case BFI_PHY_I2H_STATS_RSP:
5756 status = be32_to_cpu(m.stats->status);
5757 bfa_trc(phy, status);
5758
5759 if (status == BFA_STATUS_OK) {
5760 struct bfa_phy_stats_s *stats =
5761 (struct bfa_phy_stats_s *) phy->ubuf;
5762 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5763 sizeof(struct bfa_phy_stats_s));
5764 bfa_trc(phy, stats->status);
5765 }
5766
5767 phy->status = status;
5768 phy->op_busy = 0;
5769 if (phy->cbfn)
5770 phy->cbfn(phy->cbarg, phy->status);
5771 break;
5772 case BFI_PHY_I2H_WRITE_RSP:
5773 status = be32_to_cpu(m.write->status);
5774 bfa_trc(phy, status);
5775
5776 if (status != BFA_STATUS_OK || phy->residue == 0) {
5777 phy->status = status;
5778 phy->op_busy = 0;
5779 if (phy->cbfn)
5780 phy->cbfn(phy->cbarg, phy->status);
5781 } else {
5782 bfa_trc(phy, phy->offset);
5783 bfa_phy_write_send(phy);
5784 }
5785 break;
5786 case BFI_PHY_I2H_READ_RSP:
5787 status = be32_to_cpu(m.read->status);
5788 bfa_trc(phy, status);
5789
5790 if (status != BFA_STATUS_OK) {
5791 phy->status = status;
5792 phy->op_busy = 0;
5793 if (phy->cbfn)
5794 phy->cbfn(phy->cbarg, phy->status);
5795 } else {
5796 u32 len = be32_to_cpu(m.read->length);
5797 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5798 u16 *dbuf = (u16 *)phy->dbuf_kva;
5799 int i, sz = len >> 1;
5800
5801 bfa_trc(phy, phy->offset);
5802 bfa_trc(phy, len);
5803
5804 for (i = 0; i < sz; i++)
5805 buf[i] = be16_to_cpu(dbuf[i]);
5806
5807 phy->residue -= len;
5808 phy->offset += len;
5809
5810 if (phy->residue == 0) {
5811 phy->status = status;
5812 phy->op_busy = 0;
5813 if (phy->cbfn)
5814 phy->cbfn(phy->cbarg, phy->status);
5815 } else
5816 bfa_phy_read_send(phy);
5817 }
5818 break;
5819 default:
5820 WARN_ON(1);
5821 }
5822 }
5823
5824 /*
5825 * DCONF state machine events
5826 */
5827 enum bfa_dconf_event {
5828 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5829 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5830 BFA_DCONF_SM_WR = 3, /* binding change, map */
5831 BFA_DCONF_SM_TIMEOUT = 4, /* timer expiry */
5832 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5833 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5834 };
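/*
 * Informal transition summary for the handlers below (derived from the
 * code, for orientation only):
 *
 *   uninit --INIT--> flash_read --FLASH_COMP--> ready
 *   ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *   dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 *   EXIT from dirty/sync goes through final_sync; all exit paths end in
 *   uninit and raise IOCFC_E_DCONF_DONE.
 */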
5835
5836 /* forward declaration of DCONF state machine */
5837 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5838 enum bfa_dconf_event event);
5839 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5840 enum bfa_dconf_event event);
5841 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5842 enum bfa_dconf_event event);
5843 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5844 enum bfa_dconf_event event);
5845 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5846 enum bfa_dconf_event event);
5847 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5848 enum bfa_dconf_event event);
5849 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5850 enum bfa_dconf_event event);
5851
5852 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5853 static void bfa_dconf_timer(void *cbarg);
5854 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5855 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5856
5857 /*
5858 * Beginning state of dconf module. Waiting for an event to start.
5859 */
5860 static void
5861 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5862 {
5863 bfa_status_t bfa_status;
5864 bfa_trc(dconf->bfa, event);
5865
5866 switch (event) {
5867 case BFA_DCONF_SM_INIT:
5868 if (dconf->min_cfg) {
5869 bfa_trc(dconf->bfa, dconf->min_cfg);
5870 bfa_fsm_send_event(&dconf->bfa->iocfc,
5871 IOCFC_E_DCONF_DONE);
5872 return;
5873 }
5874 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5875 bfa_timer_start(dconf->bfa, &dconf->timer,
5876 bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5877 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5878 BFA_FLASH_PART_DRV, dconf->instance,
5879 dconf->dconf,
5880 sizeof(struct bfa_dconf_s), 0,
5881 bfa_dconf_init_cb, dconf->bfa);
5882 if (bfa_status != BFA_STATUS_OK) {
5883 bfa_timer_stop(&dconf->timer);
5884 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5885 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5886 return;
5887 }
5888 break;
5889 case BFA_DCONF_SM_EXIT:
5890 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
5891 case BFA_DCONF_SM_IOCDISABLE:
5892 case BFA_DCONF_SM_WR:
5893 case BFA_DCONF_SM_FLASH_COMP:
5894 break;
5895 default:
5896 bfa_sm_fault(dconf->bfa, event);
5897 }
5898 }
5899
5900 /*
5901 * Read flash for dconf entries and make a call back to the driver once done.
5902 */
5903 static void
5904 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5905 enum bfa_dconf_event event)
5906 {
5907 bfa_trc(dconf->bfa, event);
5908
5909 switch (event) {
5910 case BFA_DCONF_SM_FLASH_COMP:
5911 bfa_timer_stop(&dconf->timer);
5912 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5913 break;
5914 case BFA_DCONF_SM_TIMEOUT:
5915 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5916 bfa_ioc_suspend(&dconf->bfa->ioc);
5917 break;
5918 case BFA_DCONF_SM_EXIT:
5919 bfa_timer_stop(&dconf->timer);
5920 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5921 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5922 break;
5923 case BFA_DCONF_SM_IOCDISABLE:
5924 bfa_timer_stop(&dconf->timer);
5925 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5926 break;
5927 default:
5928 bfa_sm_fault(dconf->bfa, event);
5929 }
5930 }
5931
5932 /*
5933 * DCONF Module is in ready state. Has completed the initialization.
5934 */
5935 static void
5936 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5937 {
5938 bfa_trc(dconf->bfa, event);
5939
5940 switch (event) {
5941 case BFA_DCONF_SM_WR:
5942 bfa_timer_start(dconf->bfa, &dconf->timer,
5943 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5944 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5945 break;
5946 case BFA_DCONF_SM_EXIT:
5947 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5948 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5949 break;
5950 case BFA_DCONF_SM_INIT:
5951 case BFA_DCONF_SM_IOCDISABLE:
5952 break;
5953 default:
5954 bfa_sm_fault(dconf->bfa, event);
5955 }
5956 }
5957
5958 /*
5959 * Entries are dirty; write them back to the flash.
5960 */
5961
5962 static void
5963 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5964 {
5965 bfa_trc(dconf->bfa, event);
5966
5967 switch (event) {
5968 case BFA_DCONF_SM_TIMEOUT:
5969 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5970 bfa_dconf_flash_write(dconf);
5971 break;
5972 case BFA_DCONF_SM_WR:
5973 bfa_timer_stop(&dconf->timer);
5974 bfa_timer_start(dconf->bfa, &dconf->timer,
5975 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5976 break;
5977 case BFA_DCONF_SM_EXIT:
5978 bfa_timer_stop(&dconf->timer);
5979 bfa_timer_start(dconf->bfa, &dconf->timer,
5980 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5981 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5982 bfa_dconf_flash_write(dconf);
5983 break;
5984 case BFA_DCONF_SM_FLASH_COMP:
5985 break;
5986 case BFA_DCONF_SM_IOCDISABLE:
5987 bfa_timer_stop(&dconf->timer);
5988 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5989 break;
5990 default:
5991 bfa_sm_fault(dconf->bfa, event);
5992 }
5993 }
5994
5995 /*
5996 * Sync the dconf entries to the flash.
5997 */
5998 static void
5999 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
6000 enum bfa_dconf_event event)
6001 {
6002 bfa_trc(dconf->bfa, event);
6003
6004 switch (event) {
6005 case BFA_DCONF_SM_IOCDISABLE:
6006 case BFA_DCONF_SM_FLASH_COMP:
6007 bfa_timer_stop(&dconf->timer);
/* fall through */
6008 case BFA_DCONF_SM_TIMEOUT:
6009 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6010 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6011 break;
6012 default:
6013 bfa_sm_fault(dconf->bfa, event);
6014 }
6015 }
6016
6017 static void
6018 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6019 {
6020 bfa_trc(dconf->bfa, event);
6021
6022 switch (event) {
6023 case BFA_DCONF_SM_FLASH_COMP:
6024 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6025 break;
6026 case BFA_DCONF_SM_WR:
6027 bfa_timer_start(dconf->bfa, &dconf->timer,
6028 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6029 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6030 break;
6031 case BFA_DCONF_SM_EXIT:
6032 bfa_timer_start(dconf->bfa, &dconf->timer,
6033 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6034 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6035 break;
6036 case BFA_DCONF_SM_IOCDISABLE:
6037 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6038 break;
6039 default:
6040 bfa_sm_fault(dconf->bfa, event);
6041 }
6042 }
6043
6044 static void
6045 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6046 enum bfa_dconf_event event)
6047 {
6048 bfa_trc(dconf->bfa, event);
6049
6050 switch (event) {
6051 case BFA_DCONF_SM_INIT:
6052 bfa_timer_start(dconf->bfa, &dconf->timer,
6053 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6054 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6055 break;
6056 case BFA_DCONF_SM_EXIT:
6057 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6058 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6059 break;
6060 case BFA_DCONF_SM_IOCDISABLE:
6061 break;
6062 default:
6063 bfa_sm_fault(dconf->bfa, event);
6064 }
6065 }
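/*
 * Note: bfa_sm_set_state()/bfa_sm_send_event() (bfa_cs.h) implement these
 * handlers as the state itself: the current state is a function pointer,
 * and sending an event simply invokes that handler with the event.
 */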
6066
6067 /*
6068 * Compute and return memory needed by DRV_CFG module.
6069 */
6070 void
6071 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6072 struct bfa_s *bfa)
6073 {
6074 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6075
6076 if (cfg->drvcfg.min_cfg)
6077 bfa_mem_kva_setup(meminfo, dconf_kva,
6078 sizeof(struct bfa_dconf_hdr_s));
6079 else
6080 bfa_mem_kva_setup(meminfo, dconf_kva,
6081 sizeof(struct bfa_dconf_s));
6082 }
6083
6084 void
6085 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6086 {
6087 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6088
6089 dconf->bfad = bfad;
6090 dconf->bfa = bfa;
6091 dconf->instance = bfa->ioc.port_id;
6092 bfa_trc(bfa, dconf->instance);
6093
6094 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6095 if (cfg->drvcfg.min_cfg) {
6096 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6097 dconf->min_cfg = BFA_TRUE;
6098 } else {
6099 dconf->min_cfg = BFA_FALSE;
6100 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6101 }
6102
6103 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6104 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6105 }
6106
6107 static void
6108 bfa_dconf_init_cb(void *arg, bfa_status_t status)
6109 {
6110 struct bfa_s *bfa = arg;
6111 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6112
6113 if (status == BFA_STATUS_OK) {
6114 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6115 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6116 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6117 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6118 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6119 }
6120 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6121 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6122 }
6123
6124 void
6125 bfa_dconf_modinit(struct bfa_s *bfa)
6126 {
6127 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6128 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6129 }
6130
6131 static void bfa_dconf_timer(void *cbarg)
6132 {
6133 struct bfa_dconf_mod_s *dconf = cbarg;
6134 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6135 }
6136
6137 void
6138 bfa_dconf_iocdisable(struct bfa_s *bfa)
6139 {
6140 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6141 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6142 }
6143
6144 static bfa_status_t
6145 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6146 {
6147 bfa_status_t bfa_status;
6148 bfa_trc(dconf->bfa, 0);
6149
6150 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6151 BFA_FLASH_PART_DRV, dconf->instance,
6152 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
6153 bfa_dconf_cbfn, dconf);
6154 if (bfa_status != BFA_STATUS_OK)
6155 WARN_ON(bfa_status);
6156 bfa_trc(dconf->bfa, bfa_status);
6157
6158 return bfa_status;
6159 }
6160
6161 bfa_status_t
6162 bfa_dconf_update(struct bfa_s *bfa)
6163 {
6164 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6165 bfa_trc(dconf->bfa, 0);
6166 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6167 return BFA_STATUS_FAILED;
6168
6169 if (dconf->min_cfg) {
6170 bfa_trc(dconf->bfa, dconf->min_cfg);
6171 return BFA_STATUS_FAILED;
6172 }
6173
6174 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6175 return BFA_STATUS_OK;
6176 }
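/*
 * Usage note (illustrative): callers mutate dconf->dconf in place and then
 * call bfa_dconf_update(bfa); the BFA_DCONF_UPDATE_TOV timer armed in the
 * dirty state coalesces bursts of updates into a single flash write.
 */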
6177
6178 static void
6179 bfa_dconf_cbfn(void *arg, bfa_status_t status)
6180 {
6181 struct bfa_dconf_mod_s *dconf = arg;
6182 WARN_ON(status);
6183 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6184 }
6185
6186 void
6187 bfa_dconf_modexit(struct bfa_s *bfa)
6188 {
6189 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6190 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6191 }
6192
6193 /*
6194 * FRU specific functions
6195 */
6196
6197 #define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
6198 #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6199 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6200
6201 static void
6202 bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6203 {
6204 struct bfa_fru_s *fru = cbarg;
6205
6206 bfa_trc(fru, event);
6207
6208 switch (event) {
6209 case BFA_IOC_E_DISABLED:
6210 case BFA_IOC_E_FAILED:
6211 if (fru->op_busy) {
6212 fru->status = BFA_STATUS_IOC_FAILURE;
6213 fru->cbfn(fru->cbarg, fru->status);
6214 fru->op_busy = 0;
6215 }
6216 break;
6217
6218 default:
6219 break;
6220 }
6221 }
6222
6223 /*
6224 * Send fru write request.
6225 *
6226 * @param[in] cbarg - callback argument (the fru structure)
* @param[in] msg_type - FRU host-to-ioc message type
6227 */
6228 static void
6229 bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6230 {
6231 struct bfa_fru_s *fru = cbarg;
6232 struct bfi_fru_write_req_s *msg =
6233 (struct bfi_fru_write_req_s *) fru->mb.msg;
6234 u32 len;
6235
6236 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6237 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6238 fru->residue : BFA_FRU_DMA_BUF_SZ;
6239 msg->length = cpu_to_be32(len);
6240
6241 /*
6242 * indicate if it's the last msg of the whole write operation
6243 */
6244 msg->last = (len == fru->residue) ? 1 : 0;
6245
6246 msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6247 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6248 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6249
6250 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6251 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6252
6253 fru->residue -= len;
6254 fru->offset += len;
6255 }
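/*
 * Chunking example (illustrative): with BFA_FRU_DMA_BUF_SZ = 8K, a 20K
 * VPD update goes out as three write requests of 8K, 8K and 4K; 'last'
 * and 'trfr_cmpl' are set only on the final chunk, when len == residue.
 */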
6256
6257 /*
6258 * Send fru read request.
6259 *
6260 * @param[in] cbarg - callback argument (the fru structure)
* @param[in] msg_type - FRU host-to-ioc message type
6261 */
6262 static void
6263 bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6264 {
6265 struct bfa_fru_s *fru = cbarg;
6266 struct bfi_fru_read_req_s *msg =
6267 (struct bfi_fru_read_req_s *) fru->mb.msg;
6268 u32 len;
6269
6270 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6271 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6272 fru->residue : BFA_FRU_DMA_BUF_SZ;
6273 msg->length = cpu_to_be32(len);
6274 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6275 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6276 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6277 }
6278
6279 /*
6280 * FRU memory info API.
6281 *
6282 * @param[in] mincfg - minimal cfg variable
6283 */
6284 u32
6285 bfa_fru_meminfo(bfa_boolean_t mincfg)
6286 {
6287 /* min driver doesn't need fru */
6288 if (mincfg)
6289 return 0;
6290
6291 return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6292 }
6293
6294 /*
6295 * FRU attach API.
6296 *
6297 * @param[in] fru - fru structure
6298 * @param[in] ioc - ioc structure
6299 * @param[in] dev - device structure
6300 * @param[in] trcmod - trace module
6301 * @param[in] mincfg - minimal cfg variable
6302 */
6303 void
6304 bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6305 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6306 {
6307 fru->ioc = ioc;
6308 fru->trcmod = trcmod;
6309 fru->cbfn = NULL;
6310 fru->cbarg = NULL;
6311 fru->op_busy = 0;
6312
6313 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6314 bfa_q_qe_init(&fru->ioc_notify);
6315 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6316 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6317
6318 /* min driver doesn't need fru */
6319 if (mincfg) {
6320 fru->dbuf_kva = NULL;
6321 fru->dbuf_pa = 0;
6322 }
6323 }
6324
6325 /*
6326 * Claim memory for fru
6327 *
6328 * @param[in] fru - fru structure
6329 * @param[in] dm_kva - pointer to virtual memory address
6330 * @param[in] dm_pa - physical memory address
6331 * @param[in] mincfg - minimal cfg variable
6332 */
6333 void
6334 bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6335 bfa_boolean_t mincfg)
6336 {
6337 if (mincfg)
6338 return;
6339
6340 fru->dbuf_kva = dm_kva;
6341 fru->dbuf_pa = dm_pa;
6342 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6343 dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6344 dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6345 }
6346
6347 /*
6348 * Update fru vpd image.
6349 *
6350 * @param[in] fru - fru structure
6351 * @param[in] buf - update data buffer
6352 * @param[in] len - data buffer length
6353 * @param[in] offset - offset relative to starting address
6354 * @param[in] cbfn - callback function
6355 * @param[in] cbarg - callback argument
6356 *
6357 * Return status.
6358 */
6359 bfa_status_t
6360 bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6361 bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6362 {
6363 bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6364 bfa_trc(fru, len);
6365 bfa_trc(fru, offset);
6366
6367 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6368 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6369 return BFA_STATUS_FRU_NOT_PRESENT;
6370
6371 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6372 return BFA_STATUS_CMD_NOTSUPP;
6373
6374 if (!bfa_ioc_is_operational(fru->ioc))
6375 return BFA_STATUS_IOC_NON_OP;
6376
6377 if (fru->op_busy) {
6378 bfa_trc(fru, fru->op_busy);
6379 return BFA_STATUS_DEVBUSY;
6380 }
6381
6382 fru->op_busy = 1;
6383
6384 fru->cbfn = cbfn;
6385 fru->cbarg = cbarg;
6386 fru->residue = len;
6387 fru->offset = 0;
6388 fru->addr_off = offset;
6389 fru->ubuf = buf;
6390 fru->trfr_cmpl = trfr_cmpl;
6391
6392 bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6393
6394 return BFA_STATUS_OK;
6395 }
6396
6397 /*
6398 * Read fru vpd image.
6399 *
6400 * @param[in] fru - fru structure
6401 * @param[in] buf - read data buffer
6402 * @param[in] len - data buffer length
6403 * @param[in] offset - offset relative to starting address
6404 * @param[in] cbfn - callback function
6405 * @param[in] cbarg - callback argument
6406 *
6407 * Return status.
6408 */
6409 bfa_status_t
6410 bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6411 bfa_cb_fru_t cbfn, void *cbarg)
6412 {
6413 bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6414 bfa_trc(fru, len);
6415 bfa_trc(fru, offset);
6416
6417 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6418 return BFA_STATUS_FRU_NOT_PRESENT;
6419
6420 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6421 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6422 return BFA_STATUS_CMD_NOTSUPP;
6423
6424 if (!bfa_ioc_is_operational(fru->ioc))
6425 return BFA_STATUS_IOC_NON_OP;
6426
6427 if (fru->op_busy) {
6428 bfa_trc(fru, fru->op_busy);
6429 return BFA_STATUS_DEVBUSY;
6430 }
6431
6432 fru->op_busy = 1;
6433
6434 fru->cbfn = cbfn;
6435 fru->cbarg = cbarg;
6436 fru->residue = len;
6437 fru->offset = 0;
6438 fru->addr_off = offset;
6439 fru->ubuf = buf;
6440 bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6441
6442 return BFA_STATUS_OK;
6443 }
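/*
 * Illustrative usage sketch (callback and buffer names are hypothetical):
 *
 *	static void my_fru_cb(void *cbarg, bfa_status_t status) { ... }
 *
 *	if (bfa_fruvpd_read(fru, vpd_buf, vpd_len, 0, my_fru_cb, NULL) !=
 *	    BFA_STATUS_OK)
 *		handle FRU_NOT_PRESENT / CMD_NOTSUPP / IOC_NON_OP / DEVBUSY
 */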
6444
6445 /*
6446 * Get maximum size fru vpd image.
6447 *
6448 * @param[in] fru - fru structure
6449 * @param[out] max_size - maximum size of fru vpd data
6450 *
6451 * Return status.
6452 */
6453 bfa_status_t
6454 bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6455 {
6456 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6457 return BFA_STATUS_FRU_NOT_PRESENT;
6458
6459 if (!bfa_ioc_is_operational(fru->ioc))
6460 return BFA_STATUS_IOC_NON_OP;
6461
6462 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6463 fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6464 *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6465 else
6466 return BFA_STATUS_CMD_NOTSUPP;
6467 return BFA_STATUS_OK;
6468 }
6469 /*
6470 * tfru write.
6471 *
6472 * @param[in] fru - fru structure
6473 * @param[in] buf - update data buffer
6474 * @param[in] len - data buffer length
6475 * @param[in] offset - offset relative to starting address
6476 * @param[in] cbfn - callback function
6477 * @param[in] cbarg - callback argument
6478 *
6479 * Return status.
6480 */
6481 bfa_status_t
6482 bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6483 bfa_cb_fru_t cbfn, void *cbarg)
6484 {
6485 bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6486 bfa_trc(fru, len);
6487 bfa_trc(fru, offset);
6488 bfa_trc(fru, *((u8 *) buf));
6489
6490 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6491 return BFA_STATUS_FRU_NOT_PRESENT;
6492
6493 if (!bfa_ioc_is_operational(fru->ioc))
6494 return BFA_STATUS_IOC_NON_OP;
6495
6496 if (fru->op_busy) {
6497 bfa_trc(fru, fru->op_busy);
6498 return BFA_STATUS_DEVBUSY;
6499 }
6500
6501 fru->op_busy = 1;
6502
6503 fru->cbfn = cbfn;
6504 fru->cbarg = cbarg;
6505 fru->residue = len;
6506 fru->offset = 0;
6507 fru->addr_off = offset;
6508 fru->ubuf = buf;
6509
6510 bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6511
6512 return BFA_STATUS_OK;
6513 }
6514
6515 /*
6516 * tfru read.
6517 *
6518 * @param[in] fru - fru structure
6519 * @param[in] buf - read data buffer
6520 * @param[in] len - data buffer length
6521 * @param[in] offset - offset relative to starting address
6522 * @param[in] cbfn - callback function
6523 * @param[in] cbarg - callback argument
6524 *
6525 * Return status.
6526 */
6527 bfa_status_t
6528 bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6529 bfa_cb_fru_t cbfn, void *cbarg)
6530 {
6531 bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6532 bfa_trc(fru, len);
6533 bfa_trc(fru, offset);
6534
6535 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6536 return BFA_STATUS_FRU_NOT_PRESENT;
6537
6538 if (!bfa_ioc_is_operational(fru->ioc))
6539 return BFA_STATUS_IOC_NON_OP;
6540
6541 if (fru->op_busy) {
6542 bfa_trc(fru, fru->op_busy);
6543 return BFA_STATUS_DEVBUSY;
6544 }
6545
6546 fru->op_busy = 1;
6547
6548 fru->cbfn = cbfn;
6549 fru->cbarg = cbarg;
6550 fru->residue = len;
6551 fru->offset = 0;
6552 fru->addr_off = offset;
6553 fru->ubuf = buf;
6554 bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6555
6556 return BFA_STATUS_OK;
6557 }
6558
6559 /*
6560 * Process fru response messages upon receiving interrupts.
6561 *
6562 * @param[in] fruarg - fru structure
6563 * @param[in] msg - message structure
6564 */
6565 void
6566 bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6567 {
6568 struct bfa_fru_s *fru = fruarg;
6569 struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6570 u32 status;
6571
6572 bfa_trc(fru, msg->mh.msg_id);
6573
6574 if (!fru->op_busy) {
6575 /*
6576 * receiving response after ioc failure
6577 */
6578 bfa_trc(fru, 0x9999);
6579 return;
6580 }
6581
6582 switch (msg->mh.msg_id) {
6583 case BFI_FRUVPD_I2H_WRITE_RSP:
6584 case BFI_TFRU_I2H_WRITE_RSP:
6585 status = be32_to_cpu(rsp->status);
6586 bfa_trc(fru, status);
6587
6588 if (status != BFA_STATUS_OK || fru->residue == 0) {
6589 fru->status = status;
6590 fru->op_busy = 0;
6591 if (fru->cbfn)
6592 fru->cbfn(fru->cbarg, fru->status);
6593 } else {
6594 bfa_trc(fru, fru->offset);
6595 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6596 bfa_fru_write_send(fru,
6597 BFI_FRUVPD_H2I_WRITE_REQ);
6598 else
6599 bfa_fru_write_send(fru,
6600 BFI_TFRU_H2I_WRITE_REQ);
6601 }
6602 break;
6603 case BFI_FRUVPD_I2H_READ_RSP:
6604 case BFI_TFRU_I2H_READ_RSP:
6605 status = be32_to_cpu(rsp->status);
6606 bfa_trc(fru, status);
6607
6608 if (status != BFA_STATUS_OK) {
6609 fru->status = status;
6610 fru->op_busy = 0;
6611 if (fru->cbfn)
6612 fru->cbfn(fru->cbarg, fru->status);
6613 } else {
6614 u32 len = be32_to_cpu(rsp->length);
6615
6616 bfa_trc(fru, fru->offset);
6617 bfa_trc(fru, len);
6618
6619 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6620 fru->residue -= len;
6621 fru->offset += len;
6622
6623 if (fru->residue == 0) {
6624 fru->status = status;
6625 fru->op_busy = 0;
6626 if (fru->cbfn)
6627 fru->cbfn(fru->cbarg, fru->status);
6628 } else {
6629 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6630 bfa_fru_read_send(fru,
6631 BFI_FRUVPD_H2I_READ_REQ);
6632 else
6633 bfa_fru_read_send(fru,
6634 BFI_TFRU_H2I_READ_REQ);
6635 }
6636 }
6637 break;
6638 default:
6639 WARN_ON(1);
6640 }
6641 }
6642
6643 /*
6644 * register definitions
6645 */
6646 #define FLI_CMD_REG 0x0001d000
6647 #define FLI_RDDATA_REG 0x0001d010
6648 #define FLI_ADDR_REG 0x0001d004
6649 #define FLI_DEV_STATUS_REG 0x0001d014
6650
6651 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
6652 #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
6653 #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
6654 #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
6655
6656 enum bfa_flash_cmd {
6657 BFA_FLASH_FAST_READ = 0x0b, /* fast read */
6658 BFA_FLASH_READ_STATUS = 0x05, /* read status */
6659 };
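/*
 * These match the standard SPI NOR opcodes: 0x0b is FAST_READ and 0x05 is
 * RDSR (read status register); WIP is bit 0 of the status byte, hence
 * BFA_FLASH_WIP_MASK above.
 */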
6660
6661 /**
6662 * @brief hardware error definition
6663 */
6664 enum bfa_flash_err {
6665 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
6666 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
6667 BFA_FLASH_BAD = -3, /*!< flash bad */
6668 BFA_FLASH_BUSY = -4, /*!< flash busy */
6669 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
6670 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
6671 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
6672 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
6673 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
6674 };
6675
6676 /**
6677 * @brief flash command register data structure
6678 */
6679 union bfa_flash_cmd_reg_u {
6680 struct {
6681 #ifdef __BIG_ENDIAN
6682 u32 act:1;
6683 u32 rsv:1;
6684 u32 write_cnt:9;
6685 u32 read_cnt:9;
6686 u32 addr_cnt:4;
6687 u32 cmd:8;
6688 #else
6689 u32 cmd:8;
6690 u32 addr_cnt:4;
6691 u32 read_cnt:9;
6692 u32 write_cnt:9;
6693 u32 rsv:1;
6694 u32 act:1;
6695 #endif
6696 } r;
6697 u32 i;
6698 };
6699
6700 /**
6701 * @brief flash device status register data structure
6702 */
6703 union bfa_flash_dev_status_reg_u {
6704 struct {
6705 #ifdef __BIG_ENDIAN
6706 u32 rsv:21;
6707 u32 fifo_cnt:6;
6708 u32 busy:1;
6709 u32 init_status:1;
6710 u32 present:1;
6711 u32 bad:1;
6712 u32 good:1;
6713 #else
6714 u32 good:1;
6715 u32 bad:1;
6716 u32 present:1;
6717 u32 init_status:1;
6718 u32 busy:1;
6719 u32 fifo_cnt:6;
6720 u32 rsv:21;
6721 #endif
6722 } r;
6723 u32 i;
6724 };
6725
6726 /**
6727 * @brief flash address register data structure
6728 */
6729 union bfa_flash_addr_reg_u {
6730 struct {
6731 #ifdef __BIG_ENDIAN
6732 u32 addr:24;
6733 u32 dummy:8;
6734 #else
6735 u32 dummy:8;
6736 u32 addr:24;
6737 #endif
6738 } r;
6739 u32 i;
6740 };
6741
6742 /**
6743 * Flash raw private functions
6744 */
6745 static void
6746 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6747 u8 rd_cnt, u8 ad_cnt, u8 op)
6748 {
6749 union bfa_flash_cmd_reg_u cmd;
6750
6751 cmd.i = 0;
6752 cmd.r.act = 1;
6753 cmd.r.write_cnt = wr_cnt;
6754 cmd.r.read_cnt = rd_cnt;
6755 cmd.r.addr_cnt = ad_cnt;
6756 cmd.r.cmd = op;
6757 writel(cmd.i, (pci_bar + FLI_CMD_REG));
6758 }
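/*
 * Example encoding, as used by bfa_flash_status_read() below: an RDSR
 * with a 4-byte read phase and no address bytes is
 *
 *	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
 *
 * i.e. write_cnt = 0, read_cnt = 4, addr_cnt = 0, with 'act' set so the
 * controller kicks off the transfer.
 */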
6759
6760 static void
6761 bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6762 {
6763 union bfa_flash_addr_reg_u addr;
6764
6765 addr.r.addr = address & 0x00ffffff;
6766 addr.r.dummy = 0;
6767 writel(addr.i, (pci_bar + FLI_ADDR_REG));
6768 }
6769
6770 static int
6771 bfa_flash_cmd_act_check(void __iomem *pci_bar)
6772 {
6773 union bfa_flash_cmd_reg_u cmd;
6774
6775 cmd.i = readl(pci_bar + FLI_CMD_REG);
6776
6777 if (cmd.r.act)
6778 return BFA_FLASH_ERR_CMD_ACT;
6779
6780 return 0;
6781 }
6782
6783 /**
6784 * @brief
6785 * Flush FLI data fifo.
6786 *
6787 * @param[in] pci_bar - pci bar address
6789 *
6790 * Return 0 on success, negative error number on error.
6791 */
6792 static u32
6793 bfa_flash_fifo_flush(void __iomem *pci_bar)
6794 {
6795 u32 i;
6796 u32 t;
6797 union bfa_flash_dev_status_reg_u dev_status;
6798
6799 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6800
6801 if (!dev_status.r.fifo_cnt)
6802 return 0;
6803
6804 /* fifo counter in terms of words */
6805 for (i = 0; i < dev_status.r.fifo_cnt; i++)
6806 t = readl(pci_bar + FLI_RDDATA_REG);
6807
6808 /*
6809 * Check the device status. It may take some time.
6810 */
6811 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6812 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6813 if (!dev_status.r.fifo_cnt)
6814 break;
6815 }
6816
6817 if (dev_status.r.fifo_cnt)
6818 return BFA_FLASH_ERR_FIFO_CNT;
6819
6820 return 0;
6821 }
6822
6823 /**
6824 * @brief
6825 * Read flash status.
6826 *
6827 * @param[in] pci_bar - pci bar address
6828 *
6829 * Return 0 on success, negative error number on error.
6830 */
6831 static u32
6832 bfa_flash_status_read(void __iomem *pci_bar)
6833 {
6834 union bfa_flash_dev_status_reg_u dev_status;
6835 int status;
6836 u32 ret_status;
6837 int i;
6838
6839 status = bfa_flash_fifo_flush(pci_bar);
6840 if (status < 0)
6841 return status;
6842
6843 bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6844
6845 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6846 status = bfa_flash_cmd_act_check(pci_bar);
6847 if (!status)
6848 break;
6849 }
6850
6851 if (status)
6852 return status;
6853
6854 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6855 if (!dev_status.r.fifo_cnt)
6856 return BFA_FLASH_BUSY;
6857
6858 ret_status = readl(pci_bar + FLI_RDDATA_REG);
6859 ret_status >>= 24;
6860
6861 status = bfa_flash_fifo_flush(pci_bar);
6862 if (status < 0)
6863 return status;
6864
6865 return ret_status;
6866 }
6867
6868 /**
6869 * @brief
6870 * Start flash read operation.
6871 *
6872 * @param[in] pci_bar - pci bar address
6873 * @param[in] offset - flash address offset
6874 * @param[in] len - read data length
6875 * @param[in] buf - read data buffer
6876 *
6877 * Return 0 on success, negative error number on error.
6878 */
6879 static u32
6880 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6881 char *buf)
6882 {
6883 int status;
6884
6885 /*
6886 * len must be a multiple of 4 and must not exceed the fifo size
6887 */
6888 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6889 return BFA_FLASH_ERR_LEN;
6890
6891 /*
6892 * check status
6893 */
6894 status = bfa_flash_status_read(pci_bar);
6895 if (status == BFA_FLASH_BUSY)
6896 status = bfa_flash_status_read(pci_bar);
6897
6898 if (status < 0)
6899 return status;
6900
6901 /*
6902 * check if write-in-progress bit is cleared
6903 */
6904 if (status & BFA_FLASH_WIP_MASK)
6905 return BFA_FLASH_ERR_WIP;
6906
6907 bfa_flash_set_addr(pci_bar, offset);
6908
6909 bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6910
6911 return 0;
6912 }
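/*
 * A raw read is a three-step sequence: bfa_flash_read_start() arms the
 * transfer, bfa_flash_read_check() is polled until the 'act' bit clears,
 * and bfa_flash_read_end() drains the FIFO (see bfa_flash_raw_read()
 * below).
 */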
6913
6914 /**
6915 * @brief
6916 * Check flash read operation.
6917 *
6918 * @param[in] pci_bar - pci bar address
6919 *
6920 * Return flash device status, 1 if busy, 0 if not.
6921 */
6922 static u32
6923 bfa_flash_read_check(void __iomem *pci_bar)
6924 {
6925 if (bfa_flash_cmd_act_check(pci_bar))
6926 return 1;
6927
6928 return 0;
6929 }
6930 /**
6931 * @brief
6932 * End flash read operation.
6933 *
6934 * @param[in] pci_bar - pci bar address
6935 * @param[in] len - read data length
6936 * @param[in] buf - read data buffer
6937 *
6938 */
6939 static void
6940 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6941 {
6943 u32 i;
6944
6945 /*
6946 * read data fifo up to 32 words
6947 */
6948 for (i = 0; i < len; i += 4) {
6949 u32 w = readl(pci_bar + FLI_RDDATA_REG);
6950 *((u32 *) (buf + i)) = swab32(w);
6951 }
6952
6953 bfa_flash_fifo_flush(pci_bar);
6954 }
6955
6967
6968
6969 #define FLASH_BLOCKING_OP_MAX 500
6970 #define FLASH_SEM_LOCK_REG 0x18820
6971
6972 static int
6973 bfa_raw_sem_get(void __iomem *bar)
6974 {
6975 int locked;
6976
6977 locked = readl((bar + FLASH_SEM_LOCK_REG));
6978 return !locked;
6980 }
6981
6982 bfa_status_t
6983 bfa_flash_sem_get(void __iomem *bar)
6984 {
6985 u32 n = FLASH_BLOCKING_OP_MAX;
6986
6987 while (!bfa_raw_sem_get(bar)) {
6988 if (--n <= 0)
6989 return BFA_STATUS_BADFLASH;
6990 mdelay(10);
6991 }
6992 return BFA_STATUS_OK;
6993 }
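/*
 * Worst case this spins for FLASH_BLOCKING_OP_MAX * 10 ms = 5 seconds
 * before returning BFA_STATUS_BADFLASH; every successful
 * bfa_flash_sem_get() must be paired with bfa_flash_sem_put().
 */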
6994
6995 void
6996 bfa_flash_sem_put(void __iomem *bar)
6997 {
6998 writel(0, (bar + FLASH_SEM_LOCK_REG));
6999 }
7000
/**
 * @brief
 * Perform flash raw read.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash partition address offset
 * @param[in] buf - read data buffer
 * @param[in] len - read data length
 *
 * Return status.
 */
7001 bfa_status_t
7002 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
7003 u32 len)
7004 {
7005 u32 n;
7006 int status;
7007 u32 off, l, s, residue, fifo_sz;
7008
7009 residue = len;
7010 off = 0;
7011 fifo_sz = BFA_FLASH_FIFO_SIZE;
7012 status = bfa_flash_sem_get(pci_bar);
7013 if (status != BFA_STATUS_OK)
7014 return status;
7015
7016 while (residue) {
7017 s = offset + off;
7018 n = s / fifo_sz;
7019 l = (n + 1) * fifo_sz - s;
7020 if (l > residue)
7021 l = residue;
7022
7023 status = bfa_flash_read_start(pci_bar, offset + off, l,
7024 &buf[off]);
7025 if (status < 0) {
7026 bfa_flash_sem_put(pci_bar);
7027 return BFA_STATUS_FAILED;
7028 }
7029
7030 n = BFA_FLASH_BLOCKING_OP_MAX;
7031 while (bfa_flash_read_check(pci_bar)) {
7032 if (--n <= 0) {
7033 bfa_flash_sem_put(pci_bar);
7034 return BFA_STATUS_FAILED;
7035 }
7036 }
7037
7038 bfa_flash_read_end(pci_bar, l, &buf[off]);
7039
7040 residue -= l;
7041 off += l;
7042 }
7043 bfa_flash_sem_put(pci_bar);
7044
7045 return BFA_STATUS_OK;
7046 }
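/*
 * Worked example of the FIFO-aligned chunking above (illustrative): with
 * the 128-byte FIFO, offset = 100 and len = 300 is read as bursts of
 * 28 + 128 + 128 + 16 bytes, so no single burst crosses a FIFO-size
 * boundary.
 */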
7047