1 /*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18 /*
19 * bfad.c Linux driver PCI interface module.
20 */
21 #include <linux/module.h>
22 #include <linux/kthread.h>
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <linux/fs.h>
27 #include <linux/pci.h>
28 #include <linux/firmware.h>
29 #include <asm/uaccess.h>
30 #include <asm/fcntl.h>
31
32 #include "bfad_drv.h"
33 #include "bfad_im.h"
34 #include "bfa_fcs.h"
35 #include "bfa_defs.h"
36 #include "bfa.h"
37
38 BFA_TRC_FILE(LDRV, BFAD);
39 DEFINE_MUTEX(bfad_mutex);
40 LIST_HEAD(bfad_list);
41
42 static int bfad_inst;
43 static int num_sgpgs_parm;
44 int supported_fc4s;
45 char *host_name, *os_name, *os_patch;
46 int num_rports, num_ios, num_tms;
47 int num_fcxps, num_ufbufs;
48 int reqq_size, rspq_size, num_sgpgs;
49 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
50 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
51 int bfa_io_max_sge = BFAD_IO_MAX_SGE;
52 int bfa_log_level = 3; /* WARNING log level */
53 int ioc_auto_recover = BFA_TRUE;
54 int bfa_linkup_delay = -1;
55 int fdmi_enable = BFA_TRUE;
56 int pcie_max_read_reqsz;
57 int bfa_debugfs_enable = 1;
58 int msix_disable_cb = 0, msix_disable_ct = 0;
59 int max_xfer_size = BFAD_MAX_SECTORS >> 1;
60 int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
61
62 /* Firmware related */
63 u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
64 u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
65
66 #define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
67 #define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
68 #define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
69
70 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
71 static void bfad_free_fwimg(void);
72 static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
73 u32 *bfi_image_size, char *fw_name);
74
75 static const char *msix_name_ct[] = {
76 "ctrl",
77 "cpe0", "cpe1", "cpe2", "cpe3",
78 "rme0", "rme1", "rme2", "rme3" };
79
80 static const char *msix_name_cb[] = {
81 "cpe0", "cpe1", "cpe2", "cpe3",
82 "rme0", "rme1", "rme2", "rme3",
83 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
84
85 MODULE_FIRMWARE(BFAD_FW_FILE_CB);
86 MODULE_FIRMWARE(BFAD_FW_FILE_CT);
87 MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
88
89 module_param(os_name, charp, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(os_name, "OS name of the HBA host machine");
91 module_param(os_patch, charp, S_IRUGO | S_IWUSR);
92 MODULE_PARM_DESC(os_patch, "OS patch level of the HBA host machine");
93 module_param(host_name, charp, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(host_name, "Hostname of the HBA host machine");
95 module_param(num_rports, int, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
97 "(physical/logical), default=1024");
98 module_param(num_ios, int, S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
100 module_param(num_tms, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(num_tms, "Max number of task management requests, default=128");
102 module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
103 MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
104 module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
105 MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
106 "buffers, default=64");
107 module_param(reqq_size, int, S_IRUGO | S_IWUSR);
108 MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
109 "default=256");
110 module_param(rspq_size, int, S_IRUGO | S_IWUSR);
111 MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
112 "default=64");
113 module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
114 MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
115 module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
116 MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
117 "Range[>0]");
118 module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
119 MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
120 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
121 MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
122 module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
123 MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
124 "Range[Critical:1|Error:2|Warning:3|Info:4]");
125 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
126 MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
127 "Range[off:0|on:1]");
128 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
129 MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
130 "boot port. Otherwise 10 secs in RHEL4 & 0 for "
131 "[RHEL5, SLES10, ESX40] Range[>0]");
132 module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
133 MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
134 "for Brocade-415/425/815/825 cards, default=0, "
135 " Range[false:0|true:1]");
136 module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
137 MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
138 "if possible for Brocade-1010/1020/804/1007/902/1741 "
139 "cards, default=0, Range[false:0|true:1]");
140 module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
141 MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
142 "Range[false:0|true:1]");
143 module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
144 MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
145 "(use system setting), Range[128|256|512|1024|2048|4096]");
146 module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
147 MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
148 " Range[false:0|true:1]");
149 module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
150 MODULE_PARM_DESC(max_xfer_size, "Maximum transfer size, default=32MB,"
151 " Range[64k|128k|256k|512k|1024k|2048k]");
152 module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
153 MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
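/*
 * Illustrative usage (hypothetical values, assuming the driver is built as
 * the "bfa" module): most of the parameters above can be overridden at
 * module load time, e.g.
 *	modprobe bfa bfa_lun_queue_depth=64 bfa_log_level=4 msix_disable_ct=1
 */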
154
155 static void
156 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
157 static void
158 bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
159 static void
160 bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
161 static void
162 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
163 static void
164 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
165 static void
166 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
167 static void
168 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
169
170 /*
171 * Beginning state for the driver instance, awaiting the pci_probe event
172 */
173 static void
174 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
175 {
176 bfa_trc(bfad, event);
177
178 switch (event) {
179 case BFAD_E_CREATE:
180 bfa_sm_set_state(bfad, bfad_sm_created);
181 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
182 "%s", "bfad_worker");
183 if (IS_ERR(bfad->bfad_tsk)) {
184 printk(KERN_INFO "bfad[%d]: Kernel thread "
185 "creation failed!\n", bfad->inst_no);
186 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
187 }
188 bfa_sm_send_event(bfad, BFAD_E_INIT);
189 break;
190
191 case BFAD_E_STOP:
192 /* Ignore stop; already in uninit */
193 break;
194
195 default:
196 bfa_sm_fault(bfad, event);
197 }
198 }
199
200 /*
201 * Driver instance is created; awaiting the INIT event to initialize the bfad
202 */
203 static void
204 bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
205 {
206 unsigned long flags;
207 bfa_status_t ret;
208
209 bfa_trc(bfad, event);
210
211 switch (event) {
212 case BFAD_E_INIT:
213 bfa_sm_set_state(bfad, bfad_sm_initializing);
214
215 init_completion(&bfad->comp);
216
217 /* Enable interrupts and wait for bfa_init completion */
218 if (bfad_setup_intr(bfad)) {
219 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
220 bfad->inst_no);
221 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
222 break;
223 }
224
225 spin_lock_irqsave(&bfad->bfad_lock, flags);
226 bfa_iocfc_init(&bfad->bfa);
227 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
228
229 /* Set up an interrupt handler for each vector */
230 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
231 bfad_install_msix_handler(bfad)) {
232 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
233 __func__, bfad->inst_no);
234 }
235
236 bfad_init_timer(bfad);
237
238 wait_for_completion(&bfad->comp);
239
240 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
241 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
242 } else {
243 printk(KERN_WARNING
244 "bfa %s: bfa init failed\n",
245 bfad->pci_name);
246 spin_lock_irqsave(&bfad->bfad_lock, flags);
247 bfa_fcs_init(&bfad->bfa_fcs);
248 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
249
250 ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
251 if (ret != BFA_STATUS_OK) {
252 init_completion(&bfad->comp);
253
254 spin_lock_irqsave(&bfad->bfad_lock, flags);
255 bfad->pport.flags |= BFAD_PORT_DELETE;
256 bfa_fcs_exit(&bfad->bfa_fcs);
257 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
258
259 wait_for_completion(&bfad->comp);
260
261 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
262 break;
263 }
264 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
265 bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
266 }
267
268 break;
269
270 case BFAD_E_KTHREAD_CREATE_FAILED:
271 bfa_sm_set_state(bfad, bfad_sm_uninit);
272 break;
273
274 default:
275 bfa_sm_fault(bfad, event);
276 }
277 }
278
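/*
 * Driver instance is initializing, awaiting the result of bfa/HAL init
 */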
279 static void
280 bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
281 {
282 int retval;
283 unsigned long flags;
284
285 bfa_trc(bfad, event);
286
287 switch (event) {
288 case BFAD_E_INIT_SUCCESS:
289 kthread_stop(bfad->bfad_tsk);
290 spin_lock_irqsave(&bfad->bfad_lock, flags);
291 bfad->bfad_tsk = NULL;
292 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
293
294 retval = bfad_start_ops(bfad);
295 if (retval != BFA_STATUS_OK) {
296 bfa_sm_set_state(bfad, bfad_sm_failed);
297 break;
298 }
299 bfa_sm_set_state(bfad, bfad_sm_operational);
300 break;
301
302 case BFAD_E_INIT_FAILED:
303 bfa_sm_set_state(bfad, bfad_sm_uninit);
304 kthread_stop(bfad->bfad_tsk);
305 spin_lock_irqsave(&bfad->bfad_lock, flags);
306 bfad->bfad_tsk = NULL;
307 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
308 break;
309
310 case BFAD_E_HAL_INIT_FAILED:
311 bfa_sm_set_state(bfad, bfad_sm_failed);
312 break;
313 default:
314 bfa_sm_fault(bfad, event);
315 }
316 }
317
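/*
 * Driver init failed, awaiting an init-success retry, stop or exit-complete event
 */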
318 static void
319 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
320 {
321 int retval;
322
323 bfa_trc(bfad, event);
324
325 switch (event) {
326 case BFAD_E_INIT_SUCCESS:
327 retval = bfad_start_ops(bfad);
328 if (retval != BFA_STATUS_OK)
329 break;
330 bfa_sm_set_state(bfad, bfad_sm_operational);
331 break;
332
333 case BFAD_E_STOP:
334 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
335 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
336 break;
337
338 case BFAD_E_EXIT_COMP:
339 bfa_sm_set_state(bfad, bfad_sm_uninit);
340 bfad_remove_intr(bfad);
341 del_timer_sync(&bfad->hal_tmo);
342 break;
343
344 default:
345 bfa_sm_fault(bfad, event);
346 }
347 }
348
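/*
 * Driver is operational, awaiting the stop event
 */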
349 static void
350 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
351 {
352 bfa_trc(bfad, event);
353
354 switch (event) {
355 case BFAD_E_STOP:
356 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
357 bfad_fcs_stop(bfad);
358 break;
359
360 default:
361 bfa_sm_fault(bfad, event);
362 }
363 }
364
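/*
 * FCS exit is in progress, awaiting its completion before stopping the HAL
 */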
365 static void
366 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
367 {
368 bfa_trc(bfad, event);
369
370 switch (event) {
371 case BFAD_E_FCS_EXIT_COMP:
372 bfa_sm_set_state(bfad, bfad_sm_stopping);
373 bfad_stop(bfad);
374 break;
375
376 default:
377 bfa_sm_fault(bfad, event);
378 }
379 }
380
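/*
 * Driver stop is in progress, awaiting HAL stop completion for final teardown
 */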
381 static void
382 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
383 {
384 bfa_trc(bfad, event);
385
386 switch (event) {
387 case BFAD_E_EXIT_COMP:
388 bfa_sm_set_state(bfad, bfad_sm_uninit);
389 bfad_remove_intr(bfad);
390 del_timer_sync(&bfad->hal_tmo);
391 bfad_im_probe_undo(bfad);
392 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
393 bfad_uncfg_pport(bfad);
394 break;
395
396 default:
397 bfa_sm_fault(bfad, event);
398 break;
399 }
400 }
401
402 /*
403 * BFA callbacks
404 */
405 void
406 bfad_hcb_comp(void *arg, bfa_status_t status)
407 {
408 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
409
410 fcomp->status = status;
411 complete(&fcomp->comp);
412 }
413
414 /*
415 * bfa_init callback
416 */
417 void
418 bfa_cb_init(void *drv, bfa_status_t init_status)
419 {
420 struct bfad_s *bfad = drv;
421
422 if (init_status == BFA_STATUS_OK) {
423 bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
424
425 /*
426 * If the BFAD_HAL_INIT_FAIL flag is set, wake up the
427 * kernel thread to start the bfad operations now that
428 * HAL init is done.
429 */
430 if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
431 bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
432 wake_up_process(bfad->bfad_tsk);
433 }
434 }
435
436 complete(&bfad->comp);
437 }
438
439 /*
440 * BFA_FCS callbacks
441 */
442 struct bfad_port_s *
443 bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
444 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
445 struct bfad_vport_s *vp_drv)
446 {
447 bfa_status_t rc;
448 struct bfad_port_s *port_drv;
449
450 if (!vp_drv && !vf_drv) {
451 port_drv = &bfad->pport;
452 port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
453 } else if (!vp_drv && vf_drv) {
454 port_drv = &vf_drv->base_port;
455 port_drv->pvb_type = BFAD_PORT_VF_BASE;
456 } else if (vp_drv && !vf_drv) {
457 port_drv = &vp_drv->drv_port;
458 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
459 } else {
460 port_drv = &vp_drv->drv_port;
461 port_drv->pvb_type = BFAD_PORT_VF_VPORT;
462 }
463
464 port_drv->fcs_port = port;
465 port_drv->roles = roles;
466
467 if (roles & BFA_LPORT_ROLE_FCP_IM) {
468 rc = bfad_im_port_new(bfad, port_drv);
469 if (rc != BFA_STATUS_OK) {
470 bfad_im_port_delete(bfad, port_drv);
471 port_drv = NULL;
472 }
473 }
474
475 return port_drv;
476 }
477
478 /*
479 * FCS RPORT alloc callback, after successful PLOGI by FCS
480 */
481 bfa_status_t
482 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
483 struct bfad_rport_s **rport_drv)
484 {
485 bfa_status_t rc = BFA_STATUS_OK;
486
487 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
488 if (*rport_drv == NULL) {
489 rc = BFA_STATUS_ENOMEM;
490 goto ext;
491 }
492
493 *rport = &(*rport_drv)->fcs_rport;
494
495 ext:
496 return rc;
497 }
498
499 /*
500 * FCS PBC VPORT Create
501 */
502 void
503 bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
504 {
505
506 struct bfa_lport_cfg_s port_cfg = {0};
507 struct bfad_vport_s *vport;
508 int rc;
509
510 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
511 if (!vport) {
512 bfa_trc(bfad, 0);
513 return;
514 }
515
516 vport->drv_port.bfad = bfad;
517 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
518 port_cfg.pwwn = pbc_vport.vp_pwwn;
519 port_cfg.nwwn = pbc_vport.vp_nwwn;
520 port_cfg.preboot_vp = BFA_TRUE;
521
522 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
523 &port_cfg, vport);
524
525 if (rc != BFA_STATUS_OK) {
526 bfa_trc(bfad, 0);
527 return;
528 }
529
530 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
531 }
532
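/*
 * Release the KVA and DMA memory that was allocated for the HAL
 */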
533 void
534 bfad_hal_mem_release(struct bfad_s *bfad)
535 {
536 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
537 struct bfa_mem_dma_s *dma_info, *dma_elem;
538 struct bfa_mem_kva_s *kva_info, *kva_elem;
539 struct list_head *dm_qe, *km_qe;
540
541 dma_info = &hal_meminfo->dma_info;
542 kva_info = &hal_meminfo->kva_info;
543
544 /* Iterate through the KVA meminfo queue */
545 list_for_each(km_qe, &kva_info->qe) {
546 kva_elem = (struct bfa_mem_kva_s *) km_qe;
547 vfree(kva_elem->kva);
548 }
549
550 /* Iterate through the DMA meminfo queue */
551 list_for_each(dm_qe, &dma_info->qe) {
552 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
553 dma_free_coherent(&bfad->pcidev->dev,
554 dma_elem->mem_len, dma_elem->kva,
555 (dma_addr_t) dma_elem->dma);
556 }
557
558 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
559 }
560
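/*
 * Apply module parameter overrides to the HAL config and reflect the
 * effective values back into the module parameters
 */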
561 void
562 bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
563 {
564 if (num_rports > 0)
565 bfa_cfg->fwcfg.num_rports = num_rports;
566 if (num_ios > 0)
567 bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
568 if (num_tms > 0)
569 bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
570 if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
571 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
572 if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
573 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
574 if (reqq_size > 0)
575 bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
576 if (rspq_size > 0)
577 bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
578 if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
579 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
580
581 /*
582 * Populate the HAL values back into the driver for sysfs use;
583 * otherwise, the default values would show up as 0 in sysfs.
584 */
585 num_rports = bfa_cfg->fwcfg.num_rports;
586 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
587 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
588 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
589 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
590 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
591 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
592 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
593 }
594
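/*
 * Allocate the KVA and DMA memory requested by the HAL; everything is
 * released again on any allocation failure
 */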
595 bfa_status_t
596 bfad_hal_mem_alloc(struct bfad_s *bfad)
597 {
598 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
599 struct bfa_mem_dma_s *dma_info, *dma_elem;
600 struct bfa_mem_kva_s *kva_info, *kva_elem;
601 struct list_head *dm_qe, *km_qe;
602 bfa_status_t rc = BFA_STATUS_OK;
603 dma_addr_t phys_addr;
604
605 bfa_cfg_get_default(&bfad->ioc_cfg);
606 bfad_update_hal_cfg(&bfad->ioc_cfg);
607 bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
608 bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
609
610 dma_info = &hal_meminfo->dma_info;
611 kva_info = &hal_meminfo->kva_info;
612
613 /* Iterate through the KVA meminfo queue */
614 list_for_each(km_qe, &kva_info->qe) {
615 kva_elem = (struct bfa_mem_kva_s *) km_qe;
616 kva_elem->kva = vmalloc(kva_elem->mem_len);
617 if (kva_elem->kva == NULL) {
618 bfad_hal_mem_release(bfad);
619 rc = BFA_STATUS_ENOMEM;
620 goto ext;
621 }
622 memset(kva_elem->kva, 0, kva_elem->mem_len);
623 }
624
625 /* Iterate through the DMA meminfo queue */
626 list_for_each(dm_qe, &dma_info->qe) {
627 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
628 dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
629 dma_elem->mem_len,
630 &phys_addr, GFP_KERNEL);
631 if (dma_elem->kva == NULL) {
632 bfad_hal_mem_release(bfad);
633 rc = BFA_STATUS_ENOMEM;
634 goto ext;
635 }
636 dma_elem->dma = phys_addr;
637 memset(dma_elem->kva, 0, dma_elem->mem_len);
638 }
639 ext:
640 return rc;
641 }
642
643 /*
644 * Create a vport under a vf.
645 */
646 bfa_status_t
647 bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
648 struct bfa_lport_cfg_s *port_cfg, struct device *dev)
649 {
650 struct bfad_vport_s *vport;
651 int rc = BFA_STATUS_OK;
652 unsigned long flags;
653 struct completion fcomp;
654
655 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
656 if (!vport) {
657 rc = BFA_STATUS_ENOMEM;
658 goto ext;
659 }
660
661 vport->drv_port.bfad = bfad;
662 spin_lock_irqsave(&bfad->bfad_lock, flags);
663 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
664 port_cfg, vport);
665 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
666
667 if (rc != BFA_STATUS_OK)
668 goto ext_free_vport;
669
670 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
671 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
672 dev);
673 if (rc != BFA_STATUS_OK)
674 goto ext_free_fcs_vport;
675 }
676
677 spin_lock_irqsave(&bfad->bfad_lock, flags);
678 bfa_fcs_vport_start(&vport->fcs_vport);
679 list_add_tail(&vport->list_entry, &bfad->vport_list);
680 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
681
682 return BFA_STATUS_OK;
683
684 ext_free_fcs_vport:
685 spin_lock_irqsave(&bfad->bfad_lock, flags);
686 vport->comp_del = &fcomp;
687 init_completion(vport->comp_del);
688 bfa_fcs_vport_delete(&vport->fcs_vport);
689 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
690 wait_for_completion(vport->comp_del);
691 ext_free_vport:
692 kfree(vport);
693 ext:
694 return rc;
695 }
696
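/*
 * Periodic HAL timer callback: beat the BFA timer module, process any
 * pending completions and re-arm the timer
 */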
697 void
698 bfad_bfa_tmo(unsigned long data)
699 {
700 struct bfad_s *bfad = (struct bfad_s *) data;
701 unsigned long flags;
702 struct list_head doneq;
703
704 spin_lock_irqsave(&bfad->bfad_lock, flags);
705
706 bfa_timer_beat(&bfad->bfa.timer_mod);
707
708 bfa_comp_deq(&bfad->bfa, &doneq);
709 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
710
711 if (!list_empty(&doneq)) {
712 bfa_comp_process(&bfad->bfa, &doneq);
713 spin_lock_irqsave(&bfad->bfad_lock, flags);
714 bfa_comp_free(&bfad->bfa, &doneq);
715 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
716 }
717
718 mod_timer(&bfad->hal_tmo,
719 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
720 }
721
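/*
 * Set up and arm the periodic HAL timer
 */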
722 void
723 bfad_init_timer(struct bfad_s *bfad)
724 {
725 init_timer(&bfad->hal_tmo);
726 bfad->hal_tmo.function = bfad_bfa_tmo;
727 bfad->hal_tmo.data = (unsigned long)bfad;
728
729 mod_timer(&bfad->hal_tmo,
730 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
731 }
732
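/*
 * PCI setup: enable the device, request regions, set the DMA mask,
 * map BARs and apply the pcie_max_read_reqsz override if requested
 */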
733 int
734 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
735 {
736 int rc = -ENODEV;
737
738 if (pci_enable_device(pdev)) {
739 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
740 goto out;
741 }
742
743 if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
744 goto out_disable_device;
745
746 pci_set_master(pdev);
747
748
749 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
750 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
751 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
752 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
753 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
754 goto out_release_region;
755 }
756 }
757
758 /* Enable PCIe Advanced Error Reporting (AER) if the kernel supports it */
759 pci_enable_pcie_error_reporting(pdev);
760
761 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
762 bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
763
764 if (bfad->pci_bar0_kva == NULL) {
765 printk(KERN_ERR "Fail to map bar0\n");
766 goto out_release_region;
767 }
768
769 bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
770 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
771 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
772 bfad->hal_pcidev.device_id = pdev->device;
773 bfad->hal_pcidev.ssid = pdev->subsystem_device;
774 bfad->pci_name = pci_name(pdev);
775
776 bfad->pci_attr.vendor_id = pdev->vendor;
777 bfad->pci_attr.device_id = pdev->device;
778 bfad->pci_attr.ssid = pdev->subsystem_device;
779 bfad->pci_attr.ssvid = pdev->subsystem_vendor;
780 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
781
782 bfad->pcidev = pdev;
783
784 /* Adjust PCIe Maximum Read Request Size */
785 if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
786 if (pcie_max_read_reqsz >= 128 &&
787 pcie_max_read_reqsz <= 4096 &&
788 is_power_of_2(pcie_max_read_reqsz)) {
789 int max_rq = pcie_get_readrq(pdev);
790 printk(KERN_WARNING "BFA[%s]: "
791 "pcie_max_read_request_size is %d, "
792 "reset to %d\n", bfad->pci_name, max_rq,
793 pcie_max_read_reqsz);
794 pcie_set_readrq(pdev, pcie_max_read_reqsz);
795 } else {
796 printk(KERN_WARNING "BFA[%s]: invalid "
797 "pcie_max_read_request_size %d ignored\n",
798 bfad->pci_name, pcie_max_read_reqsz);
799 }
800 }
801
802 pci_save_state(pdev);
803
804 return 0;
805
806 out_release_region:
807 pci_release_regions(pdev);
808 out_disable_device:
809 pci_disable_device(pdev);
810 out:
811 return rc;
812 }
813
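/*
 * PCI teardown: unmap BARs, release regions and disable the device
 */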
814 void
815 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
816 {
817 pci_iounmap(pdev, bfad->pci_bar0_kva);
818 pci_iounmap(pdev, bfad->pci_bar2_kva);
819 pci_release_regions(pdev);
820 /* Disable PCIe Advanced Error Reporting (AER) */
821 pci_disable_pcie_error_reporting(pdev);
822 pci_disable_device(pdev);
823 }
824
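/*
 * Driver init: allocate HAL memory and attach the BFA (HAL) and FCS modules
 */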
825 bfa_status_t
826 bfad_drv_init(struct bfad_s *bfad)
827 {
828 bfa_status_t rc;
829 unsigned long flags;
830
831 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
832 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
833 bfad->cfg_data.io_max_sge = bfa_io_max_sge;
834 bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
835
836 rc = bfad_hal_mem_alloc(bfad);
837 if (rc != BFA_STATUS_OK) {
838 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
839 bfad->inst_no);
840 printk(KERN_WARNING
841 "Not enough memory to attach all Brocade HBA ports, %s",
842 "System may need more memory.\n");
843 return BFA_STATUS_FAILED;
844 }
845
846 bfad->bfa.trcmod = bfad->trcmod;
847 bfad->bfa.plog = &bfad->plog_buf;
848 bfa_plog_init(&bfad->plog_buf);
849 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
850 0, "Driver Attach");
851
852 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
853 &bfad->hal_pcidev);
854
855 /* FCS INIT */
856 spin_lock_irqsave(&bfad->bfad_lock, flags);
857 bfad->bfa_fcs.trcmod = bfad->trcmod;
858 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
859 bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
860 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
861
862 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
863
864 return BFA_STATUS_OK;
865 }
866
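/*
 * Driver uninit: stop the IOC, disable interrupts and release HAL memory
 */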
867 void
868 bfad_drv_uninit(struct bfad_s *bfad)
869 {
870 unsigned long flags;
871
872 spin_lock_irqsave(&bfad->bfad_lock, flags);
873 init_completion(&bfad->comp);
874 bfa_iocfc_stop(&bfad->bfa);
875 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
876 wait_for_completion(&bfad->comp);
877
878 del_timer_sync(&bfad->hal_tmo);
879 bfa_isr_disable(&bfad->bfa);
880 bfa_detach(&bfad->bfa);
881 bfad_remove_intr(bfad);
882 bfad_hal_mem_release(bfad);
883
884 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
885 }
886
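/*
 * Start the IOC and the FCS fabric, then flush the IM driver workqueue
 */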
887 void
888 bfad_drv_start(struct bfad_s *bfad)
889 {
890 unsigned long flags;
891
892 spin_lock_irqsave(&bfad->bfad_lock, flags);
893 bfa_iocfc_start(&bfad->bfa);
894 bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
895 bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
896 bfad->bfad_flags |= BFAD_HAL_START_DONE;
897 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
898
899 if (bfad->im)
900 flush_workqueue(bfad->im->drv_workq);
901 }
902
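/*
 * Stop the FCS and wait for its exit completion before signaling the
 * state machine
 */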
903 void
904 bfad_fcs_stop(struct bfad_s *bfad)
905 {
906 unsigned long flags;
907
908 spin_lock_irqsave(&bfad->bfad_lock, flags);
909 init_completion(&bfad->comp);
910 bfad->pport.flags |= BFAD_PORT_DELETE;
911 bfa_fcs_exit(&bfad->bfa_fcs);
912 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
913 wait_for_completion(&bfad->comp);
914
915 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
916 }
917
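/*
 * Stop the IOC and signal exit completion to the state machine
 */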
918 void
919 bfad_stop(struct bfad_s *bfad)
920 {
921 unsigned long flags;
922
923 spin_lock_irqsave(&bfad->bfad_lock, flags);
924 init_completion(&bfad->comp);
925 bfa_iocfc_stop(&bfad->bfa);
926 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
927 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
928 wait_for_completion(&bfad->comp);
929
930 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
931 }
932
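/*
 * Configure the physical port: allocate a SCSI host for it when the
 * FCP initiator (IM) role is enabled
 */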
933 bfa_status_t
934 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
935 {
936 int rc = BFA_STATUS_OK;
937
938 /* Allocate scsi_host for the physical port */
939 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
940 (role & BFA_LPORT_ROLE_FCP_IM)) {
941 if (bfad->pport.im_port == NULL) {
942 rc = BFA_STATUS_FAILED;
943 goto out;
944 }
945
946 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
947 &bfad->pcidev->dev);
948 if (rc != BFA_STATUS_OK)
949 goto out;
950
951 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
952 }
953
954 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
955
956 out:
957 return rc;
958 }
959
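/*
 * Undo bfad_cfg_pport(): free the SCSI host and IM port of the physical port
 */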
960 void
961 bfad_uncfg_pport(struct bfad_s *bfad)
962 {
963 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
964 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
965 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
966 bfad_im_port_clean(bfad->pport.im_port);
967 kfree(bfad->pport.im_port);
968 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
969 }
970
971 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
972 }
973
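/*
 * Start driver operations: register driver info with the FCS, configure the
 * physical port, probe the IM module, start the HAL and complete any
 * pre-boot (PBC) vport creation
 */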
974 bfa_status_t
975 bfad_start_ops(struct bfad_s *bfad) {
976
977 int retval;
978 unsigned long flags;
979 struct bfad_vport_s *vport, *vport_new;
980 struct bfa_fcs_driver_info_s driver_info;
981
982 /* Clamp max_xfer_size to the [64KB, 32MB] range */
983 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
984 max_xfer_size = BFAD_MIN_SECTORS >> 1;
985 if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
986 max_xfer_size = BFAD_MAX_SECTORS >> 1;
987
988 /* Fill in the driver_info and pass it to the FCS */
989 memset(&driver_info, 0, sizeof(driver_info));
990 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
991 sizeof(driver_info.version) - 1);
992 if (host_name)
993 strncpy(driver_info.host_machine_name, host_name,
994 sizeof(driver_info.host_machine_name) - 1);
995 if (os_name)
996 strncpy(driver_info.host_os_name, os_name,
997 sizeof(driver_info.host_os_name) - 1);
998 if (os_patch)
999 strncpy(driver_info.host_os_patch, os_patch,
1000 sizeof(driver_info.host_os_patch) - 1);
1001
1002 strncpy(driver_info.os_device_name, bfad->pci_name,
1003 sizeof(driver_info.os_device_name) - 1);
1004
1005 /* FCS driver info init */
1006 spin_lock_irqsave(&bfad->bfad_lock, flags);
1007 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1008
1009 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
1010 bfa_fcs_update_cfg(&bfad->bfa_fcs);
1011 else
1012 bfa_fcs_init(&bfad->bfa_fcs);
1013
1014 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1015
1016 if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
1017 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
1018 if (retval != BFA_STATUS_OK)
1019 return BFA_STATUS_FAILED;
1020 }
1021
1022 /* Set up fc_host fixed attributes if the kernel supports them */
1023 bfad_fc_host_init(bfad->pport.im_port);
1024
1025 /* BFAD level FC4 IM specific resource allocation */
1026 retval = bfad_im_probe(bfad);
1027 if (retval != BFA_STATUS_OK) {
1028 printk(KERN_WARNING "bfad_im_probe failed\n");
1029 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1030 bfa_sm_set_state(bfad, bfad_sm_failed);
1031 return BFA_STATUS_FAILED;
1032 } else
1033 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1034
1035 bfad_drv_start(bfad);
1036
1037 /* Complete pbc vport create */
1038 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1039 list_entry) {
1040 struct fc_vport_identifiers vid;
1041 struct fc_vport *fc_vport;
1042 char pwwn_buf[BFA_STRING_32];
1043
1044 memset(&vid, 0, sizeof(vid));
1045 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1046 vid.vport_type = FC_PORTTYPE_NPIV;
1047 vid.disable = false;
1048 vid.node_name = wwn_to_u64((u8 *)
1049 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1050 vid.port_name = wwn_to_u64((u8 *)
1051 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
1052 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1053 if (!fc_vport) {
1054 wwn2str(pwwn_buf, vid.port_name);
1055 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1056 " %s\n", bfad->inst_no, pwwn_buf);
1057 }
1058 list_del(&vport->list_entry);
1059 kfree(vport);
1060 }
1061
1062 /*
1063 * If bfa_linkup_delay is left at the default of -1, retrieve the
1064 * value using bfad_get_linkup_delay(); otherwise use the
1065 * module parameter value passed in as the bfa_linkup_delay.
1066 */
1067 if (bfa_linkup_delay < 0) {
1068 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1069 bfad_rport_online_wait(bfad);
1070 bfa_linkup_delay = -1;
1071 } else
1072 bfad_rport_online_wait(bfad);
1073
1074 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1075
1076 return BFA_STATUS_OK;
1077 }
1078
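/*
 * Worker kernel thread: sends BFAD_E_INIT_SUCCESS once and exits
 */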
1079 int
1080 bfad_worker(void *ptr)
1081 {
1082 struct bfad_s *bfad;
1083 unsigned long flags;
1084
1085 bfad = (struct bfad_s *)ptr;
1086
1087 while (!kthread_should_stop()) {
1088
1089 /* Send event BFAD_E_INIT_SUCCESS */
1090 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1091
1092 spin_lock_irqsave(&bfad->bfad_lock, flags);
1093 bfad->bfad_tsk = NULL;
1094 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1095
1096 break;
1097 }
1098
1099 return 0;
1100 }
1101
1102 /*
1103 * BFA driver interrupt functions
1104 */
1105 irqreturn_t
1106 bfad_intx(int irq, void *dev_id)
1107 {
1108 struct bfad_s *bfad = dev_id;
1109 struct list_head doneq;
1110 unsigned long flags;
1111 bfa_boolean_t rc;
1112
1113 spin_lock_irqsave(&bfad->bfad_lock, flags);
1114 rc = bfa_intx(&bfad->bfa);
1115 if (!rc) {
1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1117 return IRQ_NONE;
1118 }
1119
1120 bfa_comp_deq(&bfad->bfa, &doneq);
1121 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1122
1123 if (!list_empty(&doneq)) {
1124 bfa_comp_process(&bfad->bfa, &doneq);
1125
1126 spin_lock_irqsave(&bfad->bfad_lock, flags);
1127 bfa_comp_free(&bfad->bfa, &doneq);
1128 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1129 }
1130
1131 return IRQ_HANDLED;
1132
1133 }
1134
1135 static irqreturn_t
1136 bfad_msix(int irq, void *dev_id)
1137 {
1138 struct bfad_msix_s *vec = dev_id;
1139 struct bfad_s *bfad = vec->bfad;
1140 struct list_head doneq;
1141 unsigned long flags;
1142
1143 spin_lock_irqsave(&bfad->bfad_lock, flags);
1144
1145 bfa_msix(&bfad->bfa, vec->msix.entry);
1146 bfa_comp_deq(&bfad->bfa, &doneq);
1147 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1148
1149 if (!list_empty(&doneq)) {
1150 bfa_comp_process(&bfad->bfa, &doneq);
1151
1152 spin_lock_irqsave(&bfad->bfad_lock, flags);
1153 bfa_comp_free(&bfad->bfa, &doneq);
1154 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1155 }
1156
1157 return IRQ_HANDLED;
1158 }
1159
1160 /*
1161 * Initialize the MSIX entry table.
1162 */
1163 static void
1164 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1165 int mask, int max_bit)
1166 {
1167 int i;
1168 int match = 0x00000001;
1169
1170 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1171 if (mask & match) {
1172 bfad->msix_tab[bfad->nvec].msix.entry = i;
1173 bfad->msix_tab[bfad->nvec].bfad = bfad;
1174 msix_entries[bfad->nvec].entry = i;
1175 bfad->nvec++;
1176 }
1177
1178 match <<= 1;
1179 }
1180
1181 }
1182
1183 int
1184 bfad_install_msix_handler(struct bfad_s *bfad)
1185 {
1186 int i, error = 0;
1187
1188 for (i = 0; i < bfad->nvec; i++) {
1189 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1190 bfad->pci_name,
1191 ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
1192 msix_name_cb[i] : msix_name_ct[i]));
1193
1194 error = request_irq(bfad->msix_tab[i].msix.vector,
1195 (irq_handler_t) bfad_msix, 0,
1196 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1197 bfa_trc(bfad, i);
1198 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1199 if (error) {
1200 int j;
1201
1202 for (j = 0; j < i; j++)
1203 free_irq(bfad->msix_tab[j].msix.vector,
1204 &bfad->msix_tab[j]);
1205
1206 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1207 pci_disable_msix(bfad->pcidev);
1208
1209 return 1;
1210 }
1211 }
1212
1213 return 0;
1214 }
1215
1216 /*
1217 * Setup MSIX based interrupt.
1218 */
1219 int
1220 bfad_setup_intr(struct bfad_s *bfad)
1221 {
1222 int error;
1223 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1224 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1225 struct pci_dev *pdev = bfad->pcidev;
1226 u16 reg;
1227
1228 /* Call BFA to get the msix map for this PCI function. */
1229 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1230
1231 /* Set up the msix entry table */
1232 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1233
1234 if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
1235 (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
1236
1237 error = pci_enable_msix_exact(bfad->pcidev,
1238 msix_entries, bfad->nvec);
1239 /* In CT1 & CT2, try to allocate just one vector */
1240 if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) {
1241 printk(KERN_WARNING "bfa %s: trying one msix "
1242 "vector failed to allocate %d[%d]\n",
1243 bfad->pci_name, bfad->nvec, error);
1244 bfad->nvec = 1;
1245 error = pci_enable_msix_exact(bfad->pcidev,
1246 msix_entries, 1);
1247 }
1248
1249 if (error) {
1250 printk(KERN_WARNING "bfad%d: "
1251 "pci_enable_msix_exact failed (%d), "
1252 "use line based.\n",
1253 bfad->inst_no, error);
1254 goto line_based;
1255 }
1256
1257 /* Disable INTX in MSI-X mode */
1258 pci_read_config_word(pdev, PCI_COMMAND, &reg);
1259
1260 if (!(reg & PCI_COMMAND_INTX_DISABLE))
1261 pci_write_config_word(pdev, PCI_COMMAND,
1262 reg | PCI_COMMAND_INTX_DISABLE);
1263
1264 /* Save the vectors */
1265 for (i = 0; i < bfad->nvec; i++) {
1266 bfa_trc(bfad, msix_entries[i].vector);
1267 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1268 }
1269
1270 bfa_msix_init(&bfad->bfa, bfad->nvec);
1271
1272 bfad->bfad_flags |= BFAD_MSIX_ON;
1273
1274 return 0;
1275 }
1276
1277 line_based:
1278 error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
1279 BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad);
1280 if (error)
1281 return error;
1282
1283 bfad->bfad_flags |= BFAD_INTX_ON;
1284
1285 return 0;
1286 }
1287
1288 void
1289 bfad_remove_intr(struct bfad_s *bfad)
1290 {
1291 int i;
1292
1293 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1294 for (i = 0; i < bfad->nvec; i++)
1295 free_irq(bfad->msix_tab[i].msix.vector,
1296 &bfad->msix_tab[i]);
1297
1298 pci_disable_msix(bfad->pcidev);
1299 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1300 } else if (bfad->bfad_flags & BFAD_INTX_ON) {
1301 free_irq(bfad->pcidev->irq, bfad);
1302 }
1303 }
1304
1305 /*
1306 * PCI probe entry.
1307 */
1308 int
1309 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1310 {
1311 struct bfad_s *bfad;
1312 int error = -ENODEV, retval, i;
1313
1314 /* For single port cards - only claim function 0 */
1315 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1316 (PCI_FUNC(pdev->devfn) != 0))
1317 return -ENODEV;
1318
1319 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1320 if (!bfad) {
1321 error = -ENOMEM;
1322 goto out;
1323 }
1324
1325 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1326 if (!bfad->trcmod) {
1327 printk(KERN_WARNING "Error alloc trace buffer!\n");
1328 error = -ENOMEM;
1329 goto out_alloc_trace_failure;
1330 }
1331
1332 /* TRACE INIT */
1333 bfa_trc_init(bfad->trcmod);
1334 bfa_trc(bfad, bfad_inst);
1335
1336 /* AEN INIT */
1337 INIT_LIST_HEAD(&bfad->free_aen_q);
1338 INIT_LIST_HEAD(&bfad->active_aen_q);
1339 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
1340 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
1341
1342 if (!(bfad_load_fwimg(pdev))) {
1343 kfree(bfad->trcmod);
1344 goto out_alloc_trace_failure;
1345 }
1346
1347 retval = bfad_pci_init(pdev, bfad);
1348 if (retval) {
1349 printk(KERN_WARNING "bfad_pci_init failure!\n");
1350 error = retval;
1351 goto out_pci_init_failure;
1352 }
1353
1354 mutex_lock(&bfad_mutex);
1355 bfad->inst_no = bfad_inst++;
1356 list_add_tail(&bfad->list_entry, &bfad_list);
1357 mutex_unlock(&bfad_mutex);
1358
1359 /* Initialize the state machine: state set to uninit */
1360 bfa_sm_set_state(bfad, bfad_sm_uninit);
1361
1362 spin_lock_init(&bfad->bfad_lock);
1363 spin_lock_init(&bfad->bfad_aen_spinlock);
1364
1365 pci_set_drvdata(pdev, bfad);
1366
1367 bfad->ref_count = 0;
1368 bfad->pport.bfad = bfad;
1369 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1370 INIT_LIST_HEAD(&bfad->vport_list);
1371
1372 /* Setup the debugfs node for this bfad */
1373 if (bfa_debugfs_enable)
1374 bfad_debugfs_init(&bfad->pport);
1375
1376 retval = bfad_drv_init(bfad);
1377 if (retval != BFA_STATUS_OK)
1378 goto out_drv_init_failure;
1379
1380 bfa_sm_send_event(bfad, BFAD_E_CREATE);
1381
1382 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1383 goto out_bfad_sm_failure;
1384
1385 return 0;
1386
1387 out_bfad_sm_failure:
1388 bfad_hal_mem_release(bfad);
1389 out_drv_init_failure:
1390 /* Remove the debugfs node for this bfad */
1391 kfree(bfad->regdata);
1392 bfad_debugfs_exit(&bfad->pport);
1393 mutex_lock(&bfad_mutex);
1394 bfad_inst--;
1395 list_del(&bfad->list_entry);
1396 mutex_unlock(&bfad_mutex);
1397 bfad_pci_uninit(pdev, bfad);
1398 out_pci_init_failure:
1399 kfree(bfad->trcmod);
1400 out_alloc_trace_failure:
1401 kfree(bfad);
1402 out:
1403 return error;
1404 }
1405
1406 /*
1407 * PCI remove entry.
1408 */
1409 void
1410 bfad_pci_remove(struct pci_dev *pdev)
1411 {
1412 struct bfad_s *bfad = pci_get_drvdata(pdev);
1413 unsigned long flags;
1414
1415 bfa_trc(bfad, bfad->inst_no);
1416
1417 spin_lock_irqsave(&bfad->bfad_lock, flags);
1418 if (bfad->bfad_tsk != NULL) {
1419 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1420 kthread_stop(bfad->bfad_tsk);
1421 } else {
1422 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1423 }
1424
1425 /* Send Event BFAD_E_STOP */
1426 bfa_sm_send_event(bfad, BFAD_E_STOP);
1427
1428 /* Driver detach and dealloc mem */
1429 spin_lock_irqsave(&bfad->bfad_lock, flags);
1430 bfa_detach(&bfad->bfa);
1431 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1432 bfad_hal_mem_release(bfad);
1433
1434 /* Remove the debugfs node for this bfad */
1435 kfree(bfad->regdata);
1436 bfad_debugfs_exit(&bfad->pport);
1437
1438 /* Clean up the BFAD instance */
1439 mutex_lock(&bfad_mutex);
1440 bfad_inst--;
1441 list_del(&bfad->list_entry);
1442 mutex_unlock(&bfad_mutex);
1443 bfad_pci_uninit(pdev, bfad);
1444
1445 kfree(bfad->trcmod);
1446 kfree(bfad);
1447 }
1448
1449 /*
1450 * PCI Error Recovery entry, error detected.
1451 */
1452 static pci_ers_result_t
1453 bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
1454 {
1455 struct bfad_s *bfad = pci_get_drvdata(pdev);
1456 unsigned long flags;
1457 pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
1458
1459 dev_printk(KERN_ERR, &pdev->dev,
1460 "error detected state: %d - flags: 0x%x\n",
1461 state, bfad->bfad_flags);
1462
1463 switch (state) {
1464 case pci_channel_io_normal: /* non-fatal error */
1465 spin_lock_irqsave(&bfad->bfad_lock, flags);
1466 bfad->bfad_flags &= ~BFAD_EEH_BUSY;
1467 /* Suspend/fail all bfa operations */
1468 bfa_ioc_suspend(&bfad->bfa.ioc);
1469 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1470 del_timer_sync(&bfad->hal_tmo);
1471 ret = PCI_ERS_RESULT_CAN_RECOVER;
1472 break;
1473 case pci_channel_io_frozen: /* fatal error */
1474 init_completion(&bfad->comp);
1475 spin_lock_irqsave(&bfad->bfad_lock, flags);
1476 bfad->bfad_flags |= BFAD_EEH_BUSY;
1477 /* Suspend/fail all bfa operations */
1478 bfa_ioc_suspend(&bfad->bfa.ioc);
1479 bfa_fcs_stop(&bfad->bfa_fcs);
1480 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1481 wait_for_completion(&bfad->comp);
1482
1483 bfad_remove_intr(bfad);
1484 del_timer_sync(&bfad->hal_tmo);
1485 pci_disable_device(pdev);
1486 ret = PCI_ERS_RESULT_NEED_RESET;
1487 break;
1488 case pci_channel_io_perm_failure: /* PCI Card is DEAD */
1489 spin_lock_irqsave(&bfad->bfad_lock, flags);
1490 bfad->bfad_flags |= BFAD_EEH_BUSY |
1491 BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
1492 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1493
1494 /* If the error_detected handler is called with the reason
1495 * pci_channel_io_perm_failure - it will subsequently call
1496 * pci_remove() entry point to remove the pci device from the
1497 * system - So defer the cleanup to pci_remove(); cleaning up
1498 * here causes inconsistent state during pci_remove().
1499 */
1500 ret = PCI_ERS_RESULT_DISCONNECT;
1501 break;
1502 default:
1503 WARN_ON(1);
1504 }
1505
1506 return ret;
1507 }
1508
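/*
 * PCI error recovery helper: re-attach the BFA and re-initialize the IOC
 * after a slot reset
 */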
1509 int
1510 restart_bfa(struct bfad_s *bfad)
1511 {
1512 unsigned long flags;
1513 struct pci_dev *pdev = bfad->pcidev;
1514
1515 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
1516 &bfad->meminfo, &bfad->hal_pcidev);
1517
1518 /* Enable interrupts and wait for bfa_init completion */
1519 if (bfad_setup_intr(bfad)) {
1520 dev_printk(KERN_WARNING, &pdev->dev,
1521 "%s: bfad_setup_intr failed\n", bfad->pci_name);
1522 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
1523 return -1;
1524 }
1525
1526 init_completion(&bfad->comp);
1527 spin_lock_irqsave(&bfad->bfad_lock, flags);
1528 bfa_iocfc_init(&bfad->bfa);
1529 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1530
1531 /* Set up an interrupt handler for each vector */
1532 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
1533 bfad_install_msix_handler(bfad))
1534 dev_printk(KERN_WARNING, &pdev->dev,
1535 "%s: install_msix failed.\n", bfad->pci_name);
1536
1537 bfad_init_timer(bfad);
1538 wait_for_completion(&bfad->comp);
1539 bfad_drv_start(bfad);
1540
1541 return 0;
1542 }
1543
1544 /*
1545 * PCI Error Recovery entry, re-initialize the chip.
1546 */
1547 static pci_ers_result_t
1548 bfad_pci_slot_reset(struct pci_dev *pdev)
1549 {
1550 struct bfad_s *bfad = pci_get_drvdata(pdev);
1551 u8 byte;
1552
1553 dev_printk(KERN_ERR, &pdev->dev,
1554 "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
1555
1556 if (pci_enable_device(pdev)) {
1557 dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
1558 "PCI device after reset.\n");
1559 return PCI_ERS_RESULT_DISCONNECT;
1560 }
1561
1562 pci_restore_state(pdev);
1563
1564 /*
1565 * Read some config byte (e.g. the DMA max. payload size, which can
1566 * never be 0xff) to make sure we did not hit another PCI error in
1567 * the middle of recovery. If we did, declare a permanent failure.
1568 */
1569 pci_read_config_byte(pdev, 0x68, &byte);
1570 if (byte == 0xff) {
1571 dev_printk(KERN_ERR, &pdev->dev,
1572 "slot_reset failed ... got another PCI error !\n");
1573 goto out_disable_device;
1574 }
1575
1576 pci_save_state(pdev);
1577 pci_set_master(pdev);
1578
1579 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
1580 if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
1581 goto out_disable_device;
1582
1583 pci_cleanup_aer_uncorrect_error_status(pdev);
1584
1585 if (restart_bfa(bfad) == -1)
1586 goto out_disable_device;
1587
1588 pci_enable_pcie_error_reporting(pdev);
1589 dev_printk(KERN_WARNING, &pdev->dev,
1590 "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
1591
1592 return PCI_ERS_RESULT_RECOVERED;
1593
1594 out_disable_device:
1595 pci_disable_device(pdev);
1596 return PCI_ERS_RESULT_DISCONNECT;
1597 }
1598
1599 static pci_ers_result_t
1600 bfad_pci_mmio_enabled(struct pci_dev *pdev)
1601 {
1602 unsigned long flags;
1603 struct bfad_s *bfad = pci_get_drvdata(pdev);
1604
1605 dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
1606
1607 /* Fetch FW diagnostic information */
1608 bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
1609
1610 /* Cancel all pending IOs */
1611 spin_lock_irqsave(&bfad->bfad_lock, flags);
1612 init_completion(&bfad->comp);
1613 bfa_fcs_stop(&bfad->bfa_fcs);
1614 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1615 wait_for_completion(&bfad->comp);
1616
1617 bfad_remove_intr(bfad);
1618 del_timer_sync(&bfad->hal_tmo);
1619 pci_disable_device(pdev);
1620
1621 return PCI_ERS_RESULT_NEED_RESET;
1622 }
1623
1624 static void
1625 bfad_pci_resume(struct pci_dev *pdev)
1626 {
1627 unsigned long flags;
1628 struct bfad_s *bfad = pci_get_drvdata(pdev);
1629
1630 dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
1631
1632 /* wait until the link is online */
1633 bfad_rport_online_wait(bfad);
1634
1635 spin_lock_irqsave(&bfad->bfad_lock, flags);
1636 bfad->bfad_flags &= ~BFAD_EEH_BUSY;
1637 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1638 }
1639
1640 struct pci_device_id bfad_id_table[] = {
1641 {
1642 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1643 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1644 .subvendor = PCI_ANY_ID,
1645 .subdevice = PCI_ANY_ID,
1646 },
1647 {
1648 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1649 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1650 .subvendor = PCI_ANY_ID,
1651 .subdevice = PCI_ANY_ID,
1652 },
1653 {
1654 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1655 .device = BFA_PCI_DEVICE_ID_CT,
1656 .subvendor = PCI_ANY_ID,
1657 .subdevice = PCI_ANY_ID,
1658 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1659 .class_mask = ~0,
1660 },
1661 {
1662 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1663 .device = BFA_PCI_DEVICE_ID_CT_FC,
1664 .subvendor = PCI_ANY_ID,
1665 .subdevice = PCI_ANY_ID,
1666 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1667 .class_mask = ~0,
1668 },
1669 {
1670 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1671 .device = BFA_PCI_DEVICE_ID_CT2,
1672 .subvendor = PCI_ANY_ID,
1673 .subdevice = PCI_ANY_ID,
1674 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1675 .class_mask = ~0,
1676 },
1677
1678 {
1679 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1680 .device = BFA_PCI_DEVICE_ID_CT2_QUAD,
1681 .subvendor = PCI_ANY_ID,
1682 .subdevice = PCI_ANY_ID,
1683 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1684 .class_mask = ~0,
1685 },
1686 {0, 0},
1687 };
1688
1689 MODULE_DEVICE_TABLE(pci, bfad_id_table);
1690
1691 /*
1692 * PCI error recovery handlers.
1693 */
1694 static struct pci_error_handlers bfad_err_handler = {
1695 .error_detected = bfad_pci_error_detected,
1696 .slot_reset = bfad_pci_slot_reset,
1697 .mmio_enabled = bfad_pci_mmio_enabled,
1698 .resume = bfad_pci_resume,
1699 };
1700
1701 static struct pci_driver bfad_pci_driver = {
1702 .name = BFAD_DRIVER_NAME,
1703 .id_table = bfad_id_table,
1704 .probe = bfad_pci_probe,
1705 .remove = bfad_pci_remove,
1706 .err_handler = &bfad_err_handler,
1707 };
1708
1709 /*
1710 * Driver module init.
1711 */
1712 static int __init
1713 bfad_init(void)
1714 {
1715 int error = 0;
1716
1717 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1718 BFAD_DRIVER_VERSION);
1719
1720 if (num_sgpgs > 0)
1721 num_sgpgs_parm = num_sgpgs;
1722
1723 error = bfad_im_module_init();
1724 if (error) {
1725 error = -ENOMEM;
1726 printk(KERN_WARNING "bfad_im_module_init failure\n");
1727 goto ext;
1728 }
1729
1730 if (strcmp(FCPI_NAME, " fcpim") == 0)
1731 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1732
1733 bfa_auto_recover = ioc_auto_recover;
1734 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1735 bfa_fcs_rport_set_max_logins(max_rport_logins);
1736
1737 error = pci_register_driver(&bfad_pci_driver);
1738 if (error) {
1739 printk(KERN_WARNING "pci_register_driver failure\n");
1740 goto ext;
1741 }
1742
1743 return 0;
1744
1745 ext:
1746 bfad_im_module_exit();
1747 return error;
1748 }
1749
1750 /*
1751 * Driver module exit.
1752 */
1753 static void __exit
1754 bfad_exit(void)
1755 {
1756 pci_unregister_driver(&bfad_pci_driver);
1757 bfad_im_module_exit();
1758 bfad_free_fwimg();
1759 }
1760
1761 /* Firmware handling */
1762 static void
1763 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1764 u32 *bfi_image_size, char *fw_name)
1765 {
1766 const struct firmware *fw;
1767
1768 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1769 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1770 *bfi_image = NULL;
1771 goto out;
1772 }
1773
1774 *bfi_image = vmalloc(fw->size);
1775 if (NULL == *bfi_image) {
1776 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1777 "size=%x!\n", (u32) fw->size);
1778 goto out;
1779 }
1780
1781 memcpy(*bfi_image, fw->data, fw->size);
1782 *bfi_image_size = fw->size/sizeof(u32);
1783 out:
1784 release_firmware(fw);
1785 }
1786
1787 static u32 *
1788 bfad_load_fwimg(struct pci_dev *pdev)
1789 {
1790 if (bfa_asic_id_ct2(pdev->device)) {
1791 if (bfi_image_ct2_size == 0)
1792 bfad_read_firmware(pdev, &bfi_image_ct2,
1793 &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1794 return bfi_image_ct2;
1795 } else if (bfa_asic_id_ct(pdev->device)) {
1796 if (bfi_image_ct_size == 0)
1797 bfad_read_firmware(pdev, &bfi_image_ct,
1798 &bfi_image_ct_size, BFAD_FW_FILE_CT);
1799 return bfi_image_ct;
1800 } else if (bfa_asic_id_cb(pdev->device)) {
1801 if (bfi_image_cb_size == 0)
1802 bfad_read_firmware(pdev, &bfi_image_cb,
1803 &bfi_image_cb_size, BFAD_FW_FILE_CB);
1804 return bfi_image_cb;
1805 }
1806
1807 return NULL;
1808 }
1809
1810 static void
1811 bfad_free_fwimg(void)
1812 {
1813 if (bfi_image_ct2_size && bfi_image_ct2)
1814 vfree(bfi_image_ct2);
1815 if (bfi_image_ct_size && bfi_image_ct)
1816 vfree(bfi_image_ct);
1817 if (bfi_image_cb_size && bfi_image_cb)
1818 vfree(bfi_image_cb);
1819 }
1820
1821 module_init(bfad_init);
1822 module_exit(bfad_exit);
1823 MODULE_LICENSE("GPL");
1824 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1825 MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1826 MODULE_VERSION(BFAD_DRIVER_VERSION);
1827