1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5 */
6
7 #include "efct_driver.h"
8
9 #include "efct_hw.h"
10 #include "efct_unsol.h"
11 #include "efct_scsi.h"
12
/* Global list of all efct adapter instances managed by this driver. */
LIST_HEAD(efct_devices);

/* Module parameter: logging bitmask applied to each adapter at attach
 * time (see efct_device_attach()); read-only after load (perm 0444).
 */
static int logmask;
module_param(logmask, int, 0444);
MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
18
/*
 * Callback table handed to libefc (see efct_efclib_config()): routes
 * mailbox, ELS/BLS, nport and SCSI node events from the libefc state
 * machines back into this driver.
 */
static struct libefc_function_template efct_libefc_templ = {
	.issue_mbox_rqst = efct_issue_mbox_rqst,
	.send_els = efct_els_hw_srrs_send,
	.send_bls = efct_efc_bls_send,

	.new_nport = efct_scsi_tgt_new_nport,
	.del_nport = efct_scsi_tgt_del_nport,
	.scsi_new_node = efct_scsi_new_initiator,
	.scsi_del_node = efct_scsi_del_initiator,
	.hw_seq_free = efct_efc_hw_sequence_free,
};
30
/*
 * One-time driver-wide initialization: bring up the SCSI target-server
 * back end, then register with the FC transport layer.
 * Returns 0 on success or a negative error code.
 */
static int
efct_device_init(void)
{
	int err;

	/* driver-wide init for target-server */
	err = efct_scsi_tgt_driver_init();
	if (err) {
		pr_err("efct_scsi_tgt_init failed rc=%d\n", err);
		return err;
	}

	err = efct_scsi_reg_fc_transport();
	if (!err)
		return 0;

	/* unwind the target-server init on registration failure */
	pr_err("failed to register to FC host\n");
	efct_scsi_tgt_driver_exit();
	return err;
}
52
/* Undo efct_device_init(): unregister from the FC transport layer,
 * then tear down the SCSI target-server back end.
 */
static void
efct_device_shutdown(void)
{
	efct_scsi_release_fc_transport();

	efct_scsi_tgt_driver_exit();
}
60
61 static void *
efct_device_alloc(u32 nid)62 efct_device_alloc(u32 nid)
63 {
64 struct efct *efct = NULL;
65
66 efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
67 if (!efct)
68 return efct;
69
70 INIT_LIST_HEAD(&efct->list_entry);
71 list_add_tail(&efct->list_entry, &efct_devices);
72
73 return efct;
74 }
75
76 static void
efct_teardown_msix(struct efct * efct)77 efct_teardown_msix(struct efct *efct)
78 {
79 u32 i;
80
81 for (i = 0; i < efct->n_msix_vec; i++) {
82 free_irq(pci_irq_vector(efct->pci, i),
83 &efct->intr_context[i]);
84 }
85
86 pci_free_irq_vectors(efct->pci);
87 }
88
89 static int
efct_efclib_config(struct efct * efct,struct libefc_function_template * tt)90 efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
91 {
92 struct efc *efc;
93 struct sli4 *sli;
94 int rc = 0;
95
96 efc = kzalloc(sizeof(*efc), GFP_KERNEL);
97 if (!efc)
98 return -ENOMEM;
99
100 efct->efcport = efc;
101
102 memcpy(&efc->tt, tt, sizeof(*tt));
103 efc->base = efct;
104 efc->pci = efct->pci;
105
106 efc->def_wwnn = efct_get_wwnn(&efct->hw);
107 efc->def_wwpn = efct_get_wwpn(&efct->hw);
108 efc->enable_tgt = 1;
109 efc->log_level = EFC_LOG_LIB;
110
111 sli = &efct->hw.sli;
112 efc->max_xfer_size = sli->sge_supported_length *
113 sli_get_max_sgl(&efct->hw.sli);
114 efc->sli = sli;
115 efc->fcfi = efct->hw.fcf_indicator;
116
117 rc = efcport_init(efc);
118 if (rc)
119 efc_log_err(efc, "efcport_init failed\n");
120
121 return rc;
122 }
123
124 static int efct_request_firmware_update(struct efct *efct);
125
126 static const char*
efct_pci_model(u16 device)127 efct_pci_model(u16 device)
128 {
129 switch (device) {
130 case EFCT_DEVICE_LANCER_G6: return "LPE31004";
131 case EFCT_DEVICE_LANCER_G7: return "LPE36000";
132 default: return "unknown";
133 }
134 }
135
136 static int
efct_device_attach(struct efct * efct)137 efct_device_attach(struct efct *efct)
138 {
139 u32 rc = 0, i = 0;
140
141 if (efct->attached) {
142 efc_log_err(efct, "Device is already attached\n");
143 return -EIO;
144 }
145
146 snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
147 efct->instance_index);
148
149 efct->logmask = logmask;
150 efct->filter_def = EFCT_DEFAULT_FILTER;
151 efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
152
153 efct->model = efct_pci_model(efct->pci->device);
154
155 efct->efct_req_fw_upgrade = true;
156
157 /* Allocate transport object and bring online */
158 efct->xport = efct_xport_alloc(efct);
159 if (!efct->xport) {
160 efc_log_err(efct, "failed to allocate transport object\n");
161 rc = -ENOMEM;
162 goto out;
163 }
164
165 rc = efct_xport_attach(efct->xport);
166 if (rc) {
167 efc_log_err(efct, "failed to attach transport object\n");
168 goto xport_out;
169 }
170
171 rc = efct_xport_initialize(efct->xport);
172 if (rc) {
173 efc_log_err(efct, "failed to initialize transport object\n");
174 goto xport_out;
175 }
176
177 rc = efct_efclib_config(efct, &efct_libefc_templ);
178 if (rc) {
179 efc_log_err(efct, "failed to init efclib\n");
180 goto efclib_out;
181 }
182
183 for (i = 0; i < efct->n_msix_vec; i++) {
184 efc_log_debug(efct, "irq %d enabled\n", i);
185 enable_irq(pci_irq_vector(efct->pci, i));
186 }
187
188 efct->attached = true;
189
190 if (efct->efct_req_fw_upgrade)
191 efct_request_firmware_update(efct);
192
193 return rc;
194
195 efclib_out:
196 efct_xport_detach(efct->xport);
197 xport_out:
198 efct_xport_free(efct->xport);
199 efct->xport = NULL;
200 out:
201 return rc;
202 }
203
/*
 * Detach a previously attached adapter: shut down the transport, mask
 * all MSI-X vectors, then free the transport and libefc port objects.
 * Returns -EIO if @efct is NULL or was never attached, else 0.
 */
static int
efct_device_detach(struct efct *efct)
{
	int i;

	if (!efct || !efct->attached) {
		pr_err("Device is not attached\n");
		return -EIO;
	}

	/* best-effort shutdown; log the timeout but keep tearing down */
	if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
		efc_log_err(efct, "Transport Shutdown timed out\n");

	for (i = 0; i < efct->n_msix_vec; i++)
		disable_irq(pci_irq_vector(efct->pci, i));

	efct_xport_detach(efct->xport);

	efct_xport_free(efct->xport);
	efct->xport = NULL;

	/* release the libefc port allocated in efct_efclib_config() */
	efcport_destroy(efct->efcport);
	kfree(efct->efcport);

	efct->attached = false;

	return 0;
}
232
233 static void
efct_fw_write_cb(int status,u32 actual_write_length,u32 change_status,void * arg)234 efct_fw_write_cb(int status, u32 actual_write_length,
235 u32 change_status, void *arg)
236 {
237 struct efct_fw_write_result *result = arg;
238
239 result->status = status;
240 result->actual_xfer = actual_write_length;
241 result->change_status = change_status;
242
243 complete(&result->done);
244 }
245
/*
 * Synchronously stream a firmware image to the adapter's flash.
 *
 * The image is sent in FW_WRITE_BUFSIZE chunks through one DMA buffer;
 * each chunk is submitted via efct_hw_firmware_write() and the caller
 * sleeps on a completion until efct_fw_write_cb() reports the result.
 * On the final chunk the firmware's change-status code is returned
 * through @change_status.
 *
 * Returns 0 on success, -ENOMEM if the DMA buffer cannot be allocated,
 * -ENXIO if the wait is interrupted by a signal, or -EFAULT when the
 * hardware reports an error or zero-length transfer.
 */
static int
efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
		    u8 *change_status)
{
	int rc = 0;
	u32 bytes_left;
	u32 xfer_size;
	u32 offset;
	struct efc_dma dma;
	int last = 0;
	struct efct_fw_write_result result;

	init_completion(&result.done);

	bytes_left = buf_len;
	offset = 0;

	dma.size = FW_WRITE_BUFSIZE;
	dma.virt = dma_alloc_coherent(&efct->pci->dev,
				      dma.size, &dma.phys, GFP_DMA);
	if (!dma.virt)
		return -ENOMEM;

	while (bytes_left > 0) {
		if (bytes_left > FW_WRITE_BUFSIZE)
			xfer_size = FW_WRITE_BUFSIZE;
		else
			xfer_size = bytes_left;

		memcpy(dma.virt, buf + offset, xfer_size);

		/* flag the final chunk so the firmware commits the image */
		if (bytes_left == xfer_size)
			last = 1;

		efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
				       last, efct_fw_write_cb, &result);

		if (wait_for_completion_interruptible(&result.done) != 0) {
			rc = -ENXIO;
			break;
		}

		if (result.actual_xfer == 0 || result.status != 0) {
			rc = -EFAULT;
			break;
		}

		if (last)
			*change_status = result.change_status;

		/* advance by what the hardware actually accepted */
		bytes_left -= result.actual_xfer;
		offset += result.actual_xfer;
	}

	dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
	return rc;
}
303
304 static int
efct_fw_reset(struct efct * efct)305 efct_fw_reset(struct efct *efct)
306 {
307 /*
308 * Firmware reset to activate the new firmware.
309 * Function 0 will update and load the new firmware
310 * during attach.
311 */
312 if (timer_pending(&efct->xport->stats_timer))
313 del_timer(&efct->xport->stats_timer);
314
315 if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
316 efc_log_info(efct, "failed to reset firmware\n");
317 return -EIO;
318 }
319
320 efc_log_info(efct, "successfully reset firmware.Now resetting port\n");
321
322 efct_device_detach(efct);
323 return efct_device_attach(efct);
324 }
325
326 static int
efct_request_firmware_update(struct efct * efct)327 efct_request_firmware_update(struct efct *efct)
328 {
329 int rc = 0;
330 u8 file_name[256], fw_change_status = 0;
331 const struct firmware *fw;
332 struct efct_hw_grp_hdr *fw_image;
333
334 snprintf(file_name, 256, "%s.grp", efct->model);
335
336 rc = request_firmware(&fw, file_name, &efct->pci->dev);
337 if (rc) {
338 efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
339 return rc;
340 }
341
342 fw_image = (struct efct_hw_grp_hdr *)fw->data;
343
344 if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
345 strnlen(fw_image->revision, 16))) {
346 efc_log_debug(efct,
347 "Skip update. Firmware is already up to date.\n");
348 goto exit;
349 }
350
351 efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
352 efct->hw.sli.fw_name[0], fw_image->revision);
353
354 rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
355 if (rc) {
356 efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
357 goto exit;
358 }
359
360 efc_log_info(efct, "Firmware updated successfully\n");
361 switch (fw_change_status) {
362 case 0x00:
363 efc_log_info(efct, "New firmware is active.\n");
364 break;
365 case 0x01:
366 efc_log_info(efct,
367 "System reboot needed to activate the new firmware\n");
368 break;
369 case 0x02:
370 case 0x03:
371 efc_log_info(efct,
372 "firmware reset to activate the new firmware\n");
373 efct_fw_reset(efct);
374 break;
375 default:
376 efc_log_info(efct, "Unexpected value change_status:%d\n",
377 fw_change_status);
378 break;
379 }
380
381 exit:
382 release_firmware(fw);
383
384 return rc;
385 }
386
387 static void
efct_device_free(struct efct * efct)388 efct_device_free(struct efct *efct)
389 {
390 if (efct) {
391 list_del(&efct->list_entry);
392 kfree(efct);
393 }
394 }
395
396 static int
efct_device_interrupts_required(struct efct * efct)397 efct_device_interrupts_required(struct efct *efct)
398 {
399 int rc;
400
401 rc = efct_hw_setup(&efct->hw, efct, efct->pci);
402 if (rc < 0)
403 return rc;
404
405 return efct->hw.config.n_eq;
406 }
407
408 static irqreturn_t
efct_intr_thread(int irq,void * handle)409 efct_intr_thread(int irq, void *handle)
410 {
411 struct efct_intr_context *intr_ctx = handle;
412 struct efct *efct = intr_ctx->efct;
413
414 efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
415 return IRQ_HANDLED;
416 }
417
/* Hard-IRQ half of the MSI-X handler: defer all work to the thread. */
static irqreturn_t
efct_intr_msix(int irq, void *handle)
{
	return IRQ_WAKE_THREAD;
}
423
/*
 * Allocate exactly @num_intrs MSI-X vectors (affinity-spread across
 * CPUs) and register a threaded handler for each. On any registration
 * failure the previously requested vectors are released and the vector
 * pool is freed. Returns 0 on success or a negative error code.
 */
static int
efct_setup_msix(struct efct *efct, u32 num_intrs)
{
	int rc = 0, i;

	if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
		dev_err(&efct->pci->dev,
			"%s : MSI-X not available\n", __func__);
		return -EIO;
	}

	efct->n_msix_vec = num_intrs;

	/* min == max: demand the full vector count or fail */
	rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
				   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

	if (rc < 0) {
		dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
		return rc;
	}

	for (i = 0; i < num_intrs; i++) {
		struct efct_intr_context *intr_ctx = NULL;

		intr_ctx = &efct->intr_context[i];
		intr_ctx->efct = efct;
		intr_ctx->index = i;

		/* hard-IRQ half only wakes the thread; the real work
		 * happens in efct_intr_thread()
		 */
		rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
					  efct_intr_msix, efct_intr_thread, 0,
					  EFCT_DRIVER_NAME, intr_ctx);
		if (rc) {
			dev_err(&efct->pci->dev,
				"Failed to register %d vector: %d\n", i, rc);
			goto out;
		}
	}

	return rc;

out:
	/* unwind: free only the vectors that were successfully requested */
	while (--i >= 0)
		free_irq(pci_irq_vector(efct->pci, i),
			 &efct->intr_context[i]);

	pci_free_irq_vectors(efct->pci);
	return rc;
}
472
473 static struct pci_device_id efct_pci_table[] = {
474 {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
475 {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
476 {} /* terminate list */
477 };
478
479 static int
efct_pci_probe(struct pci_dev * pdev,const struct pci_device_id * ent)480 efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
481 {
482 struct efct *efct = NULL;
483 int rc;
484 u32 i, r;
485 int num_interrupts = 0;
486 int nid;
487
488 dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
489
490 rc = pci_enable_device_mem(pdev);
491 if (rc)
492 return rc;
493
494 pci_set_master(pdev);
495
496 rc = pci_set_mwi(pdev);
497 if (rc) {
498 dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
499 goto mwi_out;
500 }
501
502 rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
503 if (rc) {
504 dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
505 goto req_regions_out;
506 }
507
508 /* Fetch the Numa node id for this device */
509 nid = dev_to_node(&pdev->dev);
510 if (nid < 0) {
511 dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
512 nid = 0;
513 }
514
515 /* Allocate efct */
516 efct = efct_device_alloc(nid);
517 if (!efct) {
518 dev_err(&pdev->dev, "Failed to allocate efct\n");
519 rc = -ENOMEM;
520 goto alloc_out;
521 }
522
523 efct->pci = pdev;
524 efct->numa_node = nid;
525
526 /* Map all memory BARs */
527 for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
528 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
529 efct->reg[r] = ioremap(pci_resource_start(pdev, i),
530 pci_resource_len(pdev, i));
531 r++;
532 }
533
534 /*
535 * If the 64-bit attribute is set, both this BAR and the
536 * next form the complete address. Skip processing the
537 * next BAR.
538 */
539 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
540 i++;
541 }
542
543 pci_set_drvdata(pdev, efct);
544
545 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
546 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
547 dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
548 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
549 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
550 dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
551 rc = -1;
552 goto dma_mask_out;
553 }
554 }
555
556 num_interrupts = efct_device_interrupts_required(efct);
557 if (num_interrupts < 0) {
558 efc_log_err(efct, "efct_device_interrupts_required failed\n");
559 rc = -1;
560 goto dma_mask_out;
561 }
562
563 /*
564 * Initialize MSIX interrupts, note,
565 * efct_setup_msix() enables the interrupt
566 */
567 rc = efct_setup_msix(efct, num_interrupts);
568 if (rc) {
569 dev_err(&pdev->dev, "Can't setup msix\n");
570 goto dma_mask_out;
571 }
572 /* Disable interrupt for now */
573 for (i = 0; i < efct->n_msix_vec; i++) {
574 efc_log_debug(efct, "irq %d disabled\n", i);
575 disable_irq(pci_irq_vector(efct->pci, i));
576 }
577
578 rc = efct_device_attach(efct);
579 if (rc)
580 goto attach_out;
581
582 return 0;
583
584 attach_out:
585 efct_teardown_msix(efct);
586 dma_mask_out:
587 pci_set_drvdata(pdev, NULL);
588
589 for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
590 if (efct->reg[i])
591 iounmap(efct->reg[i]);
592 }
593 efct_device_free(efct);
594 alloc_out:
595 pci_release_regions(pdev);
596 req_regions_out:
597 pci_clear_mwi(pdev);
598 mwi_out:
599 pci_disable_device(pdev);
600 return rc;
601 }
602
/*
 * PCI remove callback: tear down in reverse order of probe - detach
 * the adapter, release MSI-X, unmap BARs, free the efct object, then
 * release PCI regions and disable the device.
 */
static void
efct_pci_remove(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);
	u32 i;

	if (!efct)
		return;

	efct_device_detach(efct);

	efct_teardown_msix(efct);

	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
		if (efct->reg[i])
			iounmap(efct->reg[i]);
	}

	pci_set_drvdata(pdev, NULL);

	efct_device_free(efct);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
}
629
/*
 * Quiesce the adapter ahead of a PCI-level reset: detach it and drop
 * its MSI-X vectors, then disable the PCI device itself.
 * @efct may be NULL (e.g. probe never completed); @pdev is disabled
 * unconditionally either way.
 */
static void
efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
{
	if (efct) {
		efc_log_debug(efct,
			      "PCI channel disable preparing for reset\n");
		efct_device_detach(efct);
		/* Disable interrupt and pci device */
		efct_teardown_msix(efct);
	}
	pci_disable_device(pdev);
}
642
643 static void
efct_device_prep_for_recover(struct efct * efct)644 efct_device_prep_for_recover(struct efct *efct)
645 {
646 if (efct) {
647 efc_log_debug(efct, "PCI channel preparing for recovery\n");
648 efct_hw_io_abort_all(&efct->hw);
649 }
650 }
651
652 /**
653 * efct_pci_io_error_detected - method for handling PCI I/O error
654 * @pdev: pointer to PCI device.
655 * @state: the current PCI connection state.
656 *
657 * This routine is registered to the PCI subsystem for error handling. This
658 * function is called by the PCI subsystem after a PCI bus error affecting
659 * this device has been detected. When this routine is invoked, it dispatches
660 * device error detected handling routine, which will perform the proper
661 * error detected operation.
662 *
663 * Return codes
664 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
665 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
666 */
667 static pci_ers_result_t
efct_pci_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)668 efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
669 {
670 struct efct *efct = pci_get_drvdata(pdev);
671 pci_ers_result_t rc;
672
673 switch (state) {
674 case pci_channel_io_normal:
675 efct_device_prep_for_recover(efct);
676 rc = PCI_ERS_RESULT_CAN_RECOVER;
677 break;
678 case pci_channel_io_frozen:
679 efct_device_prep_for_reset(efct, pdev);
680 rc = PCI_ERS_RESULT_NEED_RESET;
681 break;
682 case pci_channel_io_perm_failure:
683 efct_device_detach(efct);
684 rc = PCI_ERS_RESULT_DISCONNECT;
685 break;
686 default:
687 efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
688 efct_device_prep_for_reset(efct, pdev);
689 rc = PCI_ERS_RESULT_NEED_RESET;
690 break;
691 }
692
693 return rc;
694 }
695
/*
 * PCI error-recovery slot_reset callback: re-enable the device after a
 * slot reset, re-establish MSI-X, then bounce the adapter (detach +
 * attach) to bring it back online. MSI-X or attach failures are logged
 * but the callback still reports PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t
efct_pci_io_slot_reset(struct pci_dev *pdev)
{
	int rc;
	struct efct *efct = pci_get_drvdata(pdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		efc_log_err(efct, "failed to enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	/*
	 * NOTE(review): the comment above refers to pci_restore_state(),
	 * but no restore call is visible in this function - confirm whether
	 * the PCI core performs the restore before invoking this callback.
	 */

	pci_save_state(pdev);

	pci_set_master(pdev);

	rc = efct_setup_msix(efct, efct->n_msix_vec);
	if (rc)
		efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
			    rc);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device to online*/
	efct_device_attach(efct);

	return PCI_ERS_RESULT_RECOVERED;
}
729
/*
 * PCI error-recovery resume callback: bring the adapter back by
 * bouncing it - full detach followed by re-attach.
 */
static void
efct_pci_io_resume(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device to online*/
	efct_device_attach(efct);
}
740
741 MODULE_DEVICE_TABLE(pci, efct_pci_table);
742
743 static struct pci_error_handlers efct_pci_err_handler = {
744 .error_detected = efct_pci_io_error_detected,
745 .slot_reset = efct_pci_io_slot_reset,
746 .resume = efct_pci_io_resume,
747 };
748
749 static struct pci_driver efct_pci_driver = {
750 .name = EFCT_DRIVER_NAME,
751 .id_table = efct_pci_table,
752 .probe = efct_pci_probe,
753 .remove = efct_pci_remove,
754 .err_handler = &efct_pci_err_handler,
755 };
756
757 static
efct_init(void)758 int __init efct_init(void)
759 {
760 int rc;
761
762 rc = efct_device_init();
763 if (rc) {
764 pr_err("efct_device_init failed rc=%d\n", rc);
765 return rc;
766 }
767
768 rc = pci_register_driver(&efct_pci_driver);
769 if (rc) {
770 pr_err("pci_register_driver failed rc=%d\n", rc);
771 efct_device_shutdown();
772 }
773
774 return rc;
775 }
776
/* Module exit point: reverse of efct_init(). */
static void __exit efct_exit(void)
{
	pci_unregister_driver(&efct_pci_driver);
	efct_device_shutdown();
}
782
/* Module entry/exit registration and metadata. */
module_init(efct_init);
module_exit(efct_exit);
MODULE_VERSION(EFCT_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
788