Lines Matching full:dd

126 static int hfi1_create_kctxt(struct hfi1_devdata *dd,  in hfi1_create_kctxt()  argument
135 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); in hfi1_create_kctxt()
137 dd_dev_err(dd, "Kernel receive context allocation failed\n"); in hfi1_create_kctxt()
160 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); in hfi1_create_kctxt()
162 dd_dev_err(dd, "Kernel send context allocation failed\n"); in hfi1_create_kctxt()
173 int hfi1_create_kctxts(struct hfi1_devdata *dd) in hfi1_create_kctxts() argument
178 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd), in hfi1_create_kctxts()
179 GFP_KERNEL, dd->node); in hfi1_create_kctxts()
180 if (!dd->rcd) in hfi1_create_kctxts()
183 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_create_kctxts()
184 ret = hfi1_create_kctxt(dd, dd->pport); in hfi1_create_kctxts()
191 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) in hfi1_create_kctxts()
192 hfi1_free_ctxt(dd->rcd[i]); in hfi1_create_kctxts()
195 kfree(dd->rcd); in hfi1_create_kctxts()
196 dd->rcd = NULL; in hfi1_create_kctxts()
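
The matches above outline a create-all-or-unwind idiom: dd->rcd is allocated with kcalloc_node() on the device's NUMA node, each kernel context is created in a loop, and on failure every context built so far is freed before the array itself (note the `dd->rcd &&` guard at line 191). A minimal userspace C analogue of the pattern; create_ctxt()/free_ctxt() are hypothetical stand-ins, not the driver's API:

    #include <stdlib.h>

    struct ctxt { int id; };

    /* Hypothetical stand-ins for hfi1_create_kctxt()/hfi1_free_ctxt(). */
    static struct ctxt *create_ctxt(int id)
    {
        struct ctxt *c = malloc(sizeof(*c));

        if (c)
            c->id = id;
        return c;
    }

    static void free_ctxt(struct ctxt *c)
    {
        free(c); /* free(NULL) is a no-op, so unwinding may pass NULLs */
    }

    /* Create n contexts; on any failure, unwind everything built so far. */
    static int create_all(struct ctxt ***out, int n)
    {
        struct ctxt **arr = calloc(n, sizeof(*arr)); /* kcalloc_node analogue */
        int i;

        if (!arr)
            return -1;
        for (i = 0; i < n; i++) {
            arr[i] = create_ctxt(i);
            if (!arr[i])
                goto bail;
        }
        *out = arr;
        return 0;
    bail:
        for (i = 0; i < n; i++)
            free_ctxt(arr[i]);
        free(arr);
        *out = NULL;
        return -1;
    }
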
219 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
220 rcd->dd->rcd[rcd->ctxt] = NULL; in hfi1_rcd_free()
221 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags); in hfi1_rcd_free()
223 hfi1_free_ctxtdata(rcd->dd, rcd); in hfi1_rcd_free()
258 * @dd: pointer to a valid devdata structure
266 static int allocate_rcd_index(struct hfi1_devdata *dd, in allocate_rcd_index() argument
272 spin_lock_irqsave(&dd->uctxt_lock, flags); in allocate_rcd_index()
273 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) in allocate_rcd_index()
274 if (!dd->rcd[ctxt]) in allocate_rcd_index()
277 if (ctxt < dd->num_rcv_contexts) { in allocate_rcd_index()
279 dd->rcd[ctxt] = rcd; in allocate_rcd_index()
282 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in allocate_rcd_index()
284 if (ctxt >= dd->num_rcv_contexts) in allocate_rcd_index()
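
allocate_rcd_index() scans dd->rcd for the first NULL slot while holding uctxt_lock, publishes the new context there, and reports failure when every slot is taken. A hedged userspace analogue, with a pthread mutex standing in for the irqsave spinlock and -EBUSY assumed as the error code (the actual errno is outside the matched lines):

    #include <errno.h>
    #include <pthread.h>

    #define NUM_CTXTS 16

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slots[NUM_CTXTS];

    /* Find the first free slot and publish rcd there, all under the lock. */
    static int allocate_index(void *rcd, unsigned int *ctxt_out)
    {
        unsigned int ctxt;

        pthread_mutex_lock(&slot_lock);
        for (ctxt = 0; ctxt < NUM_CTXTS; ctxt++)
            if (!slots[ctxt])
                break;
        if (ctxt < NUM_CTXTS)
            slots[ctxt] = rcd;
        pthread_mutex_unlock(&slot_lock);

        if (ctxt >= NUM_CTXTS)
            return -EBUSY; /* assumed: no free context slot */
        *ctxt_out = ctxt;
        return 0;
    }

Publishing the pointer inside the critical section is what lets hfi1_rcd_free() (lines 219-221 above) clear the same slot under the same lock without racing the allocator.
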
295 * @dd: pointer to a valid devdata structure
304 struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, in hfi1_rcd_get_by_index_safe() argument
307 if (ctxt < dd->num_rcv_contexts) in hfi1_rcd_get_by_index_safe()
308 return hfi1_rcd_get_by_index(dd, ctxt); in hfi1_rcd_get_by_index_safe()
315 * @dd: pointer to a valid devdata structure
325 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) in hfi1_rcd_get_by_index() argument
330 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
331 if (dd->rcd[ctxt]) { in hfi1_rcd_get_by_index()
332 rcd = dd->rcd[ctxt]; in hfi1_rcd_get_by_index()
336 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_rcd_get_by_index()
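
hfi1_rcd_get_by_index() reads a slot under uctxt_lock and, in lines not matched here, takes a reference before returning, so the context stays alive after the lock drops; hfi1_rcd_get_by_index_safe() adds a bounds check first. A sketch of that bounds-checked, referenced lookup (the refcount take is an assumption inferred from the "get" naming):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    #define NUM_SLOTS 16

    struct rcd { atomic_int refcount; };

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct rcd *slots[NUM_SLOTS];

    /* Bounds-checked lookup that returns a referenced context or NULL. */
    static struct rcd *get_by_index_safe(unsigned int ctxt)
    {
        struct rcd *r = NULL;

        if (ctxt >= NUM_SLOTS)
            return NULL; /* the "_safe" part */
        pthread_mutex_lock(&slot_lock);
        if (slots[ctxt]) {
            r = slots[ctxt];
            atomic_fetch_add(&r->refcount, 1); /* take ref before unlock */
        }
        pthread_mutex_unlock(&slot_lock);
        return r;
    }
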
348 struct hfi1_devdata *dd = ppd->dd; in hfi1_create_ctxtdata() local
353 if (dd->rcv_entries.nctxt_extra > in hfi1_create_ctxtdata()
354 dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) in hfi1_create_ctxtdata()
355 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - in hfi1_create_ctxtdata()
356 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)); in hfi1_create_ctxtdata()
363 ret = allocate_rcd_index(dd, rcd, &ctxt); in hfi1_create_ctxtdata()
373 rcd->dd = dd; in hfi1_create_ctxtdata()
375 rcd->rcv_array_groups = dd->rcv_entries.ngroups; in hfi1_create_ctxtdata()
395 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
397 base = ctxt * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
401 (ctxt * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
404 u16 ct = ctxt - dd->first_dyn_alloc_ctxt; in hfi1_create_ctxtdata()
406 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) + in hfi1_create_ctxtdata()
408 if (ct < dd->rcv_entries.nctxt_extra) { in hfi1_create_ctxtdata()
409 base += ct * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata()
412 base += dd->rcv_entries.nctxt_extra + in hfi1_create_ctxtdata()
413 (ct * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata()
416 rcd->eager_base = base * dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
434 dd->rcv_entries.group_size; in hfi1_create_ctxtdata()
437 dd->rcv_entries.group_size); in hfi1_create_ctxtdata()
439 dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n", in hfi1_create_ctxtdata()
453 * multiple of dd->rcv_entries.group_size. in hfi1_create_ctxtdata()
482 if (ctxt < dd->first_dyn_alloc_ctxt) { in hfi1_create_ctxtdata()
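
The base computation in hfi1_create_ctxtdata() spreads "extra" RcvArray groups across contexts: early contexts in each class get ngroups + 1 groups, later ones get ngroups, and eager_base is the resulting group index scaled by group_size. A standalone sketch of that arithmetic; field names follow the matches, but the exact guard choosing between the two kernel-context branches is inferred from partial context:

    /* Inferred from the partial matches above; a sketch, not driver code. */
    struct rcv_entries {
        unsigned int ngroups;     /* base groups per context */
        unsigned int nctxt_extra; /* contexts granted one extra group */
        unsigned int group_size;  /* RcvArray entries per group */
    };

    static unsigned int eager_base(unsigned int ct, const struct rcv_entries *re)
    {
        unsigned int base;

        if (ct < re->nctxt_extra)
            base = ct * (re->ngroups + 1);             /* early: +1 group each */
        else
            base = re->nctxt_extra + ct * re->ngroups; /* extras already used */
        return base * re->group_size;
    }

With ngroups = 4, nctxt_extra = 2, group_size = 8 (all hypothetical), contexts 0, 1, 2 start at groups 0, 5, 10, i.e. eager_base 0, 40, 80; the two branches meet without gaps at ct == nctxt_extra.
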
526 struct hfi1_devdata *dd = ppd->dd; in set_link_ipg() local
573 write_csr(dd, SEND_STATIC_RATE_CONTROL, src); in set_link_ipg()
631 struct hfi1_devdata *dd, u8 hw_pidx, u8 port) in hfi1_init_pportdata() argument
637 ppd->dd = dd; in hfi1_init_pportdata()
698 dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port); in hfi1_init_pportdata()
705 static int loadtime_init(struct hfi1_devdata *dd) in loadtime_init() argument
712 * @dd: the hfi1_ib device
718 static int init_after_reset(struct hfi1_devdata *dd) in init_after_reset() argument
727 for (i = 0; i < dd->num_rcv_contexts; i++) { in init_after_reset()
728 rcd = hfi1_rcd_get_by_index(dd, i); in init_after_reset()
729 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | in init_after_reset()
734 pio_send_control(dd, PSC_GLOBAL_DISABLE); in init_after_reset()
735 for (i = 0; i < dd->num_send_contexts; i++) in init_after_reset()
736 sc_disable(dd->send_contexts[i].sc); in init_after_reset()
741 static void enable_chip(struct hfi1_devdata *dd) in enable_chip() argument
748 pio_send_control(dd, PSC_GLOBAL_ENABLE); in enable_chip()
754 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) { in enable_chip()
755 rcd = hfi1_rcd_get_by_index(dd, i); in enable_chip()
769 hfi1_rcvctrl(dd, rcvmask, rcd); in enable_chip()
777 * @dd: the hfi1_ib device
779 static int create_workqueues(struct hfi1_devdata *dd) in create_workqueues() argument
784 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
785 ppd = dd->pport + pidx; in create_workqueues()
793 dd->unit, pidx); in create_workqueues()
807 dd->unit, pidx); in create_workqueues()
815 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in create_workqueues()
816 ppd = dd->pport + pidx; in create_workqueues()
831 * @dd: the hfi1_ib device
833 static void destroy_workqueues(struct hfi1_devdata *dd) in destroy_workqueues() argument
838 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in destroy_workqueues()
839 ppd = dd->pport + pidx; in destroy_workqueues()
855 * @dd: valid devdata
858 static void enable_general_intr(struct hfi1_devdata *dd) in enable_general_intr() argument
860 set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true); in enable_general_intr()
861 set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true); in enable_general_intr()
862 set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true); in enable_general_intr()
863 set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true); in enable_general_intr()
864 set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true); in enable_general_intr()
865 set_intr_bits(dd, IS_DC_START, IS_DC_END, true); in enable_general_intr()
866 set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true); in enable_general_intr()
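
enable_general_intr() enables whole interrupt-source ranges at once through set_intr_bits(dd, first, last, true). In the driver this programs CSRs; a minimal flat-bitmap model of range-based enabling (the layout here is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define BITS_PER_WORD 64

    /* Set or clear interrupt-source bits [first, last] in a word array. */
    static void set_intr_bits(uint64_t *mask, unsigned int first,
                              unsigned int last, bool enable)
    {
        for (unsigned int bit = first; bit <= last; bit++) {
            uint64_t *w = &mask[bit / BITS_PER_WORD];
            uint64_t b = 1ULL << (bit % BITS_PER_WORD);

            if (enable)
                *w |= b;
            else
                *w &= ~b;
        }
    }
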
871 * @dd: the hfi1_ib device
884 int hfi1_init(struct hfi1_devdata *dd, int reinit) in hfi1_init() argument
893 dd->process_pio_send = hfi1_verbs_send_pio; in hfi1_init()
894 dd->process_dma_send = hfi1_verbs_send_dma; in hfi1_init()
895 dd->pio_inline_send = pio_copy; in hfi1_init()
896 dd->process_vnic_dma_send = hfi1_vnic_send_dma; in hfi1_init()
898 if (is_ax(dd)) { in hfi1_init()
899 atomic_set(&dd->drop_packet, DROP_PACKET_ON); in hfi1_init()
900 dd->do_drop = true; in hfi1_init()
902 atomic_set(&dd->drop_packet, DROP_PACKET_OFF); in hfi1_init()
903 dd->do_drop = false; in hfi1_init()
907 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
908 ppd = dd->pport + pidx; in hfi1_init()
913 ret = init_after_reset(dd); in hfi1_init()
915 ret = loadtime_init(dd); in hfi1_init()
919 /* dd->rcd can be NULL if early initialization failed */ in hfi1_init()
920 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { in hfi1_init()
927 rcd = hfi1_rcd_get_by_index(dd, i); in hfi1_init()
931 lastfail = hfi1_create_rcvhdrq(dd, rcd); in hfi1_init()
937 dd_dev_err(dd, in hfi1_init()
946 len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS * in hfi1_init()
947 sizeof(*dd->events)); in hfi1_init()
948 dd->events = vmalloc_user(len); in hfi1_init()
949 if (!dd->events) in hfi1_init()
950 dd_dev_err(dd, "Failed to allocate user events page\n"); in hfi1_init()
955 dd->status = vmalloc_user(PAGE_SIZE); in hfi1_init()
956 if (!dd->status) in hfi1_init()
957 dd_dev_err(dd, "Failed to allocate dev status page\n"); in hfi1_init()
958 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
959 ppd = dd->pport + pidx; in hfi1_init()
960 if (dd->status) in hfi1_init()
962 ppd->statusp = &dd->status->port; in hfi1_init()
968 enable_chip(dd); in hfi1_init()
975 if (dd->status) in hfi1_init()
976 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT | in hfi1_init()
980 enable_general_intr(dd); in hfi1_init()
981 init_qsfp_int(dd); in hfi1_init()
984 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_init()
985 ppd = dd->pport + pidx; in hfi1_init()
993 dd_dev_info(dd, in hfi1_init()
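
hfi1_init() binds the per-device send methods as function pointers (PIO, DMA, inline-copy, and VNIC paths), applies the A0-silicon packet-drop workaround when is_ax(dd) is true, then builds each kernel context's receive header queue and the user-visible events/status pages. The method binding in miniature; every type and name below is illustrative, not the driver's:

    struct pkt;
    struct devdata;

    typedef int (*send_fn)(struct devdata *dd, struct pkt *p);

    struct devdata {
        send_fn process_pio_send; /* programmed-I/O send path */
        send_fn process_dma_send; /* SDMA send path */
    };

    static int send_pio(struct devdata *dd, struct pkt *p)
    {
        (void)dd; (void)p;
        return 0;
    }

    static int send_dma(struct devdata *dd, struct pkt *p)
    {
        (void)dd; (void)p;
        return 0;
    }

    /* Bind send methods once at init; hot paths then call through dd. */
    static void init_send_methods(struct devdata *dd)
    {
        dd->process_pio_send = send_pio;
        dd->process_dma_send = send_dma;
    }

Binding once at init keeps the per-packet hot path to a single indirect call rather than repeating the path selection per send.
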
1022 static void stop_timers(struct hfi1_devdata *dd) in stop_timers() argument
1027 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in stop_timers()
1028 ppd = dd->pport + pidx; in stop_timers()
1038 * @dd: the hfi1_ib device
1043 * Everything it does has to be setup again by hfi1_init(dd, 1)
1045 static void shutdown_device(struct hfi1_devdata *dd) in shutdown_device() argument
1052 if (dd->flags & HFI1_SHUTDOWN) in shutdown_device()
1054 dd->flags |= HFI1_SHUTDOWN; in shutdown_device()
1056 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1057 ppd = dd->pport + pidx; in shutdown_device()
1064 dd->flags &= ~HFI1_INITTED; in shutdown_device()
1067 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); in shutdown_device()
1068 msix_clean_up_interrupts(dd); in shutdown_device()
1070 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1071 ppd = dd->pport + pidx; in shutdown_device()
1072 for (i = 0; i < dd->num_rcv_contexts; i++) { in shutdown_device()
1073 rcd = hfi1_rcd_get_by_index(dd, i); in shutdown_device()
1074 hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | in shutdown_device()
1085 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1086 sc_flush(dd->send_contexts[i].sc); in shutdown_device()
1095 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in shutdown_device()
1096 ppd = dd->pport + pidx; in shutdown_device()
1099 for (i = 0; i < dd->num_send_contexts; i++) in shutdown_device()
1100 sc_disable(dd->send_contexts[i].sc); in shutdown_device()
1102 pio_send_control(dd, PSC_GLOBAL_DISABLE); in shutdown_device()
1116 sdma_exit(dd); in shutdown_device()
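
shutdown_device() is guarded by a one-way HFI1_SHUTDOWN latch, so a second caller (e.g. shutdown_one() after remove_one() has already run it) returns immediately; the first caller then tears down in order: disable receive contexts, flush and drain send contexts, globally disable PIO, and finally sdma_exit(). A sketch of the latch, assuming callers are serialized as in the driver:

    #include <stdbool.h>

    struct devdata {
        bool shutting_down; /* models the HFI1_SHUTDOWN flag bit */
    };

    static void shutdown_device(struct devdata *dd)
    {
        /* One-way latch: only the first caller performs teardown. */
        if (dd->shutting_down)
            return;
        dd->shutting_down = true;

        /* Teardown order per the matches above:
         * 1. disable receive contexts,
         * 2. flush, then disable, send contexts,
         * 3. pio_send_control(PSC_GLOBAL_DISABLE),
         * 4. sdma_exit().
         */
    }
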
1121 * @dd: the hfi1_ib device
1127 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_free_ctxtdata() argument
1135 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd), in hfi1_free_ctxtdata()
1139 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in hfi1_free_ctxtdata()
1152 dma_free_coherent(&dd->pcidev->dev, in hfi1_free_ctxtdata()
1180 static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd) in release_asic_data() argument
1185 if (!dd->asic_data) in release_asic_data()
1187 dd->asic_data->dds[dd->hfi1_id] = NULL; in release_asic_data()
1188 other = dd->hfi1_id ? 0 : 1; in release_asic_data()
1189 ad = dd->asic_data; in release_asic_data()
1190 dd->asic_data = NULL; in release_asic_data()
1191 /* return NULL if the other dd still has a link */ in release_asic_data()
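
release_asic_data() manages state shared by the two HFIs on one ASIC: the departing device clears its own dds[hfi1_id] link, and the shared structure is returned for freeing only when the peer's link is already gone. The matches suggest hfi1_free_devdata() calls it with the device table locked (__xa_erase at line 1215 precedes it). A sketch of the last-one-out handoff, locking left to the caller:

    #include <stddef.h>

    struct asic_data {
        void *dds[2]; /* one link per HFI on the ASIC */
    };

    /*
     * Drop this device's link to the shared data. Returns the shared
     * data when this was the last link (caller frees it), else NULL.
     */
    static struct asic_data *release_asic(struct asic_data **slot, int my_id)
    {
        struct asic_data *ad = *slot;
        int other = my_id ? 0 : 1;

        if (!ad)
            return NULL;
        ad->dds[my_id] = NULL;
        *slot = NULL;
        /* return NULL if the other device still has a link */
        return ad->dds[other] ? NULL : ad;
    }
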
1195 static void finalize_asic_data(struct hfi1_devdata *dd, in finalize_asic_data() argument
1198 clean_up_i2c(dd, ad); in finalize_asic_data()
1204 * @dd: pointer to a valid devdata structure
1209 void hfi1_free_devdata(struct hfi1_devdata *dd) in hfi1_free_devdata() argument
1215 __xa_erase(&hfi1_dev_table, dd->unit); in hfi1_free_devdata()
1216 ad = release_asic_data(dd); in hfi1_free_devdata()
1219 finalize_asic_data(dd, ad); in hfi1_free_devdata()
1220 free_platform_config(dd); in hfi1_free_devdata()
1222 free_percpu(dd->int_counter); in hfi1_free_devdata()
1223 free_percpu(dd->rcv_limit); in hfi1_free_devdata()
1224 free_percpu(dd->send_schedule); in hfi1_free_devdata()
1225 free_percpu(dd->tx_opstats); in hfi1_free_devdata()
1226 dd->int_counter = NULL; in hfi1_free_devdata()
1227 dd->rcv_limit = NULL; in hfi1_free_devdata()
1228 dd->send_schedule = NULL; in hfi1_free_devdata()
1229 dd->tx_opstats = NULL; in hfi1_free_devdata()
1230 kfree(dd->comp_vect); in hfi1_free_devdata()
1231 dd->comp_vect = NULL; in hfi1_free_devdata()
1232 if (dd->rcvhdrtail_dummy_kvaddr) in hfi1_free_devdata()
1233 dma_free_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_free_devdata()
1234 (void *)dd->rcvhdrtail_dummy_kvaddr, in hfi1_free_devdata()
1235 dd->rcvhdrtail_dummy_dma); in hfi1_free_devdata()
1236 dd->rcvhdrtail_dummy_kvaddr = NULL; in hfi1_free_devdata()
1237 sdma_clean(dd, dd->num_sdma); in hfi1_free_devdata()
1238 rvt_dealloc_device(&dd->verbs_dev.rdi); in hfi1_free_devdata()
1253 struct hfi1_devdata *dd; in hfi1_alloc_devdata() local
1259 dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra, in hfi1_alloc_devdata()
1261 if (!dd) in hfi1_alloc_devdata()
1263 dd->num_pports = nports; in hfi1_alloc_devdata()
1264 dd->pport = (struct hfi1_pportdata *)(dd + 1); in hfi1_alloc_devdata()
1265 dd->pcidev = pdev; in hfi1_alloc_devdata()
1266 pci_set_drvdata(pdev, dd); in hfi1_alloc_devdata()
1268 ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b, in hfi1_alloc_devdata()
1275 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit); in hfi1_alloc_devdata()
1280 dd->node = pcibus_to_node(pdev->bus); in hfi1_alloc_devdata()
1281 if (dd->node == NUMA_NO_NODE) { in hfi1_alloc_devdata()
1282 dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n"); in hfi1_alloc_devdata()
1283 dd->node = 0; in hfi1_alloc_devdata()
1290 spin_lock_init(&dd->sc_lock); in hfi1_alloc_devdata()
1291 spin_lock_init(&dd->sendctrl_lock); in hfi1_alloc_devdata()
1292 spin_lock_init(&dd->rcvctrl_lock); in hfi1_alloc_devdata()
1293 spin_lock_init(&dd->uctxt_lock); in hfi1_alloc_devdata()
1294 spin_lock_init(&dd->hfi1_diag_trans_lock); in hfi1_alloc_devdata()
1295 spin_lock_init(&dd->sc_init_lock); in hfi1_alloc_devdata()
1296 spin_lock_init(&dd->dc8051_memlock); in hfi1_alloc_devdata()
1297 seqlock_init(&dd->sc2vl_lock); in hfi1_alloc_devdata()
1298 spin_lock_init(&dd->sde_map_lock); in hfi1_alloc_devdata()
1299 spin_lock_init(&dd->pio_map_lock); in hfi1_alloc_devdata()
1300 mutex_init(&dd->dc8051_lock); in hfi1_alloc_devdata()
1301 init_waitqueue_head(&dd->event_queue); in hfi1_alloc_devdata()
1302 spin_lock_init(&dd->irq_src_lock); in hfi1_alloc_devdata()
1304 dd->int_counter = alloc_percpu(u64); in hfi1_alloc_devdata()
1305 if (!dd->int_counter) { in hfi1_alloc_devdata()
1310 dd->rcv_limit = alloc_percpu(u64); in hfi1_alloc_devdata()
1311 if (!dd->rcv_limit) { in hfi1_alloc_devdata()
1316 dd->send_schedule = alloc_percpu(u64); in hfi1_alloc_devdata()
1317 if (!dd->send_schedule) { in hfi1_alloc_devdata()
1322 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx); in hfi1_alloc_devdata()
1323 if (!dd->tx_opstats) { in hfi1_alloc_devdata()
1328 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL); in hfi1_alloc_devdata()
1329 if (!dd->comp_vect) { in hfi1_alloc_devdata()
1335 dd->rcvhdrtail_dummy_kvaddr = in hfi1_alloc_devdata()
1336 dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), in hfi1_alloc_devdata()
1337 &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); in hfi1_alloc_devdata()
1338 if (!dd->rcvhdrtail_dummy_kvaddr) { in hfi1_alloc_devdata()
1343 atomic_set(&dd->ipoib_rsm_usr_num, 0); in hfi1_alloc_devdata()
1344 return dd; in hfi1_alloc_devdata()
1347 hfi1_free_devdata(dd); in hfi1_alloc_devdata()
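
hfi1_alloc_devdata() acquires its resources in sequence (per-cpu counters, comp_vect, the dummy rcvhdr tail page) and routes every failure to one label that calls hfi1_free_devdata(). That single cleanup path is safe on a half-built dd because the free side (lines 1222-1236 above) both frees and NULLs each pointer. The idiom in compact form:

    #include <stdlib.h>

    struct dev {
        int *a;
        int *b;
    };

    static void dev_free(struct dev *d)
    {
        /* Free-and-NULL keeps this safe for partially built devices. */
        free(d->a);
        d->a = NULL;
        free(d->b);
        d->b = NULL;
        free(d);
    }

    static struct dev *dev_alloc(void)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return NULL;
        d->a = malloc(sizeof(*d->a));
        if (!d->a)
            goto bail;
        d->b = malloc(sizeof(*d->b));
        if (!d->b)
            goto bail;
        return d;
    bail:
        dev_free(d); /* one cleanup path for every failure point */
        return NULL;
    }
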
1356 void hfi1_disable_after_error(struct hfi1_devdata *dd) in hfi1_disable_after_error() argument
1358 if (dd->flags & HFI1_INITTED) { in hfi1_disable_after_error()
1361 dd->flags &= ~HFI1_INITTED; in hfi1_disable_after_error()
1362 if (dd->pport) in hfi1_disable_after_error()
1363 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in hfi1_disable_after_error()
1366 ppd = dd->pport + pidx; in hfi1_disable_after_error()
1367 if (dd->flags & HFI1_PRESENT) in hfi1_disable_after_error()
1380 if (dd->status) in hfi1_disable_after_error()
1381 dd->status->dev |= HFI1_STATUS_HWERROR; in hfi1_disable_after_error()
1522 static void cleanup_device_data(struct hfi1_devdata *dd) in cleanup_device_data() argument
1528 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in cleanup_device_data()
1529 struct hfi1_pportdata *ppd = &dd->pport[pidx]; in cleanup_device_data()
1548 free_credit_return(dd); in cleanup_device_data()
1554 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { in cleanup_device_data()
1555 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; in cleanup_device_data()
1563 kfree(dd->rcd); in cleanup_device_data()
1564 dd->rcd = NULL; in cleanup_device_data()
1566 free_pio_map(dd); in cleanup_device_data()
1568 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) in cleanup_device_data()
1569 sc_free(dd->send_contexts[ctxt].sc); in cleanup_device_data()
1570 dd->num_send_contexts = 0; in cleanup_device_data()
1571 kfree(dd->send_contexts); in cleanup_device_data()
1572 dd->send_contexts = NULL; in cleanup_device_data()
1573 kfree(dd->hw_to_sw); in cleanup_device_data()
1574 dd->hw_to_sw = NULL; in cleanup_device_data()
1575 kfree(dd->boardname); in cleanup_device_data()
1576 vfree(dd->events); in cleanup_device_data()
1577 vfree(dd->status); in cleanup_device_data()
1584 static void postinit_cleanup(struct hfi1_devdata *dd) in postinit_cleanup() argument
1586 hfi1_start_cleanup(dd); in postinit_cleanup()
1587 hfi1_comp_vectors_clean_up(dd); in postinit_cleanup()
1588 hfi1_dev_affinity_clean_up(dd); in postinit_cleanup()
1590 hfi1_pcie_ddcleanup(dd); in postinit_cleanup()
1591 hfi1_pcie_cleanup(dd->pcidev); in postinit_cleanup()
1593 cleanup_device_data(dd); in postinit_cleanup()
1595 hfi1_free_devdata(dd); in postinit_cleanup()
1601 struct hfi1_devdata *dd; in init_one() local
1616 /* Allocate the dd so we can get to work */ in init_one()
1617 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * in init_one()
1619 if (IS_ERR(dd)) { in init_one()
1620 ret = PTR_ERR(dd); in init_one()
1625 ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt); in init_one()
1631 dd_dev_err(dd, "Invalid HdrQ Entry size %u\n", in init_one()
1654 dd_dev_info(dd, "Eager buffer size %u\n", in init_one()
1657 dd_dev_err(dd, "Invalid Eager buffer size of 0\n"); in init_one()
1665 ret = hfi1_pcie_init(dd); in init_one()
1670 * Do device-specific initialization, function table setup, dd in init_one()
1673 ret = hfi1_init_dd(dd); in init_one()
1677 ret = create_workqueues(dd); in init_one()
1682 initfail = hfi1_init(dd, 0); in init_one()
1684 ret = hfi1_register_ib_device(dd); in init_one()
1693 dd->flags |= HFI1_INITTED; in init_one()
1695 hfi1_dbg_ibdev_init(&dd->verbs_dev); in init_one()
1698 j = hfi1_device_create(dd); in init_one()
1700 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); in init_one()
1703 msix_clean_up_interrupts(dd); in init_one()
1704 stop_timers(dd); in init_one()
1706 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in init_one()
1707 hfi1_quiet_serdes(dd->pport + pidx); in init_one()
1708 ppd = dd->pport + pidx; in init_one()
1719 hfi1_device_remove(dd); in init_one()
1721 hfi1_unregister_ib_device(dd); in init_one()
1722 postinit_cleanup(dd); in init_one()
1728 sdma_start(dd); in init_one()
1738 static void wait_for_clients(struct hfi1_devdata *dd) in wait_for_clients() argument
1744 if (atomic_dec_and_test(&dd->user_refcount)) in wait_for_clients()
1745 complete(&dd->user_comp); in wait_for_clients()
1747 wait_for_completion(&dd->user_comp); in wait_for_clients()
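
wait_for_clients() is the classic dec-and-test plus completion handoff: the driver drops its own reference on user_refcount and completes immediately if that was the last one; otherwise the final user-space client to close the device completes it, and remove_one() blocks until then. A userspace C11 analogue, with a condvar standing in for struct completion:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int user_refcount = 1; /* the driver's own reference */
    static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t comp_cond = PTHREAD_COND_INITIALIZER;
    static bool completed;

    static void complete_users(void) /* complete() analogue */
    {
        pthread_mutex_lock(&comp_lock);
        completed = true;
        pthread_cond_broadcast(&comp_cond);
        pthread_mutex_unlock(&comp_lock);
    }

    /* Drop our own ref; whoever drops the last ref signals completion. */
    static void wait_for_clients(void)
    {
        if (atomic_fetch_sub(&user_refcount, 1) == 1)
            complete_users();

        pthread_mutex_lock(&comp_lock); /* wait_for_completion() analogue */
        while (!completed)
            pthread_cond_wait(&comp_cond, &comp_lock);
        pthread_mutex_unlock(&comp_lock);
    }

Each open of the device would atomic_fetch_add the count and each close would run the same dec-and-test, so whichever side reaches zero last wakes the waiter.
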
1752 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in remove_one() local
1755 hfi1_dbg_ibdev_exit(&dd->verbs_dev); in remove_one()
1758 hfi1_device_remove(dd); in remove_one()
1761 wait_for_clients(dd); in remove_one()
1764 hfi1_unregister_ib_device(dd); in remove_one()
1767 hfi1_netdev_free(dd); in remove_one()
1773 shutdown_device(dd); in remove_one()
1774 destroy_workqueues(dd); in remove_one()
1776 stop_timers(dd); in remove_one()
1781 postinit_cleanup(dd); in remove_one()
1786 struct hfi1_devdata *dd = pci_get_drvdata(pdev); in shutdown_one() local
1788 shutdown_device(dd); in shutdown_one()
1793 * @dd: the hfi1_ib device
1800 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_create_rcvhdrq() argument
1809 if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic) in hfi1_create_rcvhdrq()
1813 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt, in hfi1_create_rcvhdrq()
1818 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1826 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, in hfi1_create_rcvhdrq()
1835 set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize, in hfi1_create_rcvhdrq()
1841 dd_dev_err(dd, in hfi1_create_rcvhdrq()
1844 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, in hfi1_create_rcvhdrq()
1862 struct hfi1_devdata *dd = rcd->dd; in hfi1_setup_eagerbufs() local
1884 if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size)) in hfi1_setup_eagerbufs()
1885 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1904 dma_alloc_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
1930 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", in hfi1_setup_eagerbufs()
1994 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs()
1995 egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size); in hfi1_setup_eagerbufs()
2014 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, in hfi1_setup_eagerbufs()
2025 dma_free_coherent(&dd->pcidev->dev, in hfi1_setup_eagerbufs()
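
hfi1_setup_eagerbufs() sizes the eager region in whole RcvArray groups: the total is clamped to at least round_mtu * group_size bytes, and the allocated buffer count is rounded up to a group_size multiple (egrtop) before entries are programmed with hfi1_put_tid(). The rounding is ordinary roundup arithmetic:

    /* roundup(x, y): smallest multiple of y that is >= x (y > 0). */
    static unsigned int roundup_to(unsigned int x, unsigned int y)
    {
        return ((x + y - 1) / y) * y;
    }

With a hypothetical group_size of 8, 29 allocated eager buffers map to roundup_to(29, 8) = 32 RcvArray entries, so the array is always programmed in whole groups.
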