Lines matching refs: ndev. Each entry gives the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" notes how ndev is bound on that line.
209 static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) in ndev_reset_unsafe_flags() argument
211 ndev->unsafe_flags = 0; in ndev_reset_unsafe_flags()
212 ndev->unsafe_flags_ignore = 0; in ndev_reset_unsafe_flags()
215 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) in ndev_reset_unsafe_flags()
216 if (!ntb_topo_is_b2b(ndev->ntb.topo)) in ndev_reset_unsafe_flags()
217 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
220 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) { in ndev_reset_unsafe_flags()
221 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
222 ndev->unsafe_flags |= NTB_UNSAFE_SPAD; in ndev_reset_unsafe_flags()
226 static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev, in ndev_is_unsafe() argument
229 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore); in ndev_is_unsafe()
232 static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev, in ndev_ignore_unsafe() argument
235 flag &= ndev->unsafe_flags; in ndev_ignore_unsafe()
236 ndev->unsafe_flags_ignore |= flag; in ndev_ignore_unsafe()
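
Taken together, the helpers above implement the hardware-errata bookkeeping: ndev_reset_unsafe_flags() recomputes which operations the current topology makes unsafe, ndev_is_unsafe() gates each register access, and ndev_ignore_unsafe() lets a caller opt in anyway. A reconstruction of the two accessors follows; the listing elides the final return in ndev_ignore_unsafe(), so that line is an assumption (reporting which flags were actually set):

	static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
					 unsigned long flag)
	{
		/* unsafe only while set and not explicitly ignored */
		return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
	}

	static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
					     unsigned long flag)
	{
		flag &= ndev->unsafe_flags;
		ndev->unsafe_flags_ignore |= flag;

		return !!flag;	/* assumed: report which flags were set */
	}
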
241 static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) in ndev_mw_to_bar() argument
243 if (idx < 0 || idx >= ndev->mw_count) in ndev_mw_to_bar()
245 return ndev->reg->mw_bar[idx]; in ndev_mw_to_bar()
248 static inline int ndev_db_addr(struct intel_ntb_dev *ndev, in ndev_db_addr() argument
252 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_addr()
257 dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr); in ndev_db_addr()
261 *db_size = ndev->reg->db_size; in ndev_db_addr()
262 dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size); in ndev_db_addr()
268 static inline u64 ndev_db_read(struct intel_ntb_dev *ndev, in ndev_db_read() argument
271 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_read()
274 return ndev->reg->db_ioread(mmio); in ndev_db_read()
277 static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_write() argument
280 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_write()
283 if (db_bits & ~ndev->db_valid_mask) in ndev_db_write()
286 ndev->reg->db_iowrite(db_bits, mmio); in ndev_db_write()
291 static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_set_mask() argument
296 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_set_mask()
299 if (db_bits & ~ndev->db_valid_mask) in ndev_db_set_mask()
302 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
304 ndev->db_mask |= db_bits; in ndev_db_set_mask()
305 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_set_mask()
307 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
312 static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_clear_mask() argument
317 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_clear_mask()
320 if (db_bits & ~ndev->db_valid_mask) in ndev_db_clear_mask()
323 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
325 ndev->db_mask &= ~db_bits; in ndev_db_clear_mask()
326 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_clear_mask()
328 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
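
ndev_db_set_mask() and ndev_db_clear_mask() differ only in the bit operation; both follow the cached-mask pattern sketched below. The driver keeps db_mask in memory, edits it under db_mask_lock, and mirrors the whole value to the hardware mask register in one write, so concurrent maskers never lose each other's bits:

	/* sketch of the shared pattern (set shown; clear uses &= ~db_bits) */
	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	ndev->db_mask |= db_bits;			/* update cached copy */
	ndev->reg->db_iowrite(ndev->db_mask, mmio);	/* mirror to hardware */
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
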
333 static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) in ndev_vec_mask() argument
337 shift = ndev->db_vec_shift; in ndev_vec_mask()
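
The body of ndev_vec_mask() is mostly elided above. A hedged reconstruction, consistent with the db_vec_shift bookkeeping elsewhere in the listing: each interrupt vector owns a contiguous group of doorbell bits, 2^db_vec_shift wide.

	static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
	{
		u64 shift = ndev->db_vec_shift;
		u64 mask = BIT_ULL(shift) - 1;		/* one group of db bits */

		return mask << (shift * db_vector);	/* slide to this vector */
	}
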
343 static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, in ndev_spad_addr() argument
347 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_addr()
350 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_addr()
355 dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr); in ndev_spad_addr()
361 static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx, in ndev_spad_read() argument
364 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_read()
367 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_read()
373 static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val, in ndev_spad_write() argument
376 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_write()
379 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_write()
387 static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) in ndev_interrupt() argument
391 vec_mask = ndev_vec_mask(ndev, vec); in ndev_interrupt()
393 dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask); in ndev_interrupt()
395 ndev->last_ts = jiffies; in ndev_interrupt()
397 if (vec_mask & ndev->db_link_mask) { in ndev_interrupt()
398 if (ndev->reg->poll_link(ndev)) in ndev_interrupt()
399 ntb_link_event(&ndev->ntb); in ndev_interrupt()
402 if (vec_mask & ndev->db_valid_mask) in ndev_interrupt()
403 ntb_db_event(&ndev->ntb, vec); in ndev_interrupt()
412 return ndev_interrupt(nvec->ndev, nvec->num); in ndev_vec_isr()
417 struct intel_ntb_dev *ndev = dev; in ndev_irq_isr() local
419 return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq); in ndev_irq_isr()
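
ndev_interrupt() dispatches link and doorbell events separately: the vector's bit group (from ndev_vec_mask()) is tested against db_link_mask for link changes and db_valid_mask for client doorbells, and the INTx path recovers the vector number as irq - pdev->irq. For context, a hypothetical client-side counterpart is sketched below (struct client and client_db_event are illustrative names, not from this file); ntb_db_event() fans out to a callback of this shape:

	struct client {
		struct ntb_dev *ntb;
	};

	static void client_db_event(void *ctx, int vec)
	{
		struct client *c = ctx;		/* client's own context */
		u64 db;

		/* read only the bits belonging to this vector, then clear them */
		db = ntb_db_read(c->ntb) & ntb_db_vector_mask(c->ntb, vec);
		ntb_db_clear(c->ntb, db);

		/* ...handle the doorbell bits in db... */
	}
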
422 static int ndev_init_isr(struct intel_ntb_dev *ndev, in ndev_init_isr() argument
429 pdev = ndev_pdev(ndev); in ndev_init_isr()
434 ndev->db_mask = ndev->db_valid_mask; in ndev_init_isr()
435 ndev->reg->db_iowrite(ndev->db_mask, in ndev_init_isr()
436 ndev->self_mmio + in ndev_init_isr()
437 ndev->self_reg->db_mask); in ndev_init_isr()
441 ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec), in ndev_init_isr()
443 if (!ndev->vec) in ndev_init_isr()
446 ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix), in ndev_init_isr()
448 if (!ndev->msix) in ndev_init_isr()
452 ndev->msix[i].entry = i; in ndev_init_isr()
454 msix_count = pci_enable_msix_range(pdev, ndev->msix, in ndev_init_isr()
460 ndev->vec[i].ndev = ndev; in ndev_init_isr()
461 ndev->vec[i].num = i; in ndev_init_isr()
462 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, in ndev_init_isr()
463 "ndev_vec_isr", &ndev->vec[i]); in ndev_init_isr()
468 dev_dbg(ndev_dev(ndev), "Using msix interrupts\n"); in ndev_init_isr()
469 ndev->db_vec_count = msix_count; in ndev_init_isr()
470 ndev->db_vec_shift = msix_shift; in ndev_init_isr()
475 free_irq(ndev->msix[i].vector, ndev); in ndev_init_isr()
478 kfree(ndev->msix); in ndev_init_isr()
480 kfree(ndev->vec); in ndev_init_isr()
482 ndev->msix = NULL; in ndev_init_isr()
483 ndev->vec = NULL; in ndev_init_isr()
492 "ndev_irq_isr", ndev); in ndev_init_isr()
496 dev_dbg(ndev_dev(ndev), "Using msi interrupts\n"); in ndev_init_isr()
497 ndev->db_vec_count = 1; in ndev_init_isr()
498 ndev->db_vec_shift = total_shift; in ndev_init_isr()
510 "ndev_irq_isr", ndev); in ndev_init_isr()
514 dev_dbg(ndev_dev(ndev), "Using intx interrupts\n"); in ndev_init_isr()
515 ndev->db_vec_count = 1; in ndev_init_isr()
516 ndev->db_vec_shift = total_shift; in ndev_init_isr()
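
ndev_init_isr() above is a three-tier fallback. A condensed sketch of its shape (isr_setup_sketch is a hypothetical name; the real function's allocation and error unwinding are elided): try MSI-X with one vector per doorbell group, then a single MSI, then shared legacy INTx, recording db_vec_count/db_vec_shift so ndev_vec_mask() keeps working in every mode.

	static int isr_setup_sketch(struct intel_ntb_dev *ndev, struct pci_dev *pdev,
				    int msix_min, int msix_max,
				    int msix_shift, int total_shift)
	{
		int msix_count;

		msix_count = pci_enable_msix_range(pdev, ndev->msix,
						   msix_min, msix_max);
		if (msix_count >= msix_min) {
			ndev->db_vec_count = msix_count;	/* per-vector groups */
			ndev->db_vec_shift = msix_shift;
			return 0;
		}

		if (!pci_enable_msi(pdev) &&
		    !request_irq(pdev->irq, ndev_irq_isr, 0, "ndev_irq_isr", ndev)) {
			ndev->db_vec_count = 1;			/* one vector total */
			ndev->db_vec_shift = total_shift;
			return 0;
		}

		pci_intx(pdev, 1);
		if (!request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
				 "ndev_irq_isr", ndev)) {
			ndev->db_vec_count = 1;
			ndev->db_vec_shift = total_shift;
			return 0;
		}

		return -ENODEV;
	}
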
523 static void ndev_deinit_isr(struct intel_ntb_dev *ndev) in ndev_deinit_isr() argument
528 pdev = ndev_pdev(ndev); in ndev_deinit_isr()
531 ndev->db_mask = ndev->db_valid_mask; in ndev_deinit_isr()
532 ndev->reg->db_iowrite(ndev->db_mask, in ndev_deinit_isr()
533 ndev->self_mmio + in ndev_deinit_isr()
534 ndev->self_reg->db_mask); in ndev_deinit_isr()
536 if (ndev->msix) { in ndev_deinit_isr()
537 i = ndev->db_vec_count; in ndev_deinit_isr()
539 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_deinit_isr()
541 kfree(ndev->msix); in ndev_deinit_isr()
542 kfree(ndev->vec); in ndev_deinit_isr()
544 free_irq(pdev->irq, ndev); in ndev_deinit_isr()
553 struct intel_ntb_dev *ndev; in ndev_debugfs_read() local
560 ndev = filp->private_data; in ndev_debugfs_read()
561 mmio = ndev->self_mmio; in ndev_debugfs_read()
576 ntb_topo_string(ndev->ntb.topo)); in ndev_debugfs_read()
578 if (ndev->b2b_idx != UINT_MAX) { in ndev_debugfs_read()
580 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx); in ndev_debugfs_read()
582 "B2B Offset -\t\t%#lx\n", ndev->b2b_off); in ndev_debugfs_read()
587 ndev->bar4_split ? "yes" : "no"); in ndev_debugfs_read()
590 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); in ndev_debugfs_read()
592 "LNK STA -\t\t%#06x\n", ndev->lnk_sta); in ndev_debugfs_read()
594 if (!ndev->reg->link_is_up(ndev)) { in ndev_debugfs_read()
602 NTB_LNK_STA_SPEED(ndev->lnk_sta)); in ndev_debugfs_read()
605 NTB_LNK_STA_WIDTH(ndev->lnk_sta)); in ndev_debugfs_read()
609 "Memory Window Count -\t%u\n", ndev->mw_count); in ndev_debugfs_read()
611 "Scratchpad Count -\t%u\n", ndev->spad_count); in ndev_debugfs_read()
613 "Doorbell Count -\t%u\n", ndev->db_count); in ndev_debugfs_read()
615 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); in ndev_debugfs_read()
617 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); in ndev_debugfs_read()
620 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); in ndev_debugfs_read()
622 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); in ndev_debugfs_read()
624 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); in ndev_debugfs_read()
626 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); in ndev_debugfs_read()
630 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); in ndev_debugfs_read()
637 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2)); in ndev_debugfs_read()
641 if (ndev->bar4_split) { in ndev_debugfs_read()
642 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_debugfs_read()
646 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5)); in ndev_debugfs_read()
650 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_debugfs_read()
655 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2)); in ndev_debugfs_read()
659 if (ndev->bar4_split) { in ndev_debugfs_read()
660 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_debugfs_read()
663 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5)); in ndev_debugfs_read()
667 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_debugfs_read()
672 if (pdev_is_xeon(ndev->ntb.pdev)) { in ndev_debugfs_read()
673 if (ntb_topo_is_b2b(ndev->ntb.topo)) { in ndev_debugfs_read()
681 if (ndev->bar4_split) { in ndev_debugfs_read()
701 if (ndev->bar4_split) { in ndev_debugfs_read()
728 if (ndev->bar4_split) { in ndev_debugfs_read()
753 if (!pci_read_config_word(ndev->ntb.pdev, in ndev_debugfs_read()
758 if (!pci_read_config_word(ndev->ntb.pdev, in ndev_debugfs_read()
763 if (!pci_read_config_dword(ndev->ntb.pdev, in ndev_debugfs_read()
768 if (!pci_read_config_dword(ndev->ntb.pdev, in ndev_debugfs_read()
779 static void ndev_init_debugfs(struct intel_ntb_dev *ndev) in ndev_init_debugfs() argument
782 ndev->debugfs_dir = NULL; in ndev_init_debugfs()
783 ndev->debugfs_info = NULL; in ndev_init_debugfs()
785 ndev->debugfs_dir = in ndev_init_debugfs()
786 debugfs_create_dir(ndev_name(ndev), debugfs_dir); in ndev_init_debugfs()
787 if (!ndev->debugfs_dir) in ndev_init_debugfs()
788 ndev->debugfs_info = NULL; in ndev_init_debugfs()
790 ndev->debugfs_info = in ndev_init_debugfs()
792 ndev->debugfs_dir, ndev, in ndev_init_debugfs()
797 static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) in ndev_deinit_debugfs() argument
799 debugfs_remove_recursive(ndev->debugfs_dir); in ndev_deinit_debugfs()
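
The debugfs wiring above follows the usual one-directory-per-device layout: a directory named after the device under the module's top-level debugfs_dir, holding a single read-only info file whose read handler is ndev_debugfs_read(). The file name, mode, and fops symbol are elided in the listing; the ones below are assumptions:

	ndev->debugfs_dir = debugfs_create_dir(ndev_name(ndev), debugfs_dir);
	if (!ndev->debugfs_dir)
		ndev->debugfs_info = NULL;
	else
		ndev->debugfs_info =
			debugfs_create_file("info", S_IRUSR,	/* assumed name/mode */
					    ndev->debugfs_dir, ndev,
					    &intel_ntb_debugfs_info); /* assumed fops */
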
813 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_get_range() local
816 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_get_range()
819 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_get_range()
824 *base = pci_resource_start(ndev->ntb.pdev, bar) + in intel_ntb_mw_get_range()
825 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_mw_get_range()
828 *size = pci_resource_len(ndev->ntb.pdev, bar) - in intel_ntb_mw_get_range()
829 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_mw_get_range()
832 *align = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_get_range()
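
When a memory window doubles as the b2b window, its usable range is offset by b2b_off, which is why base gains the offset and size loses it while align stays the full BAR length. A worked example with made-up numbers:

	/* hypothetical: 64 KiB BAR shared with b2b, b2b_off = 32 KiB */
	resource_size_t start = 0xd0000000;	/* pci_resource_start(), assumed */
	resource_size_t len   = 0x10000;	/* pci_resource_len(),   assumed */
	resource_size_t off   = 0x8000;		/* ndev->b2b_off */

	/* the client sees only the upper half:
	 *   base  = start + off = 0xd0008000
	 *   size  = len - off   = 0x8000
	 *   align = len         = 0x10000 (full BAR length)
	 */
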
843 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_set_trans() local
850 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_set_trans()
853 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_set_trans()
857 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_set_trans()
859 if (idx == ndev->b2b_idx) in intel_ntb_mw_set_trans()
860 mw_size = bar_size - ndev->b2b_off; in intel_ntb_mw_set_trans()
872 mmio = ndev->self_mmio; in intel_ntb_mw_set_trans()
873 base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar); in intel_ntb_mw_set_trans()
874 xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar); in intel_ntb_mw_set_trans()
875 limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar); in intel_ntb_mw_set_trans()
877 if (bar < 4 || !ndev->bar4_split) { in intel_ntb_mw_set_trans()
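
From this point intel_ntb_mw_set_trans() branches on BAR width: full BARs take 64-bit translation and limit writes, while a split BAR 4/5 uses the 32-bit variants. A hedged sketch of the 64-bit branch, including the write-then-read-back verification the driver relies on:

	iowrite64(addr, mmio + xlat_reg);	/* where the peer's access lands */
	if (ioread64(mmio + xlat_reg) != addr) {
		iowrite64(0, mmio + xlat_reg);	/* hardware rejected it: undo */
		return -EIO;
	}
	iowrite64(limit, mmio + limit_reg);	/* 0 when no limit is wanted */
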
942 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_link_is_up() local
944 if (ndev->reg->link_is_up(ndev)) { in intel_ntb_link_is_up()
946 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); in intel_ntb_link_is_up()
948 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); in intel_ntb_link_is_up()
965 struct intel_ntb_dev *ndev; in intel_ntb_link_enable() local
968 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_enable()
970 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_enable()
973 dev_dbg(ndev_dev(ndev), in intel_ntb_link_enable()
977 dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); in intel_ntb_link_enable()
979 dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); in intel_ntb_link_enable()
981 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
985 if (ndev->bar4_split) in intel_ntb_link_enable()
987 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
994 struct intel_ntb_dev *ndev; in intel_ntb_link_disable() local
997 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_disable()
999 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_disable()
1002 dev_dbg(ndev_dev(ndev), "Disabling link\n"); in intel_ntb_link_disable()
1005 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
1008 if (ndev->bar4_split) in intel_ntb_link_disable()
1011 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
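
Link enable and disable are mirror images over the same NTB control register: enable clears the disable/config-lock bits and turns on BAR snooping, disable does the reverse. A sketch of the enable side, using the NTB_CTL_* bit names from the driver header (the exact bit set is partly elided above, so treat the list as an assumption):

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
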
1028 struct intel_ntb_dev *ndev; in intel_ntb_db_vector_count() local
1030 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_db_vector_count()
1032 return ndev->db_vec_count; in intel_ntb_db_vector_count()
1037 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_vector_mask() local
1039 if (db_vector < 0 || db_vector > ndev->db_vec_count) in intel_ntb_db_vector_mask()
1042 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector); in intel_ntb_db_vector_mask()
1047 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_read() local
1049 return ndev_db_read(ndev, in intel_ntb_db_read()
1050 ndev->self_mmio + in intel_ntb_db_read()
1051 ndev->self_reg->db_bell); in intel_ntb_db_read()
1056 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear() local
1058 return ndev_db_write(ndev, db_bits, in intel_ntb_db_clear()
1059 ndev->self_mmio + in intel_ntb_db_clear()
1060 ndev->self_reg->db_bell); in intel_ntb_db_clear()
1065 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_set_mask() local
1067 return ndev_db_set_mask(ndev, db_bits, in intel_ntb_db_set_mask()
1068 ndev->self_mmio + in intel_ntb_db_set_mask()
1069 ndev->self_reg->db_mask); in intel_ntb_db_set_mask()
1074 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear_mask() local
1076 return ndev_db_clear_mask(ndev, db_bits, in intel_ntb_db_clear_mask()
1077 ndev->self_mmio + in intel_ntb_db_clear_mask()
1078 ndev->self_reg->db_mask); in intel_ntb_db_clear_mask()
1085 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_addr() local
1087 return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, in intel_ntb_peer_db_addr()
1088 ndev->peer_reg->db_bell); in intel_ntb_peer_db_addr()
1093 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_set() local
1095 return ndev_db_write(ndev, db_bits, in intel_ntb_peer_db_set()
1096 ndev->peer_mmio + in intel_ntb_peer_db_set()
1097 ndev->peer_reg->db_bell); in intel_ntb_peer_db_set()
1107 struct intel_ntb_dev *ndev; in intel_ntb_spad_count() local
1109 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_spad_count()
1111 return ndev->spad_count; in intel_ntb_spad_count()
1116 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_read() local
1118 return ndev_spad_read(ndev, idx, in intel_ntb_spad_read()
1119 ndev->self_mmio + in intel_ntb_spad_read()
1120 ndev->self_reg->spad); in intel_ntb_spad_read()
1126 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_write() local
1128 return ndev_spad_write(ndev, idx, val, in intel_ntb_spad_write()
1129 ndev->self_mmio + in intel_ntb_spad_write()
1130 ndev->self_reg->spad); in intel_ntb_spad_write()
1136 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_addr() local
1138 return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr, in intel_ntb_peer_spad_addr()
1139 ndev->peer_reg->spad); in intel_ntb_peer_spad_addr()
1144 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_read() local
1146 return ndev_spad_read(ndev, idx, in intel_ntb_peer_spad_read()
1147 ndev->peer_mmio + in intel_ntb_peer_spad_read()
1148 ndev->peer_reg->spad); in intel_ntb_peer_spad_read()
1154 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_write() local
1156 return ndev_spad_write(ndev, idx, val, in intel_ntb_peer_spad_write()
1157 ndev->peer_mmio + in intel_ntb_peer_spad_write()
1158 ndev->peer_reg->spad); in intel_ntb_peer_spad_write()
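
Every doorbell and scratchpad operation from intel_ntb_db_read() down to intel_ntb_peer_spad_write() is the same shared helper aimed at one of two register blocks; only the base pointer and register block change. In one line each:

	ndev_spad_write(ndev, idx, val, ndev->self_mmio + ndev->self_reg->spad); /* local */
	ndev_spad_write(ndev, idx, val, ndev->peer_mmio + ndev->peer_reg->spad); /* peer  */
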
1173 static int atom_poll_link(struct intel_ntb_dev *ndev) in atom_poll_link() argument
1177 ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET); in atom_poll_link()
1179 if (ntb_ctl == ndev->ntb_ctl) in atom_poll_link()
1182 ndev->ntb_ctl = ntb_ctl; in atom_poll_link()
1184 ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET); in atom_poll_link()
1189 static int atom_link_is_up(struct intel_ntb_dev *ndev) in atom_link_is_up() argument
1191 return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl); in atom_link_is_up()
1194 static int atom_link_is_err(struct intel_ntb_dev *ndev) in atom_link_is_err() argument
1196 if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET) in atom_link_is_err()
1200 if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET) in atom_link_is_err()
1207 static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) in atom_ppd_topo() argument
1211 dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd); in atom_ppd_topo()
1215 dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd); in atom_ppd_topo()
1222 dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd); in atom_ppd_topo()
1226 dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd); in atom_ppd_topo()
1232 struct intel_ntb_dev *ndev = hb_ndev(work); in atom_link_hb() local
1237 poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT; in atom_link_hb()
1242 if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) { in atom_link_hb()
1243 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies); in atom_link_hb()
1247 if (atom_poll_link(ndev)) in atom_link_hb()
1248 ntb_link_event(&ndev->ntb); in atom_link_hb()
1250 if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) { in atom_link_hb()
1251 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT); in atom_link_hb()
1257 mmio = ndev->self_mmio; in atom_link_hb()
1270 dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32); in atom_link_hb()
1276 dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32); in atom_link_hb()
1282 dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32); in atom_link_hb()
1287 dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32); in atom_link_hb()
1293 dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32); in atom_link_hb()
1302 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME in atom_link_hb()
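
atom_link_hb() is a self-rearming delayed work. A condensed skeleton of the flow visible above (link_hb_sketch is a hypothetical name; the register-dump recovery body is elided in the listing and only summarized in the final comment):

	static void link_hb_sketch(struct work_struct *work)
	{
		struct intel_ntb_dev *ndev = hb_ndev(work);
		unsigned long poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;

		/* a recent interrupt already proved the link alive: sleep again */
		if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
			schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
			return;
		}

		if (atom_poll_link(ndev))
			ntb_link_event(&ndev->ntb);

		/* up, or down without an error condition: keep normal cadence */
		if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
			schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
			return;
		}

		/* otherwise: dump the error registers, retrain the link, and
		 * re-arm after ATOM_LINK_RECOVERY_TIME */
	}
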
1306 static int atom_init_isr(struct intel_ntb_dev *ndev) in atom_init_isr() argument
1310 rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT, in atom_init_isr()
1316 ndev->last_ts = jiffies; in atom_init_isr()
1317 INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb); in atom_init_isr()
1318 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT); in atom_init_isr()
1323 static void atom_deinit_isr(struct intel_ntb_dev *ndev) in atom_deinit_isr() argument
1325 cancel_delayed_work_sync(&ndev->hb_timer); in atom_deinit_isr()
1326 ndev_deinit_isr(ndev); in atom_deinit_isr()
1329 static int atom_init_ntb(struct intel_ntb_dev *ndev) in atom_init_ntb() argument
1331 ndev->mw_count = ATOM_MW_COUNT; in atom_init_ntb()
1332 ndev->spad_count = ATOM_SPAD_COUNT; in atom_init_ntb()
1333 ndev->db_count = ATOM_DB_COUNT; in atom_init_ntb()
1335 switch (ndev->ntb.topo) { in atom_init_ntb()
1338 ndev->self_reg = &atom_pri_reg; in atom_init_ntb()
1339 ndev->peer_reg = &atom_b2b_reg; in atom_init_ntb()
1340 ndev->xlat_reg = &atom_sec_xlat; in atom_init_ntb()
1344 ndev->self_mmio + ATOM_SPCICMD_OFFSET); in atom_init_ntb()
1352 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in atom_init_ntb()
1357 static int atom_init_dev(struct intel_ntb_dev *ndev) in atom_init_dev() argument
1362 rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd); in atom_init_dev()
1366 ndev->ntb.topo = atom_ppd_topo(ndev, ppd); in atom_init_dev()
1367 if (ndev->ntb.topo == NTB_TOPO_NONE) in atom_init_dev()
1370 rc = atom_init_ntb(ndev); in atom_init_dev()
1374 rc = atom_init_isr(ndev); in atom_init_dev()
1378 if (ndev->ntb.topo != NTB_TOPO_SEC) { in atom_init_dev()
1380 rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, in atom_init_dev()
1389 static void atom_deinit_dev(struct intel_ntb_dev *ndev) in atom_deinit_dev() argument
1391 atom_deinit_isr(ndev); in atom_deinit_dev()
1406 static int xeon_poll_link(struct intel_ntb_dev *ndev) in xeon_poll_link() argument
1411 ndev->reg->db_iowrite(ndev->db_link_mask, in xeon_poll_link()
1412 ndev->self_mmio + in xeon_poll_link()
1413 ndev->self_reg->db_bell); in xeon_poll_link()
1415 rc = pci_read_config_word(ndev->ntb.pdev, in xeon_poll_link()
1420 if (reg_val == ndev->lnk_sta) in xeon_poll_link()
1423 ndev->lnk_sta = reg_val; in xeon_poll_link()
1428 static int xeon_link_is_up(struct intel_ntb_dev *ndev) in xeon_link_is_up() argument
1430 if (ndev->ntb.topo == NTB_TOPO_SEC) in xeon_link_is_up()
1433 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); in xeon_link_is_up()
1436 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_topo() argument
1457 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_bar4_split() argument
1460 dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd); in xeon_ppd_bar4_split()
1466 static int xeon_init_isr(struct intel_ntb_dev *ndev) in xeon_init_isr() argument
1468 return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT, in xeon_init_isr()
1474 static void xeon_deinit_isr(struct intel_ntb_dev *ndev) in xeon_deinit_isr() argument
1476 ndev_deinit_isr(ndev); in xeon_deinit_isr()
1479 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, in xeon_setup_b2b_mw() argument
1490 pdev = ndev_pdev(ndev); in xeon_setup_b2b_mw()
1491 mmio = ndev->self_mmio; in xeon_setup_b2b_mw()
1493 if (ndev->b2b_idx == UINT_MAX) { in xeon_setup_b2b_mw()
1494 dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); in xeon_setup_b2b_mw()
1496 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
1498 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); in xeon_setup_b2b_mw()
1502 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); in xeon_setup_b2b_mw()
1504 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); in xeon_setup_b2b_mw()
1506 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); in xeon_setup_b2b_mw()
1509 dev_dbg(ndev_dev(ndev), in xeon_setup_b2b_mw()
1511 ndev->b2b_off = bar_size >> 1; in xeon_setup_b2b_mw()
1513 dev_dbg(ndev_dev(ndev), in xeon_setup_b2b_mw()
1515 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
1516 --ndev->mw_count; in xeon_setup_b2b_mw()
1518 dev_dbg(ndev_dev(ndev), in xeon_setup_b2b_mw()
1531 dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1533 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1540 dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1542 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1544 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1546 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1553 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1556 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1558 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1565 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1568 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1570 if (ndev->b2b_off) in xeon_setup_b2b_mw()
1577 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz); in xeon_setup_b2b_mw()
1585 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
1594 dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1602 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1605 dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1607 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1609 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1612 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1615 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1618 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr); in xeon_setup_b2b_mw()
1621 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1624 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr); in xeon_setup_b2b_mw()
1629 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1632 dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1634 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1636 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1639 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1642 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1645 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr); in xeon_setup_b2b_mw()
1648 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
1651 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr); in xeon_setup_b2b_mw()
1657 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1666 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1677 dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1679 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
1683 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1688 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr); in xeon_setup_b2b_mw()
1693 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr); in xeon_setup_b2b_mw()
1701 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
1711 dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr); in xeon_setup_b2b_mw()
1717 ndev->peer_mmio = pci_iomap(pdev, b2b_bar, in xeon_setup_b2b_mw()
1719 if (!ndev->peer_mmio) in xeon_setup_b2b_mw()
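
The size negotiation early in xeon_setup_b2b_mw() decides how much of the shared BAR the b2b registers consume. Condensed below (b2b_mw_share mirrors the driver's module parameter of the same name; treat the exact condition as an assumption):

	if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
		ndev->b2b_off = bar_size >> 1;	/* b2b takes the first half;
						 * the second half stays a
						 * usable memory window */
	} else if (XEON_B2B_MIN_SIZE <= bar_size) {
		ndev->b2b_off = 0;		/* b2b consumes the whole BAR */
		--ndev->mw_count;		/* ...so stop advertising it  */
	} else {
		return -EIO;			/* BAR too small for b2b      */
	}
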
1726 static int xeon_init_ntb(struct intel_ntb_dev *ndev) in xeon_init_ntb() argument
1731 if (ndev->bar4_split) in xeon_init_ntb()
1732 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT; in xeon_init_ntb()
1734 ndev->mw_count = XEON_MW_COUNT; in xeon_init_ntb()
1736 ndev->spad_count = XEON_SPAD_COUNT; in xeon_init_ntb()
1737 ndev->db_count = XEON_DB_COUNT; in xeon_init_ntb()
1738 ndev->db_link_mask = XEON_DB_LINK_BIT; in xeon_init_ntb()
1740 switch (ndev->ntb.topo) { in xeon_init_ntb()
1742 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1743 dev_err(ndev_dev(ndev), "NTB Primary config disabled\n"); in xeon_init_ntb()
1748 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
1750 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
1753 ndev->spad_count >>= 1; in xeon_init_ntb()
1754 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
1755 ndev->peer_reg = &xeon_sec_reg; in xeon_init_ntb()
1756 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
1760 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1761 dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n"); in xeon_init_ntb()
1765 ndev->spad_count >>= 1; in xeon_init_ntb()
1766 ndev->self_reg = &xeon_sec_reg; in xeon_init_ntb()
1767 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
1768 ndev->xlat_reg = &xeon_pri_xlat; in xeon_init_ntb()
1773 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
1774 ndev->peer_reg = &xeon_b2b_reg; in xeon_init_ntb()
1775 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
1777 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
1778 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
1781 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count; in xeon_init_ntb()
1783 ndev->b2b_idx = b2b_mw_idx; in xeon_init_ntb()
1785 if (ndev->b2b_idx >= ndev->mw_count) { in xeon_init_ntb()
1786 dev_dbg(ndev_dev(ndev), in xeon_init_ntb()
1788 b2b_mw_idx, ndev->mw_count); in xeon_init_ntb()
1792 dev_dbg(ndev_dev(ndev), in xeon_init_ntb()
1794 b2b_mw_idx, ndev->b2b_idx); in xeon_init_ntb()
1796 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { in xeon_init_ntb()
1797 dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n"); in xeon_init_ntb()
1798 ndev->db_count -= 1; in xeon_init_ntb()
1801 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { in xeon_init_ntb()
1802 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
1806 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
1815 ndev->self_mmio + XEON_SPCICMD_OFFSET); in xeon_init_ntb()
1823 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in xeon_init_ntb()
1825 ndev->reg->db_iowrite(ndev->db_valid_mask, in xeon_init_ntb()
1826 ndev->self_mmio + in xeon_init_ntb()
1827 ndev->self_reg->db_mask); in xeon_init_ntb()
1832 static int xeon_init_dev(struct intel_ntb_dev *ndev) in xeon_init_dev() argument
1838 pdev = ndev_pdev(ndev); in xeon_init_dev()
1862 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP; in xeon_init_dev()
1879 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP; in xeon_init_dev()
1903 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14; in xeon_init_dev()
1907 ndev->reg = &xeon_reg; in xeon_init_dev()
1913 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); in xeon_init_dev()
1914 dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, in xeon_init_dev()
1915 ntb_topo_string(ndev->ntb.topo)); in xeon_init_dev()
1916 if (ndev->ntb.topo == NTB_TOPO_NONE) in xeon_init_dev()
1919 if (ndev->ntb.topo != NTB_TOPO_SEC) { in xeon_init_dev()
1920 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); in xeon_init_dev()
1921 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n", in xeon_init_dev()
1922 ppd, ndev->bar4_split); in xeon_init_dev()
1929 ndev->bar4_split = hweight32(mem) == in xeon_init_dev()
1931 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n", in xeon_init_dev()
1932 mem, ndev->bar4_split); in xeon_init_dev()
1935 rc = xeon_init_ntb(ndev); in xeon_init_dev()
1939 return xeon_init_isr(ndev); in xeon_init_dev()
1942 static void xeon_deinit_dev(struct intel_ntb_dev *ndev) in xeon_deinit_dev() argument
1944 xeon_deinit_isr(ndev); in xeon_deinit_dev()
1947 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) in intel_ntb_init_pci() argument
1951 pci_set_drvdata(pdev, ndev); in intel_ntb_init_pci()
1968 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n"); in intel_ntb_init_pci()
1976 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n"); in intel_ntb_init_pci()
1979 ndev->self_mmio = pci_iomap(pdev, 0, 0); in intel_ntb_init_pci()
1980 if (!ndev->self_mmio) { in intel_ntb_init_pci()
1984 ndev->peer_mmio = ndev->self_mmio; in intel_ntb_init_pci()
1999 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) in intel_ntb_deinit_pci() argument
2001 struct pci_dev *pdev = ndev_pdev(ndev); in intel_ntb_deinit_pci()
2003 if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) in intel_ntb_deinit_pci()
2004 pci_iounmap(pdev, ndev->peer_mmio); in intel_ntb_deinit_pci()
2005 pci_iounmap(pdev, ndev->self_mmio); in intel_ntb_deinit_pci()
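
The two "Cannot DMA ... highmem" warnings in intel_ntb_init_pci() come from a standard 64-to-32-bit DMA mask fallback, applied to both the streaming and the coherent mask. The streaming half, sketched with the era-appropriate PCI helpers:

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;	/* label elided in the listing */
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}
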
2013 static inline void ndev_init_struct(struct intel_ntb_dev *ndev, in ndev_init_struct() argument
2016 ndev->ntb.pdev = pdev; in ndev_init_struct()
2017 ndev->ntb.topo = NTB_TOPO_NONE; in ndev_init_struct()
2018 ndev->ntb.ops = &intel_ntb_ops; in ndev_init_struct()
2020 ndev->b2b_off = 0; in ndev_init_struct()
2021 ndev->b2b_idx = UINT_MAX; in ndev_init_struct()
2023 ndev->bar4_split = 0; in ndev_init_struct()
2025 ndev->mw_count = 0; in ndev_init_struct()
2026 ndev->spad_count = 0; in ndev_init_struct()
2027 ndev->db_count = 0; in ndev_init_struct()
2028 ndev->db_vec_count = 0; in ndev_init_struct()
2029 ndev->db_vec_shift = 0; in ndev_init_struct()
2031 ndev->ntb_ctl = 0; in ndev_init_struct()
2032 ndev->lnk_sta = 0; in ndev_init_struct()
2034 ndev->db_valid_mask = 0; in ndev_init_struct()
2035 ndev->db_link_mask = 0; in ndev_init_struct()
2036 ndev->db_mask = 0; in ndev_init_struct()
2038 spin_lock_init(&ndev->db_mask_lock); in ndev_init_struct()
2044 struct intel_ntb_dev *ndev; in intel_ntb_pci_probe() local
2050 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
2051 if (!ndev) { in intel_ntb_pci_probe()
2056 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
2058 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
2062 rc = atom_init_dev(ndev); in intel_ntb_pci_probe()
2067 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
2068 if (!ndev) { in intel_ntb_pci_probe()
2073 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
2075 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
2079 rc = xeon_init_dev(ndev); in intel_ntb_pci_probe()
2088 ndev_reset_unsafe_flags(ndev); in intel_ntb_pci_probe()
2090 ndev->reg->poll_link(ndev); in intel_ntb_pci_probe()
2092 ndev_init_debugfs(ndev); in intel_ntb_pci_probe()
2094 rc = ntb_register_device(&ndev->ntb); in intel_ntb_pci_probe()
2103 ndev_deinit_debugfs(ndev); in intel_ntb_pci_probe()
2105 atom_deinit_dev(ndev); in intel_ntb_pci_probe()
2107 xeon_deinit_dev(ndev); in intel_ntb_pci_probe()
2109 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_probe()
2111 kfree(ndev); in intel_ntb_pci_probe()
2118 struct intel_ntb_dev *ndev = pci_get_drvdata(pdev); in intel_ntb_pci_remove() local
2120 ntb_unregister_device(&ndev->ntb); in intel_ntb_pci_remove()
2121 ndev_deinit_debugfs(ndev); in intel_ntb_pci_remove()
2123 atom_deinit_dev(ndev); in intel_ntb_pci_remove()
2125 xeon_deinit_dev(ndev); in intel_ntb_pci_remove()
2126 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_remove()
2127 kfree(ndev); in intel_ntb_pci_remove()