Lines matching refs: ndev. Each entry gives the source line number, the matching source line, and its enclosing function; "argument" and "local" mark matches that are a function parameter or a local variable. Lines that do not mention ndev are elided.
155 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
156 static int xeon_init_isr(struct intel_ntb_dev *ndev);
227 static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) in ndev_reset_unsafe_flags() argument
229 ndev->unsafe_flags = 0; in ndev_reset_unsafe_flags()
230 ndev->unsafe_flags_ignore = 0; in ndev_reset_unsafe_flags()
233 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) in ndev_reset_unsafe_flags()
234 if (!ntb_topo_is_b2b(ndev->ntb.topo)) in ndev_reset_unsafe_flags()
235 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
238 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) { in ndev_reset_unsafe_flags()
239 ndev->unsafe_flags |= NTB_UNSAFE_DB; in ndev_reset_unsafe_flags()
240 ndev->unsafe_flags |= NTB_UNSAFE_SPAD; in ndev_reset_unsafe_flags()
244 static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev, in ndev_is_unsafe() argument
247 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore); in ndev_is_unsafe()
250 static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev, in ndev_ignore_unsafe() argument
253 flag &= ndev->unsafe_flags; in ndev_ignore_unsafe()
254 ndev->unsafe_flags_ignore |= flag; in ndev_ignore_unsafe()
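
The two helpers above implement the errata gating: ndev_is_unsafe() reports a flag that is both set in unsafe_flags and not explicitly ignored, and ndev_ignore_unsafe() lets a caller opt in to the unsafe behavior anyway. A minimal sketch of how the accessors below consume this check; the pr_warn_once() wording is an assumption, since only the ndev_is_unsafe() call itself appears in the listing:

    if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
            pr_warn_once("%s: NTB unsafe doorbell access", __func__);
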
259 static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) in ndev_mw_to_bar() argument
261 if (idx < 0 || idx >= ndev->mw_count) in ndev_mw_to_bar()
263 return ndev->reg->mw_bar[idx]; in ndev_mw_to_bar()
266 static inline int ndev_db_addr(struct intel_ntb_dev *ndev, in ndev_db_addr() argument
270 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_addr()
275 dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr); in ndev_db_addr()
279 *db_size = ndev->reg->db_size; in ndev_db_addr()
280 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); in ndev_db_addr()
286 static inline u64 ndev_db_read(struct intel_ntb_dev *ndev, in ndev_db_read() argument
289 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_read()
292 return ndev->reg->db_ioread(mmio); in ndev_db_read()
295 static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_write() argument
298 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_write()
301 if (db_bits & ~ndev->db_valid_mask) in ndev_db_write()
304 ndev->reg->db_iowrite(db_bits, mmio); in ndev_db_write()
309 static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_set_mask() argument
314 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_set_mask()
317 if (db_bits & ~ndev->db_valid_mask) in ndev_db_set_mask()
320 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
322 ndev->db_mask |= db_bits; in ndev_db_set_mask()
323 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_set_mask()
325 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_set_mask()
330 static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, in ndev_db_clear_mask() argument
335 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) in ndev_db_clear_mask()
338 if (db_bits & ~ndev->db_valid_mask) in ndev_db_clear_mask()
341 spin_lock_irqsave(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
343 ndev->db_mask &= ~db_bits; in ndev_db_clear_mask()
344 ndev->reg->db_iowrite(ndev->db_mask, mmio); in ndev_db_clear_mask()
346 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); in ndev_db_clear_mask()
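
The two mask helpers above keep a cached shadow of the doorbell mask (ndev->db_mask) under db_mask_lock and write the whole shadow to MMIO, so concurrent callers never lose each other's bits. A hedged usage sketch through the generic ntb.h wrappers, which route into these helpers via the intel_ntb_db_set_mask()/intel_ntb_db_clear_mask() ops further down:

    ntb_db_set_mask(ntb, db_bits);    /* mask: lands in ndev_db_set_mask() */
    /* ... drain the work associated with these doorbells ... */
    ntb_db_clear_mask(ntb, db_bits);  /* unmask: ndev_db_clear_mask() */
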
351 static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) in ndev_vec_mask() argument
355 shift = ndev->db_vec_shift; in ndev_vec_mask()
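
The listing elides lines that do not mention ndev, so only the shift load of ndev_vec_mask() is visible. A reconstruction consistent with that fragment, assuming each interrupt vector owns a contiguous group of 2^db_vec_shift doorbell bits:

    static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
    {
            u64 shift = ndev->db_vec_shift;
            u64 mask = BIT_ULL(shift) - 1;       /* one group of doorbell bits */

            return mask << (shift * db_vector);  /* shifted to this vector's group */
    }
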
361 static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, in ndev_spad_addr() argument
365 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_addr()
368 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_addr()
373 dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n", in ndev_spad_addr()
380 static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx, in ndev_spad_read() argument
383 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_read()
386 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_read()
392 static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val, in ndev_spad_write() argument
395 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) in ndev_spad_write()
398 if (idx < 0 || idx >= ndev->spad_count) in ndev_spad_write()
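
Scratchpads are 32-bit registers at a 4-byte stride, so after the bounds checks the three spad helpers above reduce to base-plus-offset arithmetic. A sketch of the addressing they imply; the idx << 2 scaling is an assumption, since the listing shows only the range checks:

    /* scratchpad idx lives at base + idx * sizeof(u32) */
    *spad_addr = reg_addr + reg + (idx << 2);   /* ndev_spad_addr() */
    val = ioread32(mmio + (idx << 2));          /* ndev_spad_read() */
    iowrite32(val, mmio + (idx << 2));          /* ndev_spad_write() */
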
406 static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) in ndev_interrupt() argument
410 vec_mask = ndev_vec_mask(ndev, vec); in ndev_interrupt()
412 if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) in ndev_interrupt()
413 vec_mask |= ndev->db_link_mask; in ndev_interrupt()
415 dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask); in ndev_interrupt()
417 ndev->last_ts = jiffies; in ndev_interrupt()
419 if (vec_mask & ndev->db_link_mask) { in ndev_interrupt()
420 if (ndev->reg->poll_link(ndev)) in ndev_interrupt()
421 ntb_link_event(&ndev->ntb); in ndev_interrupt()
424 if (vec_mask & ndev->db_valid_mask) in ndev_interrupt()
425 ntb_db_event(&ndev->ntb, vec); in ndev_interrupt()
434 dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n", in ndev_vec_isr()
437 return ndev_interrupt(nvec->ndev, nvec->num); in ndev_vec_isr()
442 struct intel_ntb_dev *ndev = dev; in ndev_irq_isr() local
444 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); in ndev_irq_isr()
447 static int ndev_init_isr(struct intel_ntb_dev *ndev, in ndev_init_isr() argument
454 pdev = ndev->ntb.pdev; in ndev_init_isr()
459 ndev->db_mask = ndev->db_valid_mask; in ndev_init_isr()
460 ndev->reg->db_iowrite(ndev->db_mask, in ndev_init_isr()
461 ndev->self_mmio + in ndev_init_isr()
462 ndev->self_reg->db_mask); in ndev_init_isr()
466 ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec), in ndev_init_isr()
468 if (!ndev->vec) in ndev_init_isr()
471 ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix), in ndev_init_isr()
473 if (!ndev->msix) in ndev_init_isr()
477 ndev->msix[i].entry = i; in ndev_init_isr()
479 msix_count = pci_enable_msix_range(pdev, ndev->msix, in ndev_init_isr()
485 ndev->vec[i].ndev = ndev; in ndev_init_isr()
486 ndev->vec[i].num = i; in ndev_init_isr()
487 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, in ndev_init_isr()
488 "ndev_vec_isr", &ndev->vec[i]); in ndev_init_isr()
494 ndev->db_vec_count = msix_count; in ndev_init_isr()
495 ndev->db_vec_shift = msix_shift; in ndev_init_isr()
500 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_init_isr()
503 kfree(ndev->msix); in ndev_init_isr()
505 kfree(ndev->vec); in ndev_init_isr()
507 ndev->msix = NULL; in ndev_init_isr()
508 ndev->vec = NULL; in ndev_init_isr()
517 "ndev_irq_isr", ndev); in ndev_init_isr()
522 ndev->db_vec_count = 1; in ndev_init_isr()
523 ndev->db_vec_shift = total_shift; in ndev_init_isr()
535 "ndev_irq_isr", ndev); in ndev_init_isr()
540 ndev->db_vec_count = 1; in ndev_init_isr()
541 ndev->db_vec_shift = total_shift; in ndev_init_isr()
548 static void ndev_deinit_isr(struct intel_ntb_dev *ndev) in ndev_deinit_isr() argument
553 pdev = ndev->ntb.pdev; in ndev_deinit_isr()
556 ndev->db_mask = ndev->db_valid_mask; in ndev_deinit_isr()
557 ndev->reg->db_iowrite(ndev->db_mask, in ndev_deinit_isr()
558 ndev->self_mmio + in ndev_deinit_isr()
559 ndev->self_reg->db_mask); in ndev_deinit_isr()
561 if (ndev->msix) { in ndev_deinit_isr()
562 i = ndev->db_vec_count; in ndev_deinit_isr()
564 free_irq(ndev->msix[i].vector, &ndev->vec[i]); in ndev_deinit_isr()
566 kfree(ndev->msix); in ndev_deinit_isr()
567 kfree(ndev->vec); in ndev_deinit_isr()
569 free_irq(pdev->irq, ndev); in ndev_deinit_isr()
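
The three db_vec_count/db_vec_shift assignments above suggest ndev_init_isr() tries interrupt modes in descending order of capability: per-vector MSI-X, then single-vector MSI, then legacy INTx, with the latter two funneling every doorbell bit through one ndev_irq_isr() handler. A structural sketch under that assumption:

    /* Sketch of the fallback ladder implied by the fragments above. */
    msix_count = pci_enable_msix_range(pdev, ndev->msix, msix_min, msix_max);
    if (msix_count > 0) {
            /* one ndev_vec_isr() per vector; db_vec_shift splits the bits */
    } else if (!pci_enable_msi(pdev)) {
            request_irq(pdev->irq, ndev_irq_isr, 0, "ndev_irq_isr", ndev);
    } else {
            request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                        "ndev_irq_isr", ndev);
    }
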
578 struct intel_ntb_dev *ndev; in ndev_ntb3_debugfs_read() local
585 ndev = filp->private_data; in ndev_ntb3_debugfs_read()
586 mmio = ndev->self_mmio; in ndev_ntb3_debugfs_read()
601 ntb_topo_string(ndev->ntb.topo)); in ndev_ntb3_debugfs_read()
604 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); in ndev_ntb3_debugfs_read()
606 "LNK STA -\t\t%#06x\n", ndev->lnk_sta); in ndev_ntb3_debugfs_read()
608 if (!ndev->reg->link_is_up(ndev)) in ndev_ntb3_debugfs_read()
616 NTB_LNK_STA_SPEED(ndev->lnk_sta)); in ndev_ntb3_debugfs_read()
619 NTB_LNK_STA_WIDTH(ndev->lnk_sta)); in ndev_ntb3_debugfs_read()
623 "Memory Window Count -\t%u\n", ndev->mw_count); in ndev_ntb3_debugfs_read()
625 "Scratchpad Count -\t%u\n", ndev->spad_count); in ndev_ntb3_debugfs_read()
627 "Doorbell Count -\t%u\n", ndev->db_count); in ndev_ntb3_debugfs_read()
629 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); in ndev_ntb3_debugfs_read()
631 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); in ndev_ntb3_debugfs_read()
634 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); in ndev_ntb3_debugfs_read()
636 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); in ndev_ntb3_debugfs_read()
638 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); in ndev_ntb3_debugfs_read()
640 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); in ndev_ntb3_debugfs_read()
644 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); in ndev_ntb3_debugfs_read()
667 if (ntb_topo_is_b2b(ndev->ntb.topo)) { in ndev_ntb3_debugfs_read()
713 if (!pci_read_config_word(ndev->ntb.pdev, in ndev_ntb3_debugfs_read()
718 if (!pci_read_config_word(ndev->ntb.pdev, in ndev_ntb3_debugfs_read()
723 if (!pci_read_config_dword(ndev->ntb.pdev, in ndev_ntb3_debugfs_read()
728 if (!pci_read_config_dword(ndev->ntb.pdev, in ndev_ntb3_debugfs_read()
741 struct intel_ntb_dev *ndev; in ndev_ntb_debugfs_read() local
749 ndev = filp->private_data; in ndev_ntb_debugfs_read()
750 pdev = ndev->ntb.pdev; in ndev_ntb_debugfs_read()
751 mmio = ndev->self_mmio; in ndev_ntb_debugfs_read()
766 ntb_topo_string(ndev->ntb.topo)); in ndev_ntb_debugfs_read()
768 if (ndev->b2b_idx != UINT_MAX) { in ndev_ntb_debugfs_read()
770 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx); in ndev_ntb_debugfs_read()
772 "B2B Offset -\t\t%#lx\n", ndev->b2b_off); in ndev_ntb_debugfs_read()
777 ndev->bar4_split ? "yes" : "no"); in ndev_ntb_debugfs_read()
780 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); in ndev_ntb_debugfs_read()
782 "LNK STA -\t\t%#06x\n", ndev->lnk_sta); in ndev_ntb_debugfs_read()
784 if (!ndev->reg->link_is_up(ndev)) { in ndev_ntb_debugfs_read()
792 NTB_LNK_STA_SPEED(ndev->lnk_sta)); in ndev_ntb_debugfs_read()
795 NTB_LNK_STA_WIDTH(ndev->lnk_sta)); in ndev_ntb_debugfs_read()
799 "Memory Window Count -\t%u\n", ndev->mw_count); in ndev_ntb_debugfs_read()
801 "Scratchpad Count -\t%u\n", ndev->spad_count); in ndev_ntb_debugfs_read()
803 "Doorbell Count -\t%u\n", ndev->db_count); in ndev_ntb_debugfs_read()
805 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); in ndev_ntb_debugfs_read()
807 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); in ndev_ntb_debugfs_read()
810 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); in ndev_ntb_debugfs_read()
812 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); in ndev_ntb_debugfs_read()
814 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); in ndev_ntb_debugfs_read()
816 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); in ndev_ntb_debugfs_read()
820 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); in ndev_ntb_debugfs_read()
830 if (!ndev->bar4_split) { in ndev_ntb_debugfs_read()
846 if (!ndev->bar4_split) { in ndev_ntb_debugfs_read()
862 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2)); in ndev_ntb_debugfs_read()
866 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
867 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_ntb_debugfs_read()
871 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5)); in ndev_ntb_debugfs_read()
875 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); in ndev_ntb_debugfs_read()
880 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2)); in ndev_ntb_debugfs_read()
884 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
885 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_ntb_debugfs_read()
888 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5)); in ndev_ntb_debugfs_read()
892 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); in ndev_ntb_debugfs_read()
898 if (ntb_topo_is_b2b(ndev->ntb.topo)) { in ndev_ntb_debugfs_read()
906 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
926 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
953 if (ndev->bar4_split) { in ndev_ntb_debugfs_read()
1007 struct intel_ntb_dev *ndev = filp->private_data; in ndev_debugfs_read() local
1009 if (pdev_is_xeon(ndev->ntb.pdev) || in ndev_debugfs_read()
1010 pdev_is_atom(ndev->ntb.pdev)) in ndev_debugfs_read()
1012 else if (pdev_is_skx_xeon(ndev->ntb.pdev)) in ndev_debugfs_read()
1018 static void ndev_init_debugfs(struct intel_ntb_dev *ndev) in ndev_init_debugfs() argument
1021 ndev->debugfs_dir = NULL; in ndev_init_debugfs()
1022 ndev->debugfs_info = NULL; in ndev_init_debugfs()
1024 ndev->debugfs_dir = in ndev_init_debugfs()
1025 debugfs_create_dir(pci_name(ndev->ntb.pdev), in ndev_init_debugfs()
1027 if (!ndev->debugfs_dir) in ndev_init_debugfs()
1028 ndev->debugfs_info = NULL; in ndev_init_debugfs()
1030 ndev->debugfs_info = in ndev_init_debugfs()
1032 ndev->debugfs_dir, ndev, in ndev_init_debugfs()
1037 static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) in ndev_deinit_debugfs() argument
1039 debugfs_remove_recursive(ndev->debugfs_dir); in ndev_deinit_debugfs()
1055 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_get_align() local
1062 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_get_align()
1065 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_get_align()
1069 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_get_align()
1071 if (idx == ndev->b2b_idx) in intel_ntb_mw_get_align()
1072 mw_size = bar_size - ndev->b2b_off; in intel_ntb_mw_get_align()
1077 *addr_align = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_get_align()
1091 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_mw_set_trans() local
1101 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_mw_set_trans()
1104 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_mw_set_trans()
1108 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb_mw_set_trans()
1110 if (idx == ndev->b2b_idx) in intel_ntb_mw_set_trans()
1111 mw_size = bar_size - ndev->b2b_off; in intel_ntb_mw_set_trans()
1123 mmio = ndev->self_mmio; in intel_ntb_mw_set_trans()
1124 base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar); in intel_ntb_mw_set_trans()
1125 xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar); in intel_ntb_mw_set_trans()
1126 limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar); in intel_ntb_mw_set_trans()
1128 if (bar < 4 || !ndev->bar4_split) { in intel_ntb_mw_set_trans()
1193 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_link_is_up() local
1195 if (ndev->reg->link_is_up(ndev)) { in intel_ntb_link_is_up()
1197 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); in intel_ntb_link_is_up()
1199 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); in intel_ntb_link_is_up()
1216 struct intel_ntb_dev *ndev; in intel_ntb_link_enable() local
1219 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_enable()
1221 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_enable()
1232 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
1236 if (ndev->bar4_split) in intel_ntb_link_enable()
1238 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_enable()
1245 struct intel_ntb_dev *ndev; in intel_ntb_link_disable() local
1248 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_link_disable()
1250 if (ndev->ntb.topo == NTB_TOPO_SEC) in intel_ntb_link_disable()
1256 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
1259 if (ndev->bar4_split) in intel_ntb_link_disable()
1262 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb_link_disable()
1276 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_mw_get_addr() local
1279 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb_peer_mw_get_addr()
1282 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb_peer_mw_get_addr()
1287 *base = pci_resource_start(ndev->ntb.pdev, bar) + in intel_ntb_peer_mw_get_addr()
1288 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_peer_mw_get_addr()
1291 *size = pci_resource_len(ndev->ntb.pdev, bar) - in intel_ntb_peer_mw_get_addr()
1292 (idx == ndev->b2b_idx ? ndev->b2b_off : 0); in intel_ntb_peer_mw_get_addr()
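
When the requested window shares a BAR with the b2b region (idx == b2b_idx with a nonzero b2b_off), the peer-visible window is the BAR minus the reserved b2b region, as the two expressions above compute. A worked example with hypothetical sizes, assuming a 64 KiB BAR split in half:

    /* hypothetical: 64 KiB BAR, b2b_off = bar_size >> 1 = SZ_32K */
    base = pci_resource_start(pdev, bar) + SZ_32K;  /* skip the b2b region */
    size = pci_resource_len(pdev, bar) - SZ_32K;    /* 32 KiB window remains */
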
1309 struct intel_ntb_dev *ndev; in intel_ntb_db_vector_count() local
1311 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_db_vector_count()
1313 return ndev->db_vec_count; in intel_ntb_db_vector_count()
1318 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_vector_mask() local
1320 if (db_vector < 0 || db_vector > ndev->db_vec_count) in intel_ntb_db_vector_mask()
1323 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector); in intel_ntb_db_vector_mask()
1328 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_read() local
1330 return ndev_db_read(ndev, in intel_ntb_db_read()
1331 ndev->self_mmio + in intel_ntb_db_read()
1332 ndev->self_reg->db_bell); in intel_ntb_db_read()
1337 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear() local
1339 return ndev_db_write(ndev, db_bits, in intel_ntb_db_clear()
1340 ndev->self_mmio + in intel_ntb_db_clear()
1341 ndev->self_reg->db_bell); in intel_ntb_db_clear()
1346 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_set_mask() local
1348 return ndev_db_set_mask(ndev, db_bits, in intel_ntb_db_set_mask()
1349 ndev->self_mmio + in intel_ntb_db_set_mask()
1350 ndev->self_reg->db_mask); in intel_ntb_db_set_mask()
1355 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_db_clear_mask() local
1357 return ndev_db_clear_mask(ndev, db_bits, in intel_ntb_db_clear_mask()
1358 ndev->self_mmio + in intel_ntb_db_clear_mask()
1359 ndev->self_reg->db_mask); in intel_ntb_db_clear_mask()
1366 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_addr() local
1368 return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, in intel_ntb_peer_db_addr()
1369 ndev->peer_reg->db_bell); in intel_ntb_peer_db_addr()
1374 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_db_set() local
1376 return ndev_db_write(ndev, db_bits, in intel_ntb_peer_db_set()
1377 ndev->peer_mmio + in intel_ntb_peer_db_set()
1378 ndev->peer_reg->db_bell); in intel_ntb_peer_db_set()
1388 struct intel_ntb_dev *ndev; in intel_ntb_spad_count() local
1390 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb_spad_count()
1392 return ndev->spad_count; in intel_ntb_spad_count()
1397 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_read() local
1399 return ndev_spad_read(ndev, idx, in intel_ntb_spad_read()
1400 ndev->self_mmio + in intel_ntb_spad_read()
1401 ndev->self_reg->spad); in intel_ntb_spad_read()
1407 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_spad_write() local
1409 return ndev_spad_write(ndev, idx, val, in intel_ntb_spad_write()
1410 ndev->self_mmio + in intel_ntb_spad_write()
1411 ndev->self_reg->spad); in intel_ntb_spad_write()
1417 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_addr() local
1419 return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr, in intel_ntb_peer_spad_addr()
1420 ndev->peer_reg->spad); in intel_ntb_peer_spad_addr()
1425 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_read() local
1427 return ndev_spad_read(ndev, sidx, in intel_ntb_peer_spad_read()
1428 ndev->peer_mmio + in intel_ntb_peer_spad_read()
1429 ndev->peer_reg->spad); in intel_ntb_peer_spad_read()
1435 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb_peer_spad_write() local
1437 return ndev_spad_write(ndev, sidx, val, in intel_ntb_peer_spad_write()
1438 ndev->peer_mmio + in intel_ntb_peer_spad_write()
1439 ndev->peer_reg->spad); in intel_ntb_peer_spad_write()
1454 static int atom_poll_link(struct intel_ntb_dev *ndev) in atom_poll_link() argument
1458 ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET); in atom_poll_link()
1460 if (ntb_ctl == ndev->ntb_ctl) in atom_poll_link()
1463 ndev->ntb_ctl = ntb_ctl; in atom_poll_link()
1465 ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET); in atom_poll_link()
1470 static int atom_link_is_up(struct intel_ntb_dev *ndev) in atom_link_is_up() argument
1472 return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl); in atom_link_is_up()
1475 static int atom_link_is_err(struct intel_ntb_dev *ndev) in atom_link_is_err() argument
1477 if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET) in atom_link_is_err()
1481 if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET) in atom_link_is_err()
1488 static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) in atom_ppd_topo() argument
1490 struct device *dev = &ndev->ntb.pdev->dev; in atom_ppd_topo()
1515 struct intel_ntb_dev *ndev = hb_ndev(work); in atom_link_hb() local
1516 struct device *dev = &ndev->ntb.pdev->dev; in atom_link_hb()
1521 poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT; in atom_link_hb()
1526 if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) { in atom_link_hb()
1527 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies); in atom_link_hb()
1531 if (atom_poll_link(ndev)) in atom_link_hb()
1532 ntb_link_event(&ndev->ntb); in atom_link_hb()
1534 if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) { in atom_link_hb()
1535 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT); in atom_link_hb()
1541 mmio = ndev->self_mmio; in atom_link_hb()
1586 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME in atom_link_hb()
1590 static int atom_init_isr(struct intel_ntb_dev *ndev) in atom_init_isr() argument
1594 rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT, in atom_init_isr()
1600 ndev->last_ts = jiffies; in atom_init_isr()
1601 INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb); in atom_init_isr()
1602 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT); in atom_init_isr()
1607 static void atom_deinit_isr(struct intel_ntb_dev *ndev) in atom_deinit_isr() argument
1609 cancel_delayed_work_sync(&ndev->hb_timer); in atom_deinit_isr()
1610 ndev_deinit_isr(ndev); in atom_deinit_isr()
1613 static int atom_init_ntb(struct intel_ntb_dev *ndev) in atom_init_ntb() argument
1615 ndev->mw_count = ATOM_MW_COUNT; in atom_init_ntb()
1616 ndev->spad_count = ATOM_SPAD_COUNT; in atom_init_ntb()
1617 ndev->db_count = ATOM_DB_COUNT; in atom_init_ntb()
1619 switch (ndev->ntb.topo) { in atom_init_ntb()
1622 ndev->self_reg = &atom_pri_reg; in atom_init_ntb()
1623 ndev->peer_reg = &atom_b2b_reg; in atom_init_ntb()
1624 ndev->xlat_reg = &atom_sec_xlat; in atom_init_ntb()
1628 ndev->self_mmio + ATOM_SPCICMD_OFFSET); in atom_init_ntb()
1636 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in atom_init_ntb()
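
BIT_ULL(n) - 1 sets the n low bits, so db_valid_mask covers exactly db_count doorbells. A worked example with a hypothetical count:

    u64 mask = BIT_ULL(16) - 1;   /* db_count = 16 -> mask = 0xffff */
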
1641 static int atom_init_dev(struct intel_ntb_dev *ndev) in atom_init_dev() argument
1646 rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd); in atom_init_dev()
1650 ndev->ntb.topo = atom_ppd_topo(ndev, ppd); in atom_init_dev()
1651 if (ndev->ntb.topo == NTB_TOPO_NONE) in atom_init_dev()
1654 rc = atom_init_ntb(ndev); in atom_init_dev()
1658 rc = atom_init_isr(ndev); in atom_init_dev()
1662 if (ndev->ntb.topo != NTB_TOPO_SEC) { in atom_init_dev()
1664 rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, in atom_init_dev()
1673 static void atom_deinit_dev(struct intel_ntb_dev *ndev) in atom_deinit_dev() argument
1675 atom_deinit_isr(ndev); in atom_deinit_dev()
1680 static int skx_poll_link(struct intel_ntb_dev *ndev) in skx_poll_link() argument
1685 ndev->reg->db_iowrite(ndev->db_link_mask, in skx_poll_link()
1686 ndev->self_mmio + in skx_poll_link()
1687 ndev->self_reg->db_clear); in skx_poll_link()
1689 rc = pci_read_config_word(ndev->ntb.pdev, in skx_poll_link()
1694 if (reg_val == ndev->lnk_sta) in skx_poll_link()
1697 ndev->lnk_sta = reg_val; in skx_poll_link()
1712 static int skx_init_isr(struct intel_ntb_dev *ndev) in skx_init_isr() argument
1724 iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i); in skx_init_isr()
1727 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) { in skx_init_isr()
1729 ndev->self_mmio + SKX_INTVEC_OFFSET + in skx_init_isr()
1733 return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT, in skx_init_isr()
1739 static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, in skx_setup_b2b_mw() argument
1750 pdev = ndev->ntb.pdev; in skx_setup_b2b_mw()
1751 mmio = ndev->self_mmio; in skx_setup_b2b_mw()
1753 if (ndev->b2b_idx == UINT_MAX) { in skx_setup_b2b_mw()
1756 ndev->b2b_off = 0; in skx_setup_b2b_mw()
1758 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); in skx_setup_b2b_mw()
1764 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); in skx_setup_b2b_mw()
1770 ndev->b2b_off = bar_size >> 1; in skx_setup_b2b_mw()
1773 ndev->b2b_off = 0; in skx_setup_b2b_mw()
1774 --ndev->mw_count; in skx_setup_b2b_mw()
1788 if (ndev->b2b_off) in skx_setup_b2b_mw()
1801 if (ndev->b2b_off) in skx_setup_b2b_mw()
1822 bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); in skx_setup_b2b_mw()
1827 bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in skx_setup_b2b_mw()
1836 ndev->peer_mmio = ndev->self_mmio; in skx_setup_b2b_mw()
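
Taken together, the fragments of skx_setup_b2b_mw() show the BAR-sharing policy: if the b2b BAR is large enough, it is split in half (b2b_off = bar_size >> 1) so one half remains usable as a memory window; otherwise the whole BAR is consumed by b2b traffic and mw_count is decremented. A sketch of that decision; the size predicate and B2B_MIN_SIZE name are hypothetical, since only the assignments appear in the listing:

    if (bar_size >= 2 * B2B_MIN_SIZE) {      /* hypothetical threshold */
            ndev->b2b_off = bar_size >> 1;   /* half MW, half b2b */
    } else {
            ndev->b2b_off = 0;               /* whole BAR is b2b */
            --ndev->mw_count;                /* one fewer MW exposed */
    }
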
1841 static int skx_init_ntb(struct intel_ntb_dev *ndev) in skx_init_ntb() argument
1846 ndev->mw_count = XEON_MW_COUNT; in skx_init_ntb()
1847 ndev->spad_count = SKX_SPAD_COUNT; in skx_init_ntb()
1848 ndev->db_count = SKX_DB_COUNT; in skx_init_ntb()
1849 ndev->db_link_mask = SKX_DB_LINK_BIT; in skx_init_ntb()
1852 if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) in skx_init_ntb()
1853 ndev->db_link_mask |= BIT_ULL(31); in skx_init_ntb()
1855 switch (ndev->ntb.topo) { in skx_init_ntb()
1858 ndev->self_reg = &skx_pri_reg; in skx_init_ntb()
1859 ndev->peer_reg = &skx_b2b_reg; in skx_init_ntb()
1860 ndev->xlat_reg = &skx_sec_xlat; in skx_init_ntb()
1862 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { in skx_init_ntb()
1863 rc = skx_setup_b2b_mw(ndev, in skx_init_ntb()
1867 rc = skx_setup_b2b_mw(ndev, in skx_init_ntb()
1877 ndev->self_mmio + SKX_SPCICMD_OFFSET); in skx_init_ntb()
1885 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in skx_init_ntb()
1887 ndev->reg->db_iowrite(ndev->db_valid_mask, in skx_init_ntb()
1888 ndev->self_mmio + in skx_init_ntb()
1889 ndev->self_reg->db_mask); in skx_init_ntb()
1894 static int skx_init_dev(struct intel_ntb_dev *ndev) in skx_init_dev() argument
1900 pdev = ndev->ntb.pdev; in skx_init_dev()
1902 ndev->reg = &skx_reg; in skx_init_dev()
1908 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); in skx_init_dev()
1910 ntb_topo_string(ndev->ntb.topo)); in skx_init_dev()
1911 if (ndev->ntb.topo == NTB_TOPO_NONE) in skx_init_dev()
1915 ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD; in skx_init_dev()
1917 rc = skx_init_ntb(ndev); in skx_init_dev()
1921 return skx_init_isr(ndev); in skx_init_dev()
1928 struct intel_ntb_dev *ndev; in intel_ntb3_link_enable() local
1931 ndev = container_of(ntb, struct intel_ntb_dev, ntb); in intel_ntb3_link_enable()
1942 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb3_link_enable()
1946 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in intel_ntb3_link_enable()
1953 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb3_mw_set_trans() local
1963 if (idx >= ndev->b2b_idx && !ndev->b2b_off) in intel_ntb3_mw_set_trans()
1966 bar = ndev_mw_to_bar(ndev, idx); in intel_ntb3_mw_set_trans()
1970 bar_size = pci_resource_len(ndev->ntb.pdev, bar); in intel_ntb3_mw_set_trans()
1972 if (idx == ndev->b2b_idx) in intel_ntb3_mw_set_trans()
1973 mw_size = bar_size - ndev->b2b_off; in intel_ntb3_mw_set_trans()
1985 mmio = ndev->self_mmio; in intel_ntb3_mw_set_trans()
1986 xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); in intel_ntb3_mw_set_trans()
1987 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); in intel_ntb3_mw_set_trans()
1988 base = pci_resource_start(ndev->ntb.pdev, bar); in intel_ntb3_mw_set_trans()
2018 limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; in intel_ntb3_mw_set_trans()
2043 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb3_peer_db_set() local
2046 if (db_bits & ~ndev->db_valid_mask) in intel_ntb3_peer_db_set()
2051 iowrite32(1, ndev->peer_mmio + in intel_ntb3_peer_db_set()
2052 ndev->peer_reg->db_bell + (bit * 4)); in intel_ntb3_peer_db_set()
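
On this third-generation hardware the peer doorbell is rung one bit at a time: each doorbell gets its own 32-bit register at a 4-byte stride, and writing 1 fires it. A reconstruction consistent with the two fragments above, with the bit-walking loop as an assumption:

    while (db_bits) {
            int bit = __ffs(db_bits);        /* lowest pending doorbell */
            iowrite32(1, ndev->peer_mmio +
                         ndev->peer_reg->db_bell + (bit * 4));
            db_bits &= db_bits - 1;          /* clear it and continue */
    }
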
2061 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb3_db_read() local
2063 return ndev_db_read(ndev, in intel_ntb3_db_read()
2064 ndev->self_mmio + in intel_ntb3_db_read()
2065 ndev->self_reg->db_clear); in intel_ntb3_db_read()
2070 struct intel_ntb_dev *ndev = ntb_ndev(ntb); in intel_ntb3_db_clear() local
2072 return ndev_db_write(ndev, db_bits, in intel_ntb3_db_clear()
2073 ndev->self_mmio + in intel_ntb3_db_clear()
2074 ndev->self_reg->db_clear); in intel_ntb3_db_clear()
2089 static int xeon_poll_link(struct intel_ntb_dev *ndev) in xeon_poll_link() argument
2094 ndev->reg->db_iowrite(ndev->db_link_mask, in xeon_poll_link()
2095 ndev->self_mmio + in xeon_poll_link()
2096 ndev->self_reg->db_bell); in xeon_poll_link()
2098 rc = pci_read_config_word(ndev->ntb.pdev, in xeon_poll_link()
2103 if (reg_val == ndev->lnk_sta) in xeon_poll_link()
2106 ndev->lnk_sta = reg_val; in xeon_poll_link()
2111 static int xeon_link_is_up(struct intel_ntb_dev *ndev) in xeon_link_is_up() argument
2113 if (ndev->ntb.topo == NTB_TOPO_SEC) in xeon_link_is_up()
2116 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); in xeon_link_is_up()
2119 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_topo() argument
2140 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd) in xeon_ppd_bar4_split() argument
2143 dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd); in xeon_ppd_bar4_split()
2149 static int xeon_init_isr(struct intel_ntb_dev *ndev) in xeon_init_isr() argument
2151 return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT, in xeon_init_isr()
2157 static void xeon_deinit_isr(struct intel_ntb_dev *ndev) in xeon_deinit_isr() argument
2159 ndev_deinit_isr(ndev); in xeon_deinit_isr()
2162 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, in xeon_setup_b2b_mw() argument
2173 pdev = ndev->ntb.pdev; in xeon_setup_b2b_mw()
2174 mmio = ndev->self_mmio; in xeon_setup_b2b_mw()
2176 if (ndev->b2b_idx == UINT_MAX) { in xeon_setup_b2b_mw()
2179 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
2181 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); in xeon_setup_b2b_mw()
2187 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); in xeon_setup_b2b_mw()
2193 ndev->b2b_off = bar_size >> 1; in xeon_setup_b2b_mw()
2196 ndev->b2b_off = 0; in xeon_setup_b2b_mw()
2197 --ndev->mw_count; in xeon_setup_b2b_mw()
2213 if (ndev->b2b_off) in xeon_setup_b2b_mw()
2222 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2226 if (ndev->b2b_off) in xeon_setup_b2b_mw()
2238 if (ndev->b2b_off) in xeon_setup_b2b_mw()
2250 if (ndev->b2b_off) in xeon_setup_b2b_mw()
2265 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
2282 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2287 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2289 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2295 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2301 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2309 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2314 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2316 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2322 (b2b_bar == 4 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2328 (b2b_bar == 5 ? ndev->b2b_off : 0); in xeon_setup_b2b_mw()
2337 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2346 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2359 if (!ndev->bar4_split) { in xeon_setup_b2b_mw()
2381 else if (b2b_bar == 4 && !ndev->bar4_split) in xeon_setup_b2b_mw()
2397 ndev->peer_mmio = pci_iomap(pdev, b2b_bar, in xeon_setup_b2b_mw()
2399 if (!ndev->peer_mmio) in xeon_setup_b2b_mw()
2402 ndev->peer_addr = pci_resource_start(pdev, b2b_bar); in xeon_setup_b2b_mw()
2408 static int xeon_init_ntb(struct intel_ntb_dev *ndev) in xeon_init_ntb() argument
2410 struct device *dev = &ndev->ntb.pdev->dev; in xeon_init_ntb()
2414 if (ndev->bar4_split) in xeon_init_ntb()
2415 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT; in xeon_init_ntb()
2417 ndev->mw_count = XEON_MW_COUNT; in xeon_init_ntb()
2419 ndev->spad_count = XEON_SPAD_COUNT; in xeon_init_ntb()
2420 ndev->db_count = XEON_DB_COUNT; in xeon_init_ntb()
2421 ndev->db_link_mask = XEON_DB_LINK_BIT; in xeon_init_ntb()
2423 switch (ndev->ntb.topo) { in xeon_init_ntb()
2425 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
2431 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
2433 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); in xeon_init_ntb()
2436 ndev->spad_count >>= 1; in xeon_init_ntb()
2437 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
2438 ndev->peer_reg = &xeon_sec_reg; in xeon_init_ntb()
2439 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
2443 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
2448 ndev->spad_count >>= 1; in xeon_init_ntb()
2449 ndev->self_reg = &xeon_sec_reg; in xeon_init_ntb()
2450 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
2451 ndev->xlat_reg = &xeon_pri_xlat; in xeon_init_ntb()
2456 ndev->self_reg = &xeon_pri_reg; in xeon_init_ntb()
2457 ndev->peer_reg = &xeon_b2b_reg; in xeon_init_ntb()
2458 ndev->xlat_reg = &xeon_sec_xlat; in xeon_init_ntb()
2460 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { in xeon_init_ntb()
2461 ndev->peer_reg = &xeon_pri_reg; in xeon_init_ntb()
2464 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count; in xeon_init_ntb()
2466 ndev->b2b_idx = b2b_mw_idx; in xeon_init_ntb()
2468 if (ndev->b2b_idx >= ndev->mw_count) { in xeon_init_ntb()
2471 b2b_mw_idx, ndev->mw_count); in xeon_init_ntb()
2476 b2b_mw_idx, ndev->b2b_idx); in xeon_init_ntb()
2478 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { in xeon_init_ntb()
2480 ndev->db_count -= 1; in xeon_init_ntb()
2483 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { in xeon_init_ntb()
2484 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
2488 rc = xeon_setup_b2b_mw(ndev, in xeon_init_ntb()
2497 ndev->self_mmio + XEON_SPCICMD_OFFSET); in xeon_init_ntb()
2505 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; in xeon_init_ntb()
2507 ndev->reg->db_iowrite(ndev->db_valid_mask, in xeon_init_ntb()
2508 ndev->self_mmio + in xeon_init_ntb()
2509 ndev->self_reg->db_mask); in xeon_init_ntb()
2514 static int xeon_init_dev(struct intel_ntb_dev *ndev) in xeon_init_dev() argument
2520 pdev = ndev->ntb.pdev; in xeon_init_dev()
2544 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP; in xeon_init_dev()
2561 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP; in xeon_init_dev()
2585 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14; in xeon_init_dev()
2589 ndev->reg = &xeon_reg; in xeon_init_dev()
2595 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); in xeon_init_dev()
2597 ntb_topo_string(ndev->ntb.topo)); in xeon_init_dev()
2598 if (ndev->ntb.topo == NTB_TOPO_NONE) in xeon_init_dev()
2601 if (ndev->ntb.topo != NTB_TOPO_SEC) { in xeon_init_dev()
2602 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); in xeon_init_dev()
2604 ppd, ndev->bar4_split); in xeon_init_dev()
2611 ndev->bar4_split = hweight32(mem) == in xeon_init_dev()
2614 mem, ndev->bar4_split); in xeon_init_dev()
2617 rc = xeon_init_ntb(ndev); in xeon_init_dev()
2621 return xeon_init_isr(ndev); in xeon_init_dev()
2624 static void xeon_deinit_dev(struct intel_ntb_dev *ndev) in xeon_deinit_dev() argument
2626 xeon_deinit_isr(ndev); in xeon_deinit_dev()
2629 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) in intel_ntb_init_pci() argument
2633 pci_set_drvdata(pdev, ndev); in intel_ntb_init_pci()
2661 ndev->self_mmio = pci_iomap(pdev, 0, 0); in intel_ntb_init_pci()
2662 if (!ndev->self_mmio) { in intel_ntb_init_pci()
2666 ndev->peer_mmio = ndev->self_mmio; in intel_ntb_init_pci()
2667 ndev->peer_addr = pci_resource_start(pdev, 0); in intel_ntb_init_pci()
2682 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) in intel_ntb_deinit_pci() argument
2684 struct pci_dev *pdev = ndev->ntb.pdev; in intel_ntb_deinit_pci()
2686 if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) in intel_ntb_deinit_pci()
2687 pci_iounmap(pdev, ndev->peer_mmio); in intel_ntb_deinit_pci()
2688 pci_iounmap(pdev, ndev->self_mmio); in intel_ntb_deinit_pci()
2696 static inline void ndev_init_struct(struct intel_ntb_dev *ndev, in ndev_init_struct() argument
2699 ndev->ntb.pdev = pdev; in ndev_init_struct()
2700 ndev->ntb.topo = NTB_TOPO_NONE; in ndev_init_struct()
2701 ndev->ntb.ops = &intel_ntb_ops; in ndev_init_struct()
2703 ndev->b2b_off = 0; in ndev_init_struct()
2704 ndev->b2b_idx = UINT_MAX; in ndev_init_struct()
2706 ndev->bar4_split = 0; in ndev_init_struct()
2708 ndev->mw_count = 0; in ndev_init_struct()
2709 ndev->spad_count = 0; in ndev_init_struct()
2710 ndev->db_count = 0; in ndev_init_struct()
2711 ndev->db_vec_count = 0; in ndev_init_struct()
2712 ndev->db_vec_shift = 0; in ndev_init_struct()
2714 ndev->ntb_ctl = 0; in ndev_init_struct()
2715 ndev->lnk_sta = 0; in ndev_init_struct()
2717 ndev->db_valid_mask = 0; in ndev_init_struct()
2718 ndev->db_link_mask = 0; in ndev_init_struct()
2719 ndev->db_mask = 0; in ndev_init_struct()
2721 spin_lock_init(&ndev->db_mask_lock); in ndev_init_struct()
2727 struct intel_ntb_dev *ndev; in intel_ntb_pci_probe() local
2733 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
2734 if (!ndev) { in intel_ntb_pci_probe()
2739 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
2741 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
2745 rc = atom_init_dev(ndev); in intel_ntb_pci_probe()
2750 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
2751 if (!ndev) { in intel_ntb_pci_probe()
2756 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
2758 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
2762 rc = xeon_init_dev(ndev); in intel_ntb_pci_probe()
2767 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); in intel_ntb_pci_probe()
2768 if (!ndev) { in intel_ntb_pci_probe()
2773 ndev_init_struct(ndev, pdev); in intel_ntb_pci_probe()
2774 ndev->ntb.ops = &intel_ntb3_ops; in intel_ntb_pci_probe()
2776 rc = intel_ntb_init_pci(ndev, pdev); in intel_ntb_pci_probe()
2780 rc = skx_init_dev(ndev); in intel_ntb_pci_probe()
2789 ndev_reset_unsafe_flags(ndev); in intel_ntb_pci_probe()
2791 ndev->reg->poll_link(ndev); in intel_ntb_pci_probe()
2793 ndev_init_debugfs(ndev); in intel_ntb_pci_probe()
2795 rc = ntb_register_device(&ndev->ntb); in intel_ntb_pci_probe()
2804 ndev_deinit_debugfs(ndev); in intel_ntb_pci_probe()
2806 atom_deinit_dev(ndev); in intel_ntb_pci_probe()
2808 xeon_deinit_dev(ndev); in intel_ntb_pci_probe()
2810 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_probe()
2812 kfree(ndev); in intel_ntb_pci_probe()
2819 struct intel_ntb_dev *ndev = pci_get_drvdata(pdev); in intel_ntb_pci_remove() local
2821 ntb_unregister_device(&ndev->ntb); in intel_ntb_pci_remove()
2822 ndev_deinit_debugfs(ndev); in intel_ntb_pci_remove()
2824 atom_deinit_dev(ndev); in intel_ntb_pci_remove()
2826 xeon_deinit_dev(ndev); in intel_ntb_pci_remove()
2827 intel_ntb_deinit_pci(ndev); in intel_ntb_pci_remove()
2828 kfree(ndev); in intel_ntb_pci_remove()