Lines matching refs: dd (struct qib_devdata *, drivers/infiniband/hw/qib/qib_iba7322.c)
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ argument
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ argument
749 static inline void qib_write_kreg(const struct qib_devdata *dd,
758 static void qib_setup_dca(struct qib_devdata *dd);
759 static void setup_dca_notifier(struct qib_devdata *dd,
761 static void reset_dca_notifier(struct qib_devdata *dd,
775 static inline u32 qib_read_ureg32(const struct qib_devdata *dd, in qib_read_ureg32() argument
778 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_ureg32()
781 (dd->ureg_align * ctxt) + (dd->userbase ? in qib_read_ureg32()
782 (char __iomem *)dd->userbase : in qib_read_ureg32()
783 (char __iomem *)dd->kregbase + dd->uregbase))); in qib_read_ureg32()
796 static inline u64 qib_read_ureg(const struct qib_devdata *dd, in qib_read_ureg() argument
800 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_ureg()
803 (dd->ureg_align * ctxt) + (dd->userbase ? in qib_read_ureg()
804 (char __iomem *)dd->userbase : in qib_read_ureg()
805 (char __iomem *)dd->kregbase + dd->uregbase))); in qib_read_ureg()
817 static inline void qib_write_ureg(const struct qib_devdata *dd, in qib_write_ureg() argument
822 if (dd->userbase) in qib_write_ureg()
824 ((char __iomem *) dd->userbase + in qib_write_ureg()
825 dd->ureg_align * ctxt); in qib_write_ureg()
828 (dd->uregbase + in qib_write_ureg()
829 (char __iomem *) dd->kregbase + in qib_write_ureg()
830 dd->ureg_align * ctxt); in qib_write_ureg()
832 if (dd->kregbase && (dd->flags & QIB_PRESENT)) in qib_write_ureg()
836 static inline u32 qib_read_kreg32(const struct qib_devdata *dd, in qib_read_kreg32() argument
839 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_kreg32()
841 return readl((u32 __iomem *) &dd->kregbase[regno]); in qib_read_kreg32()
844 static inline u64 qib_read_kreg64(const struct qib_devdata *dd, in qib_read_kreg64() argument
847 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_kreg64()
849 return readq(&dd->kregbase[regno]); in qib_read_kreg64()
852 static inline void qib_write_kreg(const struct qib_devdata *dd, in qib_write_kreg() argument
855 if (dd->kregbase && (dd->flags & QIB_PRESENT)) in qib_write_kreg()
856 writeq(value, &dd->kregbase[regno]); in qib_write_kreg()
866 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT)) in qib_read_kreg_port()
874 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase && in qib_write_kreg_port()
875 (ppd->dd->flags & QIB_PRESENT)) in qib_write_kreg_port()
886 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, in qib_write_kreg_ctxt() argument
890 qib_write_kreg(dd, regno + ctxt, value); in qib_write_kreg_ctxt()
893 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno) in read_7322_creg() argument
895 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) in read_7322_creg()
897 return readq(&dd->cspec->cregbase[regno]); in read_7322_creg()
902 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno) in read_7322_creg32() argument
904 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) in read_7322_creg32()
906 return readl(&dd->cspec->cregbase[regno]); in read_7322_creg32()
915 (ppd->dd->flags & QIB_PRESENT)) in write_7322_creg_port()
923 !(ppd->dd->flags & QIB_PRESENT)) in read_7322_creg_port()
932 !(ppd->dd->flags & QIB_PRESENT)) in read_7322_creg32_port()
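The accessor excerpts above (qib_read_ureg32 through read_7322_creg32_port) repeat one guard: no MMIO access unless the register space is mapped and QIB_PRESENT is set in dd->flags, with reads returning all-ones when the guard fails. Below is a minimal, compilable user-space sketch of that pattern; the fake_* names, the flag value, and the plain array standing in for the __iomem mapping are assumptions for illustration, not the driver's definitions (which use readq()/writeq()).

/* Sketch of the guarded register access seen in qib_read_kreg64()/qib_write_kreg(). */
#include <stdint.h>
#include <stdio.h>

#define QIB_PRESENT 0x1u                /* assumed flag bit, illustration only */

struct fake_devdata {
        uint64_t *kregbase;             /* stands in for the __iomem mapping */
        unsigned int flags;
};

static uint64_t fake_read_kreg64(const struct fake_devdata *dd, uint16_t regno)
{
        /* Same guard as the excerpts: bail out if unmapped or chip absent. */
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return ~0ULL;
        return dd->kregbase[regno];
}

static void fake_write_kreg(const struct fake_devdata *dd, uint16_t regno,
                            uint64_t value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                dd->kregbase[regno] = value;
}

int main(void)
{
        uint64_t regs[16] = { 0 };
        struct fake_devdata dd = { .kregbase = regs, .flags = QIB_PRESENT };

        fake_write_kreg(&dd, 3, 0xabcdULL);
        printf("present: reg3 = 0x%llx\n",
               (unsigned long long)fake_read_kreg64(&dd, 3));

        dd.flags = 0;                   /* simulate the chip going away */
        printf("absent:  reg3 = 0x%llx\n",
               (unsigned long long)fake_read_kreg64(&dd, 3));
        return 0;
}

The per-port and counter variants (qib_read_kreg_port, read_7322_creg, read_7322_creg32_port) follow the same shape, only with ppd->cpspec->kpregbase or dd->cspec->cregbase as the mapped base.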
1334 struct qib_devdata *dd = ppd->dd; in qib_disarm_7322_senderrbufs() local
1337 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_disarm_7322_senderrbufs()
1347 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i); in qib_disarm_7322_senderrbufs()
1350 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]); in qib_disarm_7322_senderrbufs()
1355 qib_disarm_piobufs_set(dd, sbuf, piobcnt); in qib_disarm_7322_senderrbufs()
1412 struct qib_devdata *dd = ppd->dd; in flush_fifo() local
1443 if (dd->flags & QIB_PIO_FLUSH_WC) { in flush_fifo()
1451 qib_sendbuf_done(dd, bufn); in flush_fifo()
1459 struct qib_devdata *dd = ppd->dd; in qib_7322_sdma_sendctrl() local
1487 spin_lock(&dd->sendctrl_lock); in qib_7322_sdma_sendctrl()
1493 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1505 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1510 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1513 spin_unlock(&dd->sendctrl_lock); in qib_7322_sdma_sendctrl()
1515 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) in qib_7322_sdma_sendctrl()
1583 struct qib_devdata *dd = ppd->dd; in sdma_7322_p_errors() local
1590 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, in sdma_7322_p_errors()
1597 qib_dev_porterr(dd, ppd->port, in sdma_7322_p_errors()
1644 static noinline void handle_7322_errors(struct qib_devdata *dd) in handle_7322_errors() argument
1653 errs = qib_read_kreg64(dd, kr_errstatus); in handle_7322_errors()
1655 qib_devinfo(dd->pcidev, in handle_7322_errors()
1661 errs &= dd->cspec->errormask; in handle_7322_errors()
1662 msg = dd->cspec->emsgbuf; in handle_7322_errors()
1667 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); in handle_7322_errors()
1670 if (errs & dd->eep_st_masks[log_idx].errs_to_log) in handle_7322_errors()
1671 qib_inc_eeprom_err(dd, log_idx, 1); in handle_7322_errors()
1674 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1680 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1682 qib_write_kreg(dd, kr_errclear, errs); in handle_7322_errors()
1692 err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, in handle_7322_errors()
1702 qib_dev_err(dd, in handle_7322_errors()
1704 dd->flags &= ~QIB_INITTED; /* needs re-init */ in handle_7322_errors()
1706 *dd->devstatusp |= QIB_STATUS_HWERROR; in handle_7322_errors()
1707 for (pidx = 0; pidx < dd->num_pports; ++pidx) in handle_7322_errors()
1708 if (dd->pport[pidx].link_speed_supported) in handle_7322_errors()
1709 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; in handle_7322_errors()
1713 qib_dev_err(dd, "%s error\n", msg); in handle_7322_errors()
1723 qib_handle_urcv(dd, ~0U); in handle_7322_errors()
1736 struct qib_devdata *dd = (struct qib_devdata *)data; in qib_error_tasklet() local
1738 handle_7322_errors(dd); in qib_error_tasklet()
1739 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_error_tasklet()
1801 if (!ppd->dd->cspec->r1) in handle_serdes_issues()
1810 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && in handle_serdes_issues()
1822 if (!ppd->dd->cspec->r1 && in handle_serdes_issues()
1837 ppd->dd->cspec->r1 ? in handle_serdes_issues()
1842 ppd->dd->unit, ppd->port, ibclt); in handle_serdes_issues()
1858 struct qib_devdata *dd = ppd->dd; in handle_7322_p_errors() local
1861 fmask = qib_read_kreg64(dd, kr_act_fmask); in handle_7322_p_errors()
1867 qib_devinfo(dd->pcidev, in handle_7322_p_errors()
1884 qib_dev_porterr(dd, ppd->port, in handle_7322_p_errors()
2001 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); in handle_7322_p_errors()
2010 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) in qib_7322_set_intr_state() argument
2013 if (dd->flags & QIB_BADINTR) in qib_7322_set_intr_state()
2015 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); in qib_7322_set_intr_state()
2017 qib_write_kreg(dd, kr_intclear, 0ULL); in qib_7322_set_intr_state()
2018 if (dd->cspec->num_msix_entries) { in qib_7322_set_intr_state()
2020 u64 val = qib_read_kreg64(dd, kr_intgranted); in qib_7322_set_intr_state()
2023 qib_write_kreg(dd, kr_intgranted, val); in qib_7322_set_intr_state()
2026 qib_write_kreg(dd, kr_intmask, 0ULL); in qib_7322_set_intr_state()
2044 static void qib_7322_clear_freeze(struct qib_devdata *dd) in qib_7322_clear_freeze() argument
2049 qib_write_kreg(dd, kr_errmask, 0ULL); in qib_7322_clear_freeze()
2051 for (pidx = 0; pidx < dd->num_pports; ++pidx) in qib_7322_clear_freeze()
2052 if (dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2053 qib_write_kreg_port(dd->pport + pidx, krp_errmask, in qib_7322_clear_freeze()
2057 qib_7322_set_intr_state(dd, 0); in qib_7322_clear_freeze()
2060 qib_write_kreg(dd, kr_control, dd->control); in qib_7322_clear_freeze()
2061 qib_read_kreg32(dd, kr_scratch); in qib_7322_clear_freeze()
2069 qib_write_kreg(dd, kr_hwerrclear, 0ULL); in qib_7322_clear_freeze()
2070 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); in qib_7322_clear_freeze()
2071 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_7322_clear_freeze()
2073 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_7322_clear_freeze()
2074 if (!dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2076 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); in qib_7322_clear_freeze()
2077 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); in qib_7322_clear_freeze()
2079 qib_7322_set_intr_state(dd, 1); in qib_7322_clear_freeze()
2094 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, in qib_7322_handle_hwerrors() argument
2101 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); in qib_7322_handle_hwerrors()
2105 qib_dev_err(dd, in qib_7322_handle_hwerrors()
2112 qib_write_kreg(dd, kr_hwerrclear, hwerrs & in qib_7322_handle_hwerrors()
2115 hwerrs &= dd->cspec->hwerrmask; in qib_7322_handle_hwerrors()
2120 qib_devinfo(dd->pcidev, in qib_7322_handle_hwerrors()
2124 ctrl = qib_read_kreg32(dd, kr_control); in qib_7322_handle_hwerrors()
2125 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { in qib_7322_handle_hwerrors()
2130 dd->cspec->stay_in_freeze) { in qib_7322_handle_hwerrors()
2138 if (dd->flags & QIB_INITTED) in qib_7322_handle_hwerrors()
2141 qib_7322_clear_freeze(dd); in qib_7322_handle_hwerrors()
2150 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); in qib_7322_handle_hwerrors()
2151 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_handle_hwerrors()
2158 qib_dev_err(dd, "%s hardware error\n", msg); in qib_7322_handle_hwerrors()
2166 struct qib_pportdata *ppd = dd->pport; in qib_7322_handle_hwerrors()
2168 for (; pidx < dd->num_pports; ++pidx, ppd++) { in qib_7322_handle_hwerrors()
2184 if (isfatal && !dd->diag_client) { in qib_7322_handle_hwerrors()
2185 qib_dev_err(dd, in qib_7322_handle_hwerrors()
2187 dd->serial); in qib_7322_handle_hwerrors()
2192 if (dd->freezemsg) in qib_7322_handle_hwerrors()
2193 snprintf(dd->freezemsg, dd->freezelen, in qib_7322_handle_hwerrors()
2195 qib_disable_after_error(dd); in qib_7322_handle_hwerrors()
2210 static void qib_7322_init_hwerrors(struct qib_devdata *dd) in qib_7322_init_hwerrors() argument
2215 extsval = qib_read_kreg64(dd, kr_extstatus); in qib_7322_init_hwerrors()
2218 qib_dev_err(dd, "MemBIST did not complete!\n"); in qib_7322_init_hwerrors()
2221 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); in qib_7322_init_hwerrors()
2222 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_init_hwerrors()
2225 qib_write_kreg(dd, kr_errclear, ~0ULL); in qib_7322_init_hwerrors()
2227 qib_write_kreg(dd, kr_errmask, ~0ULL); in qib_7322_init_hwerrors()
2228 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); in qib_7322_init_hwerrors()
2229 for (pidx = 0; pidx < dd->num_pports; ++pidx) in qib_7322_init_hwerrors()
2230 if (dd->pport[pidx].link_speed_supported) in qib_7322_init_hwerrors()
2231 qib_write_kreg_port(dd->pport + pidx, krp_errmask, in qib_7322_init_hwerrors()
2241 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable) in qib_set_7322_armlaunch() argument
2244 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH); in qib_set_7322_armlaunch()
2245 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH; in qib_set_7322_armlaunch()
2247 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH; in qib_set_7322_armlaunch()
2248 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_set_7322_armlaunch()
2260 struct qib_devdata *dd = ppd->dd; in qib_set_ib_7322_lstate() local
2298 qib_write_kreg(dd, kr_scratch, 0); in qib_set_ib_7322_lstate()
2311 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports)) argument
2316 struct qib_devdata *dd = ppd->dd; in set_vls() local
2328 totcred = NUM_RCV_BUF_UNITS(dd); in set_vls()
2344 qib_write_kreg(dd, kr_scratch, 0ULL); in set_vls()
2357 qib_write_kreg(dd, kr_scratch, 0ULL); in set_vls()
2372 struct qib_devdata *dd = ppd->dd; in qib_7322_bringup_serdes() local
2385 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_bringup_serdes()
2477 if (dd->base_guid) in qib_7322_bringup_serdes()
2478 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1; in qib_7322_bringup_serdes()
2484 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_bringup_serdes()
2494 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_bringup_serdes()
2499 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in qib_7322_bringup_serdes()
2502 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in qib_7322_bringup_serdes()
2530 if (ppd->dd->cspec->r1) in qib_7322_mini_quiet_serdes()
2553 struct qib_devdata *dd = ppd->dd; in qib_7322_mini_quiet_serdes() local
2557 diagc = qib_read_kreg64(dd, kr_hwdiagctrl); in qib_7322_mini_quiet_serdes()
2558 qib_write_kreg(dd, kr_hwdiagctrl, in qib_7322_mini_quiet_serdes()
2586 qib_write_kreg(dd, kr_hwdiagctrl, diagc); in qib_7322_mini_quiet_serdes()
2614 struct qib_devdata *dd = ppd->dd; in qib_setup_7322_setextled() local
2623 if (dd->diag_client) in qib_setup_7322_setextled()
2640 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_setup_7322_setextled()
2641 extctl = dd->cspec->extctrl & (ppd->port == 1 ? in qib_setup_7322_setextled()
2655 dd->cspec->extctrl = extctl; in qib_setup_7322_setextled()
2656 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in qib_setup_7322_setextled()
2657 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_setup_7322_setextled()
2665 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) in qib_7322_notify_dca() argument
2669 if (dd->flags & QIB_DCA_ENABLED) in qib_7322_notify_dca()
2671 if (!dca_add_requester(&dd->pcidev->dev)) { in qib_7322_notify_dca()
2672 qib_devinfo(dd->pcidev, "DCA enabled\n"); in qib_7322_notify_dca()
2673 dd->flags |= QIB_DCA_ENABLED; in qib_7322_notify_dca()
2674 qib_setup_dca(dd); in qib_7322_notify_dca()
2678 if (dd->flags & QIB_DCA_ENABLED) { in qib_7322_notify_dca()
2679 dca_remove_requester(&dd->pcidev->dev); in qib_7322_notify_dca()
2680 dd->flags &= ~QIB_DCA_ENABLED; in qib_7322_notify_dca()
2681 dd->cspec->dca_ctrl = 0; in qib_7322_notify_dca()
2682 qib_write_kreg(dd, KREG_IDX(DCACtrlA), in qib_7322_notify_dca()
2683 dd->cspec->dca_ctrl); in qib_7322_notify_dca()
2692 struct qib_devdata *dd = rcd->dd; in qib_update_rhdrq_dca() local
2693 struct qib_chip_specific *cspec = dd->cspec; in qib_update_rhdrq_dca()
2695 if (!(dd->flags & QIB_DCA_ENABLED)) in qib_update_rhdrq_dca()
2704 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; in qib_update_rhdrq_dca()
2705 qib_devinfo(dd->pcidev, in qib_update_rhdrq_dca()
2708 qib_write_kreg(dd, rmp->regno, in qib_update_rhdrq_dca()
2711 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); in qib_update_rhdrq_dca()
2717 struct qib_devdata *dd = ppd->dd; in qib_update_sdma_dca() local
2718 struct qib_chip_specific *cspec = dd->cspec; in qib_update_sdma_dca()
2721 if (!(dd->flags & QIB_DCA_ENABLED)) in qib_update_sdma_dca()
2729 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << in qib_update_sdma_dca()
2733 qib_devinfo(dd->pcidev, in qib_update_sdma_dca()
2736 qib_write_kreg(dd, KREG_IDX(DCACtrlF), in qib_update_sdma_dca()
2741 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); in qib_update_sdma_dca()
2745 static void qib_setup_dca(struct qib_devdata *dd) in qib_setup_dca() argument
2747 struct qib_chip_specific *cspec = dd->cspec; in qib_setup_dca()
2778 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, in qib_setup_dca()
2781 setup_dca_notifier(dd, &cspec->msix_entries[i]); in qib_setup_dca()
2806 struct qib_devdata *dd; in qib_irq_notifier_release() local
2811 dd = rcd->dd; in qib_irq_notifier_release()
2815 dd = ppd->dd; in qib_irq_notifier_release()
2817 qib_devinfo(dd->pcidev, in qib_irq_notifier_release()
2828 static void qib_7322_nomsix(struct qib_devdata *dd) in qib_7322_nomsix() argument
2833 dd->cspec->main_int_mask = ~0ULL; in qib_7322_nomsix()
2834 n = dd->cspec->num_msix_entries; in qib_7322_nomsix()
2838 dd->cspec->num_msix_entries = 0; in qib_7322_nomsix()
2841 reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); in qib_7322_nomsix()
2844 dd->cspec->msix_entries[i].irq, NULL); in qib_7322_nomsix()
2845 free_cpumask_var(dd->cspec->msix_entries[i].mask); in qib_7322_nomsix()
2846 free_irq(dd->cspec->msix_entries[i].irq, in qib_7322_nomsix()
2847 dd->cspec->msix_entries[i].arg); in qib_7322_nomsix()
2849 qib_nomsix(dd); in qib_7322_nomsix()
2852 intgranted = qib_read_kreg64(dd, kr_intgranted); in qib_7322_nomsix()
2854 qib_write_kreg(dd, kr_intgranted, intgranted); in qib_7322_nomsix()
2857 static void qib_7322_free_irq(struct qib_devdata *dd) in qib_7322_free_irq() argument
2859 if (dd->cspec->irq) { in qib_7322_free_irq()
2860 free_irq(dd->cspec->irq, dd); in qib_7322_free_irq()
2861 dd->cspec->irq = 0; in qib_7322_free_irq()
2863 qib_7322_nomsix(dd); in qib_7322_free_irq()
2866 static void qib_setup_7322_cleanup(struct qib_devdata *dd) in qib_setup_7322_cleanup() argument
2871 if (dd->flags & QIB_DCA_ENABLED) { in qib_setup_7322_cleanup()
2872 dca_remove_requester(&dd->pcidev->dev); in qib_setup_7322_cleanup()
2873 dd->flags &= ~QIB_DCA_ENABLED; in qib_setup_7322_cleanup()
2874 dd->cspec->dca_ctrl = 0; in qib_setup_7322_cleanup()
2875 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); in qib_setup_7322_cleanup()
2879 qib_7322_free_irq(dd); in qib_setup_7322_cleanup()
2880 kfree(dd->cspec->cntrs); in qib_setup_7322_cleanup()
2881 kfree(dd->cspec->sendchkenable); in qib_setup_7322_cleanup()
2882 kfree(dd->cspec->sendgrhchk); in qib_setup_7322_cleanup()
2883 kfree(dd->cspec->sendibchk); in qib_setup_7322_cleanup()
2884 kfree(dd->cspec->msix_entries); in qib_setup_7322_cleanup()
2885 for (i = 0; i < dd->num_pports; i++) { in qib_setup_7322_cleanup()
2890 kfree(dd->pport[i].cpspec->portcntrs); in qib_setup_7322_cleanup()
2891 if (dd->flags & QIB_HAS_QSFP) { in qib_setup_7322_cleanup()
2892 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_setup_7322_cleanup()
2893 dd->cspec->gpio_mask &= ~mask; in qib_setup_7322_cleanup()
2894 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in qib_setup_7322_cleanup()
2895 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_setup_7322_cleanup()
2901 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) in sdma_7322_intr() argument
2903 struct qib_pportdata *ppd0 = &dd->pport[0]; in sdma_7322_intr()
2904 struct qib_pportdata *ppd1 = &dd->pport[1]; in sdma_7322_intr()
2924 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) in qib_wantpiobuf_7322_intr() argument
2928 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_wantpiobuf_7322_intr()
2930 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); in qib_wantpiobuf_7322_intr()
2932 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); in qib_wantpiobuf_7322_intr()
2933 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); in qib_wantpiobuf_7322_intr()
2934 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_wantpiobuf_7322_intr()
2935 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_wantpiobuf_7322_intr()
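The qib_wantpiobuf_7322_intr() lines above show the send-control update idiom that also appears later in sendctrl_7322_mod(): take sendctrl_lock, edit the software shadow dd->sendctrl, write it to kr_sendctrl, then write kr_scratch as a flush before dropping the lock. A user-space model of that ordering, under stated assumptions: a pthread mutex stands in for the spinlock, plain fields stand in for the MMIO registers, and the SendIntBufAvail bit position is made up for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SENDINTBUFAVAIL (1ULL << 4)     /* assumed bit position, illustration only */

struct fake_devdata {
        pthread_mutex_t sendctrl_lock;
        uint64_t sendctrl;              /* software shadow of SendCtrl */
        uint64_t kr_sendctrl;           /* stands in for the chip register */
        uint64_t kr_scratch;            /* dummy register written as a flush */
};

static void fake_wantpiobuf_intr(struct fake_devdata *dd, int needint)
{
        pthread_mutex_lock(&dd->sendctrl_lock);
        if (needint)
                dd->sendctrl |= SENDINTBUFAVAIL;
        else
                dd->sendctrl &= ~SENDINTBUFAVAIL;
        dd->kr_sendctrl = dd->sendctrl; /* qib_write_kreg(dd, kr_sendctrl, ...) */
        dd->kr_scratch = 0;             /* qib_write_kreg(dd, kr_scratch, 0ULL)  */
        pthread_mutex_unlock(&dd->sendctrl_lock);
}

int main(void)
{
        struct fake_devdata dd = { .sendctrl_lock = PTHREAD_MUTEX_INITIALIZER };

        fake_wantpiobuf_intr(&dd, 1);
        printf("want interrupt: sendctrl = 0x%llx\n",
               (unsigned long long)dd.kr_sendctrl);
        fake_wantpiobuf_intr(&dd, 0);
        printf("no interrupt:   sendctrl = 0x%llx\n",
               (unsigned long long)dd.kr_sendctrl);
        return 0;
}

The same shadow-then-flush shape recurs wherever the listing shows a qib_write_kreg(dd, kr_scratch, 0) immediately after a control-register write.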
2943 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) in unknown_7322_ibits() argument
2949 qib_dev_err(dd, in unknown_7322_ibits()
2952 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); in unknown_7322_ibits()
2956 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) in unknown_7322_gpio_intr() argument
2969 gpiostatus = qib_read_kreg32(dd, kr_gpio_status); in unknown_7322_gpio_intr()
2977 qib_write_kreg(dd, kr_gpio_clear, gpiostatus); in unknown_7322_gpio_intr()
2982 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP); in unknown_7322_gpio_intr()
2988 if (!dd->pport[pidx].link_speed_supported) in unknown_7322_gpio_intr()
2991 ppd = dd->pport + pidx; in unknown_7322_gpio_intr()
2993 if (gpiostatus & dd->cspec->gpio_mask & mask) { in unknown_7322_gpio_intr()
2998 pins = qib_read_kreg64(dd, kr_extstatus); in unknown_7322_gpio_intr()
3009 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); in unknown_7322_gpio_intr()
3015 dd->cspec->gpio_mask &= ~gpio_irq; in unknown_7322_gpio_intr()
3016 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in unknown_7322_gpio_intr()
3024 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat) in unlikely_7322_intr() argument
3027 unknown_7322_ibits(dd, istat); in unlikely_7322_intr()
3029 unknown_7322_gpio_intr(dd); in unlikely_7322_intr()
3031 qib_write_kreg(dd, kr_errmask, 0ULL); in unlikely_7322_intr()
3032 tasklet_schedule(&dd->error_tasklet); in unlikely_7322_intr()
3034 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0]) in unlikely_7322_intr()
3035 handle_7322_p_errors(dd->rcd[0]->ppd); in unlikely_7322_intr()
3036 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1]) in unlikely_7322_intr()
3037 handle_7322_p_errors(dd->rcd[1]->ppd); in unlikely_7322_intr()
3046 struct qib_devdata *dd = rcd->dd; in adjust_rcv_timeout() local
3047 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; in adjust_rcv_timeout()
3060 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; in adjust_rcv_timeout()
3061 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); in adjust_rcv_timeout()
3074 struct qib_devdata *dd = data; in qib_7322intr() local
3082 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { in qib_7322intr()
3093 istat = qib_read_kreg64(dd, kr_intstatus); in qib_7322intr()
3096 qib_bad_intrstatus(dd); in qib_7322intr()
3097 qib_dev_err(dd, "Interrupt status all f's, skipping\n"); in qib_7322intr()
3103 istat &= dd->cspec->main_int_mask; in qib_7322intr()
3110 this_cpu_inc(*dd->int_counter); in qib_7322intr()
3116 unlikely_7322_intr(dd, istat); in qib_7322intr()
3124 qib_write_kreg(dd, kr_intclear, istat); in qib_7322intr()
3135 for (i = 0; i < dd->first_user_ctxt; i++) { in qib_7322intr()
3138 if (dd->rcd[i]) in qib_7322intr()
3139 qib_kreceive(dd->rcd[i], NULL, &npkts); in qib_7322intr()
3146 qib_handle_urcv(dd, ctxtrbits); in qib_7322intr()
3151 sdma_7322_intr(dd, istat); in qib_7322intr()
3153 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) in qib_7322intr()
3154 qib_ib_piobufavail(dd); in qib_7322intr()
3167 struct qib_devdata *dd = rcd->dd; in qib_7322pintr() local
3170 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in qib_7322pintr()
3179 this_cpu_inc(*dd->int_counter); in qib_7322pintr()
3182 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | in qib_7322pintr()
3195 struct qib_devdata *dd = data; in qib_7322bufavail() local
3197 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in qib_7322bufavail()
3206 this_cpu_inc(*dd->int_counter); in qib_7322bufavail()
3209 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); in qib_7322bufavail()
3212 if (dd->flags & QIB_INITTED) in qib_7322bufavail()
3213 qib_ib_piobufavail(dd); in qib_7322bufavail()
3215 qib_wantpiobuf_7322_intr(dd, 0); in qib_7322bufavail()
3226 struct qib_devdata *dd = ppd->dd; in sdma_intr() local
3228 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_intr()
3237 this_cpu_inc(*dd->int_counter); in sdma_intr()
3240 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_intr()
3253 struct qib_devdata *dd = ppd->dd; in sdma_idle_intr() local
3255 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_idle_intr()
3264 this_cpu_inc(*dd->int_counter); in sdma_idle_intr()
3267 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_idle_intr()
3280 struct qib_devdata *dd = ppd->dd; in sdma_progress_intr() local
3282 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_progress_intr()
3291 this_cpu_inc(*dd->int_counter); in sdma_progress_intr()
3294 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_progress_intr()
3308 struct qib_devdata *dd = ppd->dd; in sdma_cleanup_intr() local
3310 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_cleanup_intr()
3319 this_cpu_inc(*dd->int_counter); in sdma_cleanup_intr()
3322 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_cleanup_intr()
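The per-source MSI-X handler excerpts above (qib_7322pintr through sdma_cleanup_intr) share one prologue: return unless dd->flags has QIB_PRESENT set and QIB_BADINTR clear, bump the interrupt counter, then acknowledge the source by writing kr_intclear. A compilable sketch of that prologue; the fake_* names, flag values, and return codes are assumptions (the driver uses this_cpu_inc() and irqreturn_t).

#include <stdint.h>
#include <stdio.h>

#define QIB_PRESENT 0x1u                /* assumed flag values, illustration only */
#define QIB_BADINTR 0x2u

enum fake_irq_ret { FAKE_IRQ_NONE, FAKE_IRQ_HANDLED };

struct fake_devdata {
        unsigned int flags;
        uint64_t int_counter;           /* stands in for the per-CPU counter */
        uint64_t kr_intclear;           /* stands in for the ack register */
};

static enum fake_irq_ret fake_source_irq(struct fake_devdata *dd, uint64_t ack_bits)
{
        /* Present, and not flagged as having bad interrupts. */
        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
                return FAKE_IRQ_NONE;

        dd->int_counter++;              /* this_cpu_inc(*dd->int_counter) */
        dd->kr_intclear = ack_bits;     /* qib_write_kreg(dd, kr_intclear, ...) */

        /* ...per-source work (receive, buffer-available, SDMA) goes here... */
        return FAKE_IRQ_HANDLED;
}

int main(void)
{
        struct fake_devdata dd = { .flags = QIB_PRESENT };

        printf("handled = %d\n", fake_source_irq(&dd, 1ULL << 3));
        dd.flags |= QIB_BADINTR;        /* now the prologue rejects the interrupt */
        printf("handled = %d\n", fake_source_irq(&dd, 1ULL << 3));
        return 0;
}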
3332 static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) in reset_dca_notifier() argument
3336 qib_devinfo(dd->pcidev, in reset_dca_notifier()
3338 dd->unit, in reset_dca_notifier()
3346 static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) in setup_dca_notifier() argument
3362 qib_devinfo(dd->pcidev, in setup_dca_notifier()
3385 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) in qib_setup_7322_interrupt() argument
3393 if (!dd->num_pports) in qib_setup_7322_interrupt()
3402 qib_7322_set_intr_state(dd, 0); in qib_setup_7322_interrupt()
3405 qib_7322_init_hwerrors(dd); in qib_setup_7322_interrupt()
3408 qib_write_kreg(dd, kr_intclear, ~0ULL); in qib_setup_7322_interrupt()
3411 qib_write_kreg(dd, kr_intgranted, ~0ULL); in qib_setup_7322_interrupt()
3412 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); in qib_setup_7322_interrupt()
3415 if (!dd->cspec->num_msix_entries) { in qib_setup_7322_interrupt()
3418 if (!dd->pcidev->irq) { in qib_setup_7322_interrupt()
3419 qib_dev_err(dd, in qib_setup_7322_interrupt()
3423 ret = request_irq(dd->pcidev->irq, qib_7322intr, in qib_setup_7322_interrupt()
3424 IRQF_SHARED, QIB_DRV_NAME, dd); in qib_setup_7322_interrupt()
3426 qib_dev_err(dd, in qib_setup_7322_interrupt()
3428 dd->pcidev->irq, ret); in qib_setup_7322_interrupt()
3431 dd->cspec->irq = dd->pcidev->irq; in qib_setup_7322_interrupt()
3432 dd->cspec->main_int_mask = ~0ULL; in qib_setup_7322_interrupt()
3440 local_mask = cpumask_of_pcibus(dd->pcidev->bus); in qib_setup_7322_interrupt()
3453 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { in qib_setup_7322_interrupt()
3462 dd->cspec->msix_entries[msixnum]. in qib_setup_7322_interrupt()
3463 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] in qib_setup_7322_interrupt()
3468 if (irq_table[i].port > dd->num_pports) in qib_setup_7322_interrupt()
3470 arg = dd->pport + irq_table[i].port - 1; in qib_setup_7322_interrupt()
3472 arg = dd; in qib_setup_7322_interrupt()
3478 snprintf(dd->cspec->msix_entries[msixnum].name, in qib_setup_7322_interrupt()
3479 sizeof(dd->cspec->msix_entries[msixnum].name) in qib_setup_7322_interrupt()
3481 QIB_DRV_NAME "%d%s", dd->unit, in qib_setup_7322_interrupt()
3488 arg = dd->rcd[ctxt]; in qib_setup_7322_interrupt()
3498 snprintf(dd->cspec->msix_entries[msixnum].name, in qib_setup_7322_interrupt()
3499 sizeof(dd->cspec->msix_entries[msixnum].name) in qib_setup_7322_interrupt()
3501 QIB_DRV_NAME "%d (kctx)", dd->unit); in qib_setup_7322_interrupt()
3504 dd->cspec->msix_entries[msixnum].irq = pci_irq_vector( in qib_setup_7322_interrupt()
3505 dd->pcidev, msixnum); in qib_setup_7322_interrupt()
3506 if (dd->cspec->msix_entries[msixnum].irq < 0) { in qib_setup_7322_interrupt()
3507 qib_dev_err(dd, in qib_setup_7322_interrupt()
3510 dd->cspec->msix_entries[msixnum].irq); in qib_setup_7322_interrupt()
3511 qib_7322_nomsix(dd); in qib_setup_7322_interrupt()
3514 ret = request_irq(dd->cspec->msix_entries[msixnum].irq, in qib_setup_7322_interrupt()
3516 dd->cspec->msix_entries[msixnum].name, in qib_setup_7322_interrupt()
3523 qib_dev_err(dd, in qib_setup_7322_interrupt()
3526 dd->cspec->msix_entries[msixnum].irq, in qib_setup_7322_interrupt()
3528 qib_7322_nomsix(dd); in qib_setup_7322_interrupt()
3531 dd->cspec->msix_entries[msixnum].arg = arg; in qib_setup_7322_interrupt()
3533 dd->cspec->msix_entries[msixnum].dca = dca; in qib_setup_7322_interrupt()
3534 dd->cspec->msix_entries[msixnum].rcv = in qib_setup_7322_interrupt()
3544 val = qib_read_kreg64(dd, 2 * msixnum + 1 + in qib_setup_7322_interrupt()
3548 &dd->cspec->msix_entries[msixnum].mask, in qib_setup_7322_interrupt()
3552 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3559 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3562 dd->cspec->msix_entries[msixnum].irq, in qib_setup_7322_interrupt()
3563 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3569 qib_write_kreg(dd, kr_intredirect + i, redirect[i]); in qib_setup_7322_interrupt()
3570 dd->cspec->main_int_mask = mask; in qib_setup_7322_interrupt()
3571 tasklet_init(&dd->error_tasklet, qib_error_tasklet, in qib_setup_7322_interrupt()
3572 (unsigned long)dd); in qib_setup_7322_interrupt()
3582 static unsigned qib_7322_boardname(struct qib_devdata *dd) in qib_7322_boardname() argument
3588 boardid = SYM_FIELD(dd->revision, Revision, BoardID); in qib_7322_boardname()
3592 dd->boardname = "InfiniPath_QLE7342_Emulation"; in qib_7322_boardname()
3595 dd->boardname = "InfiniPath_QLE7340"; in qib_7322_boardname()
3596 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3600 dd->boardname = "InfiniPath_QLE7342"; in qib_7322_boardname()
3601 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3604 dd->boardname = "InfiniPath_QMI7342"; in qib_7322_boardname()
3607 dd->boardname = "InfiniPath_Unsupported7342"; in qib_7322_boardname()
3608 qib_dev_err(dd, "Unsupported version of QMH7342\n"); in qib_7322_boardname()
3612 dd->boardname = "InfiniPath_QMH7342"; in qib_7322_boardname()
3616 dd->boardname = "InfiniPath_QME7342"; in qib_7322_boardname()
3619 dd->boardname = "InfiniPath_QME7362"; in qib_7322_boardname()
3620 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3623 dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr"; in qib_7322_boardname()
3624 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3627 dd->boardname = "InfiniPath_QLE7342_TEST"; in qib_7322_boardname()
3628 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3631 dd->boardname = "InfiniPath_QLE73xy_UNKNOWN"; in qib_7322_boardname()
3632 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); in qib_7322_boardname()
3635 dd->board_atten = 1; /* index into txdds_Xdr */ in qib_7322_boardname()
3637 snprintf(dd->boardversion, sizeof(dd->boardversion), in qib_7322_boardname()
3639 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, in qib_7322_boardname()
3640 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch), in qib_7322_boardname()
3641 dd->majrev, dd->minrev, in qib_7322_boardname()
3642 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW)); in qib_7322_boardname()
3645 qib_devinfo(dd->pcidev, in qib_7322_boardname()
3647 dd->unit); in qib_7322_boardname()
3658 static int qib_do_7322_reset(struct qib_devdata *dd) in qib_do_7322_reset() argument
3668 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); in qib_do_7322_reset()
3670 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); in qib_do_7322_reset()
3672 msix_entries = dd->cspec->num_msix_entries; in qib_do_7322_reset()
3675 qib_7322_set_intr_state(dd, 0); in qib_do_7322_reset()
3678 qib_7322_nomsix(dd); in qib_do_7322_reset()
3680 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries * in qib_do_7322_reset()
3694 vecaddr = qib_read_kreg64(dd, 2 * i + in qib_do_7322_reset()
3696 vecdata = qib_read_kreg64(dd, 1 + 2 * i + in qib_do_7322_reset()
3705 dd->pport->cpspec->ibdeltainprog = 0; in qib_do_7322_reset()
3706 dd->pport->cpspec->ibsymdelta = 0; in qib_do_7322_reset()
3707 dd->pport->cpspec->iblnkerrdelta = 0; in qib_do_7322_reset()
3708 dd->pport->cpspec->ibmalfdelta = 0; in qib_do_7322_reset()
3710 dd->z_int_counter = qib_int_counter(dd); in qib_do_7322_reset()
3717 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); in qib_do_7322_reset()
3718 dd->flags |= QIB_DOING_RESET; in qib_do_7322_reset()
3719 val = dd->control | QLOGIC_IB_C_RESET; in qib_do_7322_reset()
3720 writeq(val, &dd->kregbase[kr_control]); in qib_do_7322_reset()
3730 qib_pcie_reenable(dd, cmdval, int_line, clinesz); in qib_do_7322_reset()
3736 val = readq(&dd->kregbase[kr_revision]); in qib_do_7322_reset()
3737 if (val == dd->revision) in qib_do_7322_reset()
3740 qib_dev_err(dd, in qib_do_7322_reset()
3747 dd->flags |= QIB_PRESENT; /* it's back */ in qib_do_7322_reset()
3754 qib_write_kreg(dd, 2 * i + in qib_do_7322_reset()
3757 qib_write_kreg(dd, 1 + 2 * i + in qib_do_7322_reset()
3764 for (i = 0; i < dd->num_pports; ++i) in qib_do_7322_reset()
3765 write_7322_init_portregs(&dd->pport[i]); in qib_do_7322_reset()
3766 write_7322_initregs(dd); in qib_do_7322_reset()
3768 if (qib_pcie_params(dd, dd->lbus_width, in qib_do_7322_reset()
3769 &dd->cspec->num_msix_entries)) in qib_do_7322_reset()
3770 qib_dev_err(dd, in qib_do_7322_reset()
3773 qib_setup_7322_interrupt(dd, 1); in qib_do_7322_reset()
3775 for (i = 0; i < dd->num_pports; ++i) { in qib_do_7322_reset()
3776 struct qib_pportdata *ppd = &dd->pport[i]; in qib_do_7322_reset()
3785 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ in qib_do_7322_reset()
3797 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, in qib_7322_put_tid() argument
3800 if (!(dd->flags & QIB_PRESENT)) in qib_7322_put_tid()
3802 if (pa != dd->tidinvalid) { in qib_7322_put_tid()
3807 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", in qib_7322_put_tid()
3812 qib_dev_err(dd, in qib_7322_put_tid()
3819 chippa |= dd->tidtemplate; in qib_7322_put_tid()
3836 static void qib_7322_clear_tids(struct qib_devdata *dd, in qib_7322_clear_tids() argument
3844 if (!dd->kregbase || !rcd) in qib_7322_clear_tids()
3849 tidinv = dd->tidinvalid; in qib_7322_clear_tids()
3851 ((char __iomem *) dd->kregbase + in qib_7322_clear_tids()
3852 dd->rcvtidbase + in qib_7322_clear_tids()
3853 ctxt * dd->rcvtidcnt * sizeof(*tidbase)); in qib_7322_clear_tids()
3855 for (i = 0; i < dd->rcvtidcnt; i++) in qib_7322_clear_tids()
3856 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, in qib_7322_clear_tids()
3860 ((char __iomem *) dd->kregbase + in qib_7322_clear_tids()
3861 dd->rcvegrbase + in qib_7322_clear_tids()
3865 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, in qib_7322_clear_tids()
3875 static void qib_7322_tidtemplate(struct qib_devdata *dd) in qib_7322_tidtemplate() argument
3886 if (dd->rcvegrbufsize == 2048) in qib_7322_tidtemplate()
3887 dd->tidtemplate = IBA7322_TID_SZ_2K; in qib_7322_tidtemplate()
3888 else if (dd->rcvegrbufsize == 4096) in qib_7322_tidtemplate()
3889 dd->tidtemplate = IBA7322_TID_SZ_4K; in qib_7322_tidtemplate()
3890 dd->tidinvalid = 0; in qib_7322_tidtemplate()
3908 if (rcd->dd->cspec->r1) in qib_7322_get_base_info()
3910 if (rcd->dd->flags & QIB_USE_SPCL_TRIG) in qib_7322_get_base_info()
3917 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) in qib_7322_get_msgheader() argument
3922 (rhf_addr - dd->rhf_offset + offset); in qib_7322_get_msgheader()
3928 static void qib_7322_config_ctxts(struct qib_devdata *dd) in qib_7322_config_ctxts() argument
3933 nchipctxts = qib_read_kreg32(dd, kr_contextcnt); in qib_7322_config_ctxts()
3934 dd->cspec->numctxts = nchipctxts; in qib_7322_config_ctxts()
3935 if (qib_n_krcv_queues > 1 && dd->num_pports) { in qib_7322_config_ctxts()
3936 dd->first_user_ctxt = NUM_IB_PORTS + in qib_7322_config_ctxts()
3937 (qib_n_krcv_queues - 1) * dd->num_pports; in qib_7322_config_ctxts()
3938 if (dd->first_user_ctxt > nchipctxts) in qib_7322_config_ctxts()
3939 dd->first_user_ctxt = nchipctxts; in qib_7322_config_ctxts()
3940 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports; in qib_7322_config_ctxts()
3942 dd->first_user_ctxt = NUM_IB_PORTS; in qib_7322_config_ctxts()
3943 dd->n_krcv_queues = 1; in qib_7322_config_ctxts()
3947 int nctxts = dd->first_user_ctxt + num_online_cpus(); in qib_7322_config_ctxts()
3950 dd->ctxtcnt = 6; in qib_7322_config_ctxts()
3952 dd->ctxtcnt = 10; in qib_7322_config_ctxts()
3954 dd->ctxtcnt = nchipctxts; in qib_7322_config_ctxts()
3955 } else if (qib_cfgctxts < dd->num_pports) in qib_7322_config_ctxts()
3956 dd->ctxtcnt = dd->num_pports; in qib_7322_config_ctxts()
3958 dd->ctxtcnt = qib_cfgctxts; in qib_7322_config_ctxts()
3959 if (!dd->ctxtcnt) /* none of the above, set to max */ in qib_7322_config_ctxts()
3960 dd->ctxtcnt = nchipctxts; in qib_7322_config_ctxts()
3967 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in qib_7322_config_ctxts()
3968 if (dd->ctxtcnt > 10) in qib_7322_config_ctxts()
3969 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); in qib_7322_config_ctxts()
3970 else if (dd->ctxtcnt > 6) in qib_7322_config_ctxts()
3971 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); in qib_7322_config_ctxts()
3975 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); in qib_7322_config_ctxts()
3981 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); in qib_7322_config_ctxts()
3982 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in qib_7322_config_ctxts()
3985 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); in qib_7322_config_ctxts()
3987 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); in qib_7322_config_ctxts()
3989 dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt, in qib_7322_config_ctxts()
3990 dd->num_pports > 1 ? 1024U : 2048U); in qib_7322_config_ctxts()
4098 struct qib_devdata *dd = ppd->dd; in qib_7322_set_ib_cfg() local
4183 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4197 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4217 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4234 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4273 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); in qib_7322_set_ib_cfg()
4304 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", in qib_7322_set_ib_cfg()
4333 if (ppd->dd->cspec->r1) { in qib_7322_set_ib_cfg()
4346 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_set_ib_cfg()
4361 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", in qib_7322_set_loopback()
4362 ppd->dd->unit, ppd->port); in qib_7322_set_loopback()
4368 qib_devinfo(ppd->dd->pcidev, in qib_7322_set_loopback()
4370 ppd->dd->unit, ppd->port); in qib_7322_set_loopback()
4381 qib_write_kreg(ppd->dd, kr_scratch, 0); in qib_7322_set_loopback()
4416 struct qib_devdata *dd = ppd->dd; in set_vl_weights() local
4419 spin_lock_irqsave(&dd->sendctrl_lock, flags); in set_vl_weights()
4422 qib_write_kreg(dd, kr_scratch, 0); in set_vl_weights()
4423 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in set_vl_weights()
4471 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); in qib_update_7322_usrhead()
4473 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); in qib_update_7322_usrhead()
4474 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); in qib_update_7322_usrhead()
4482 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); in qib_7322_hdrqempty()
4486 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); in qib_7322_hdrqempty()
4516 struct qib_devdata *dd = ppd->dd; in rcvctrl_7322_mod() local
4521 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
4524 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4526 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4528 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4530 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4536 mask = (1ULL << dd->ctxtcnt) - 1; in rcvctrl_7322_mod()
4540 rcd = dd->rcd[ctxt]; in rcvctrl_7322_mod()
4545 if (!(dd->flags & QIB_NODMA_RTAIL)) { in rcvctrl_7322_mod()
4547 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4550 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, in rcvctrl_7322_mod()
4552 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, in rcvctrl_7322_mod()
4560 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); in rcvctrl_7322_mod()
4562 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); in rcvctrl_7322_mod()
4564 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4566 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4573 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); in rcvctrl_7322_mod()
4576 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { in rcvctrl_7322_mod()
4583 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); in rcvctrl_7322_mod()
4584 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); in rcvctrl_7322_mod()
4587 (void) qib_read_kreg32(dd, kr_scratch); in rcvctrl_7322_mod()
4588 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); in rcvctrl_7322_mod()
4589 dd->rcd[ctxt]->head = val; in rcvctrl_7322_mod()
4591 if (ctxt < dd->first_user_ctxt) in rcvctrl_7322_mod()
4592 val |= dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4593 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); in rcvctrl_7322_mod()
4595 dd->rcd[ctxt] && dd->rhdrhead_intr_off) { in rcvctrl_7322_mod()
4597 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4598 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); in rcvctrl_7322_mod()
4605 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); in rcvctrl_7322_mod()
4606 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); in rcvctrl_7322_mod()
4608 qib_write_ureg(dd, ur_rcvflowtable + f, in rcvctrl_7322_mod()
4613 for (i = 0; i < dd->cfgctxts; i++) { in rcvctrl_7322_mod()
4614 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, in rcvctrl_7322_mod()
4616 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); in rcvctrl_7322_mod()
4618 qib_write_ureg(dd, ur_rcvflowtable + f, in rcvctrl_7322_mod()
4623 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
4655 struct qib_devdata *dd = ppd->dd; in sendctrl_7322_mod() local
4659 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4663 dd->sendctrl = 0; in sendctrl_7322_mod()
4665 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4667 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4668 if (dd->flags & QIB_USE_SPCL_TRIG) in sendctrl_7322_mod()
4669 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); in sendctrl_7322_mod()
4681 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4682 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in sendctrl_7322_mod()
4689 qib_write_kreg(dd, kr_sendctrl, in sendctrl_7322_mod()
4692 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4708 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4711 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4718 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) in sendctrl_7322_mod()
4722 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); in sendctrl_7322_mod()
4723 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4728 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4732 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); in sendctrl_7322_mod()
4733 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4736 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4746 v = qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4747 qib_write_kreg(dd, kr_scratch, v); in sendctrl_7322_mod()
4748 v = qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4749 qib_write_kreg(dd, kr_scratch, v); in sendctrl_7322_mod()
4750 qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4765 struct qib_devdata *dd = ppd->dd; in qib_portcntr_7322() local
4813 qib_devinfo(ppd->dd->pcidev, in qib_portcntr_7322()
4824 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { in qib_portcntr_7322()
4825 struct qib_ctxtdata *rcd = dd->rcd[i]; in qib_portcntr_7322()
4829 ret += read_7322_creg32(dd, cr_base_egrovfl + i); in qib_portcntr_7322()
5021 static void init_7322_cntrnames(struct qib_devdata *dd) in init_7322_cntrnames() argument
5026 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; in init_7322_cntrnames()
5035 dd->cspec->ncntrs = i; in init_7322_cntrnames()
5038 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; in init_7322_cntrnames()
5040 dd->cspec->cntrnamelen = 1 + s - cntr7322names; in init_7322_cntrnames()
5041 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs in init_7322_cntrnames()
5046 dd->cspec->nportcntrs = i - 1; in init_7322_cntrnames()
5047 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; in init_7322_cntrnames()
5048 for (i = 0; i < dd->num_pports; ++i) { in init_7322_cntrnames()
5049 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs in init_7322_cntrnames()
5054 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, in qib_read_7322cntrs() argument
5060 ret = dd->cspec->cntrnamelen; in qib_read_7322cntrs()
5066 u64 *cntr = dd->cspec->cntrs; in qib_read_7322cntrs()
5069 ret = dd->cspec->ncntrs * sizeof(u64); in qib_read_7322cntrs()
5076 for (i = 0; i < dd->cspec->ncntrs; i++) in qib_read_7322cntrs()
5078 *cntr++ = read_7322_creg(dd, in qib_read_7322cntrs()
5082 *cntr++ = read_7322_creg32(dd, in qib_read_7322cntrs()
5089 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, in qib_read_7322portcntrs() argument
5095 ret = dd->cspec->portcntrnamelen; in qib_read_7322portcntrs()
5101 struct qib_pportdata *ppd = &dd->pport[port]; in qib_read_7322portcntrs()
5105 ret = dd->cspec->nportcntrs * sizeof(u64); in qib_read_7322portcntrs()
5112 for (i = 0; i < dd->cspec->nportcntrs; i++) { in qib_read_7322portcntrs()
5143 struct qib_devdata *dd = (struct qib_devdata *) opaque; in qib_get_7322_faststats() local
5149 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_get_7322_faststats()
5150 ppd = dd->pport + pidx; in qib_get_7322_faststats()
5157 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) in qib_get_7322_faststats()
5158 || dd->diag_client) in qib_get_7322_faststats()
5168 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5169 traffic_wds -= ppd->dd->traffic_wds; in qib_get_7322_faststats()
5170 ppd->dd->traffic_wds += traffic_wds; in qib_get_7322_faststats()
5171 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5181 ppd->dd->cspec->r1 ? in qib_get_7322_faststats()
5187 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); in qib_get_7322_faststats()
5193 static int qib_7322_intr_fallback(struct qib_devdata *dd) in qib_7322_intr_fallback() argument
5195 if (!dd->cspec->num_msix_entries) in qib_7322_intr_fallback()
5198 qib_devinfo(dd->pcidev, in qib_7322_intr_fallback()
5200 qib_7322_nomsix(dd); in qib_7322_intr_fallback()
5201 qib_enable_intx(dd); in qib_7322_intr_fallback()
5202 qib_setup_7322_interrupt(dd, 0); in qib_7322_intr_fallback()
5218 struct qib_devdata *dd = ppd->dd; in qib_7322_mini_pcs_reset() local
5224 qib_write_kreg(dd, kr_hwerrmask, in qib_7322_mini_pcs_reset()
5225 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); in qib_7322_mini_pcs_reset()
5231 qib_read_kreg32(dd, kr_scratch); in qib_7322_mini_pcs_reset()
5234 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_mini_pcs_reset()
5235 qib_write_kreg(dd, kr_hwerrclear, in qib_7322_mini_pcs_reset()
5237 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_mini_pcs_reset()
5255 struct qib_devdata *dd = ppd->dd; in autoneg_7322_sendpkt() local
5267 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); in autoneg_7322_sendpkt()
5272 if (dd->flags & QIB_USE_SPCL_TRIG) { in autoneg_7322_sendpkt()
5273 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; in autoneg_7322_sendpkt()
5279 qib_sendbuf_done(dd, pnum); in autoneg_7322_sendpkt()
5281 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); in autoneg_7322_sendpkt()
5289 struct qib_devdata *dd = ppd->dd; in qib_autoneg_7322_send() local
5324 qib_read_kreg64(dd, kr_scratch); in qib_autoneg_7322_send()
5327 qib_read_kreg64(dd, kr_scratch); in qib_autoneg_7322_send()
5368 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_7322_ibspeed_fast()
5399 struct qib_devdata *dd; in autoneg_7322_work() local
5406 dd = ppd->dd; in autoneg_7322_work()
5620 if (ppd->dd->flags & QIB_HAS_QSFP) { in qib_7322_ib_updown()
5688 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) in qib_7322_ib_updown()
5731 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) in gpio_7322_mod() argument
5740 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5741 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5742 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5743 new_out = (dd->cspec->gpio_out & ~mask) | out; in gpio_7322_mod()
5745 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in gpio_7322_mod()
5746 qib_write_kreg(dd, kr_gpio_out, new_out); in gpio_7322_mod()
5747 dd->cspec->gpio_out = new_out; in gpio_7322_mod()
5748 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5758 read_val = qib_read_kreg64(dd, kr_extstatus); in gpio_7322_mod()
5763 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) in qib_7322_eeprom_wen() argument
5769 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; in qib_7322_eeprom_wen()
5770 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); in qib_7322_eeprom_wen()
5780 static void get_7322_chip_params(struct qib_devdata *dd) in get_7322_chip_params() argument
5786 dd->palign = qib_read_kreg32(dd, kr_pagealign); in get_7322_chip_params()
5788 dd->uregbase = qib_read_kreg32(dd, kr_userregbase); in get_7322_chip_params()
5790 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); in get_7322_chip_params()
5791 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); in get_7322_chip_params()
5792 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); in get_7322_chip_params()
5793 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); in get_7322_chip_params()
5794 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; in get_7322_chip_params()
5796 val = qib_read_kreg64(dd, kr_sendpiobufcnt); in get_7322_chip_params()
5797 dd->piobcnt2k = val & ~0U; in get_7322_chip_params()
5798 dd->piobcnt4k = val >> 32; in get_7322_chip_params()
5799 val = qib_read_kreg64(dd, kr_sendpiosize); in get_7322_chip_params()
5800 dd->piosize2k = val & ~0U; in get_7322_chip_params()
5801 dd->piosize4k = val >> 32; in get_7322_chip_params()
5806 dd->pport[0].ibmtu = (u32)mtu; in get_7322_chip_params()
5807 dd->pport[1].ibmtu = (u32)mtu; in get_7322_chip_params()
5810 dd->pio2kbase = (u32 __iomem *) in get_7322_chip_params()
5811 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); in get_7322_chip_params()
5812 dd->pio4kbase = (u32 __iomem *) in get_7322_chip_params()
5813 ((char __iomem *) dd->kregbase + in get_7322_chip_params()
5814 (dd->piobufbase >> 32)); in get_7322_chip_params()
5820 dd->align4k = ALIGN(dd->piosize4k, dd->palign); in get_7322_chip_params()
5822 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; in get_7322_chip_params()
5824 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / in get_7322_chip_params()
5833 static void qib_7322_set_baseaddrs(struct qib_devdata *dd) in qib_7322_set_baseaddrs() argument
5837 cregbase = qib_read_kreg32(dd, kr_counterregbase); in qib_7322_set_baseaddrs()
5839 dd->cspec->cregbase = (u64 __iomem *)(cregbase + in qib_7322_set_baseaddrs()
5840 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5842 dd->egrtidbase = (u64 __iomem *) in qib_7322_set_baseaddrs()
5843 ((char __iomem *) dd->kregbase + dd->rcvegrbase); in qib_7322_set_baseaddrs()
5846 dd->pport[0].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5847 (u64 __iomem *)((char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5848 dd->pport[1].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5849 (u64 __iomem *)(dd->palign + in qib_7322_set_baseaddrs()
5850 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5851 dd->pport[0].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5852 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], in qib_7322_set_baseaddrs()
5853 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5854 dd->pport[1].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5855 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], in qib_7322_set_baseaddrs()
5856 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5872 static int sendctrl_hook(struct qib_devdata *dd, in sendctrl_hook() argument
5888 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in sendctrl_hook()
5892 ppd = dd->pport + pidx; in sendctrl_hook()
5897 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); in sendctrl_hook()
5903 if (pidx >= dd->num_pports) in sendctrl_hook()
5913 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_hook()
5923 local_data = (u64)qib_read_kreg32(dd, idx); in sendctrl_hook()
5925 local_data = qib_read_kreg64(dd, idx); in sendctrl_hook()
5946 qib_write_kreg(dd, idx, tval); in sendctrl_hook()
5947 qib_write_kreg(dd, kr_scratch, 0Ull); in sendctrl_hook()
5949 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_hook()
6015 if (!ret && !ppd->dd->cspec->r1) { in qsfp_7322_event()
6057 struct qib_devdata *dd = ppd->dd; in qib_init_7322_qsfp() local
6063 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6064 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); in qib_init_7322_qsfp()
6065 dd->cspec->gpio_mask |= mod_prs_bit; in qib_init_7322_qsfp()
6066 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in qib_init_7322_qsfp()
6067 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in qib_init_7322_qsfp()
6068 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6084 static void set_no_qsfp_atten(struct qib_devdata *dd, int change) in set_no_qsfp_atten() argument
6096 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6097 dd->pport[pidx].cpspec->no_eep = deflt; in set_no_qsfp_atten()
6100 if (IS_QME(dd) || IS_QMH(dd)) in set_no_qsfp_atten()
6138 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; in set_no_qsfp_atten()
6140 struct qib_pportdata *ppd = &dd->pport[pidx]; in set_no_qsfp_atten()
6152 if (IS_QMH(dd) || IS_QME(dd)) in set_no_qsfp_atten()
6165 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6166 if (dd->pport[pidx].link_speed_supported) in set_no_qsfp_atten()
6167 init_txdds_table(&dd->pport[pidx], 0); in set_no_qsfp_atten()
6174 struct qib_devdata *dd; in setup_txselect() local
6191 list_for_each_entry(dd, &qib_dev_list, list) in setup_txselect()
6192 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) in setup_txselect()
6193 set_no_qsfp_atten(dd, 1); in setup_txselect()
6202 static int qib_late_7322_initreg(struct qib_devdata *dd) in qib_late_7322_initreg() argument
6207 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); in qib_late_7322_initreg()
6208 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); in qib_late_7322_initreg()
6209 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); in qib_late_7322_initreg()
6210 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); in qib_late_7322_initreg()
6211 val = qib_read_kreg64(dd, kr_sendpioavailaddr); in qib_late_7322_initreg()
6212 if (val != dd->pioavailregs_phys) { in qib_late_7322_initreg()
6213 qib_dev_err(dd, in qib_late_7322_initreg()
6215 (unsigned long) dd->pioavailregs_phys, in qib_late_7322_initreg()
6220 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_late_7322_initreg()
6221 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); in qib_late_7322_initreg()
6223 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); in qib_late_7322_initreg()
6225 qib_register_observer(dd, &sendctrl_0_observer); in qib_late_7322_initreg()
6226 qib_register_observer(dd, &sendctrl_1_observer); in qib_late_7322_initreg()
6228 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6229 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6236 set_no_qsfp_atten(dd, 0); in qib_late_7322_initreg()
6237 for (n = 0; n < dd->num_pports; ++n) { in qib_late_7322_initreg()
6238 struct qib_pportdata *ppd = dd->pport + n; in qib_late_7322_initreg()
6243 if (dd->flags & QIB_HAS_QSFP) in qib_late_7322_initreg()
6246 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6247 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6276 qib_write_kreg(ppd->dd, kr_scratch, 0); in write_7322_init_portregs()
6307 if (ppd->dd->cspec->r1) in write_7322_init_portregs()
6318 static void write_7322_initregs(struct qib_devdata *dd) in write_7322_initregs() argument
6325 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); in write_7322_initregs()
6327 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in write_7322_initregs()
6331 if (dd->n_krcv_queues < 2 || in write_7322_initregs()
6332 !dd->pport[pidx].link_speed_supported) in write_7322_initregs()
6335 ppd = &dd->pport[pidx]; in write_7322_initregs()
6338 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6340 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6345 if (dd->num_pports > 1) in write_7322_initregs()
6346 n = dd->first_user_ctxt / dd->num_pports; in write_7322_initregs()
6348 n = dd->first_user_ctxt - 1; in write_7322_initregs()
6352 if (dd->num_pports > 1) in write_7322_initregs()
6353 ctxt = (i % n) * dd->num_pports + pidx; in write_7322_initregs()
6375 for (i = 0; i < dd->first_user_ctxt; i++) { in write_7322_initregs()
6376 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; in write_7322_initregs()
6377 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); in write_7322_initregs()
6386 for (i = 0; i < dd->cfgctxts; i++) { in write_7322_initregs()
6390 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); in write_7322_initregs()
6398 if (dd->num_pports) in write_7322_initregs()
6399 setup_7322_link_recovery(dd->pport, dd->num_pports > 1); in write_7322_initregs()
6402 static int qib_init_7322_variables(struct qib_devdata *dd) in qib_init_7322_variables() argument
6411 ppd = (struct qib_pportdata *)(dd + 1); in qib_init_7322_variables()
6412 dd->pport = ppd; in qib_init_7322_variables()
6413 ppd[0].dd = dd; in qib_init_7322_variables()
6414 ppd[1].dd = dd; in qib_init_7322_variables()
6416 dd->cspec = (struct qib_chip_specific *)(ppd + 2); in qib_init_7322_variables()
6418 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); in qib_init_7322_variables()
6423 spin_lock_init(&dd->cspec->rcvmod_lock); in qib_init_7322_variables()
6424 spin_lock_init(&dd->cspec->gpio_lock); in qib_init_7322_variables()
6427 dd->revision = readq(&dd->kregbase[kr_revision]); in qib_init_7322_variables()
6429 if ((dd->revision & 0xffffffffU) == 0xffffffffU) { in qib_init_7322_variables()
6430 qib_dev_err(dd, in qib_init_7322_variables()
6435 dd->flags |= QIB_PRESENT; /* now register routines work */ in qib_init_7322_variables()
6437 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); in qib_init_7322_variables()
6438 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); in qib_init_7322_variables()
6439 dd->cspec->r1 = dd->minrev == 1; in qib_init_7322_variables()
6441 get_7322_chip_params(dd); in qib_init_7322_variables()
6442 features = qib_7322_boardname(dd); in qib_init_7322_variables()
6445 sbufcnt = dd->piobcnt2k + dd->piobcnt4k + in qib_init_7322_variables()
6448 dd->cspec->sendchkenable = kmalloc(sbufcnt * in qib_init_7322_variables()
6449 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); in qib_init_7322_variables()
6450 dd->cspec->sendgrhchk = kmalloc(sbufcnt * in qib_init_7322_variables()
6451 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); in qib_init_7322_variables()
6452 dd->cspec->sendibchk = kmalloc(sbufcnt * in qib_init_7322_variables()
6453 sizeof(*dd->cspec->sendibchk), GFP_KERNEL); in qib_init_7322_variables()
6454 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || in qib_init_7322_variables()
6455 !dd->cspec->sendibchk) { in qib_init_7322_variables()
6460 ppd = dd->pport; in qib_init_7322_variables()
6466 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; in qib_init_7322_variables()
6467 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; in qib_init_7322_variables()
6468 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; in qib_init_7322_variables()
6470 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | in qib_init_7322_variables()
6474 dd->flags |= qib_special_trigger ? in qib_init_7322_variables()
6481 qib_7322_set_baseaddrs(dd); in qib_init_7322_variables()
6487 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; in qib_init_7322_variables()
6489 dd->cspec->hwerrmask = ~0ULL; in qib_init_7322_variables()
6492 dd->cspec->hwerrmask &= in qib_init_7322_variables()
6504 dd->skip_kctxt_mask |= 1 << pidx; in qib_init_7322_variables()
6510 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6514 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6525 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6529 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6540 dd->num_pports++; in qib_init_7322_variables()
6541 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); in qib_init_7322_variables()
6543 dd->num_pports--; in qib_init_7322_variables()
6565 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6577 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6590 if (ppd->dd->cspec->r1) in qib_init_7322_variables()
6598 if (!(dd->flags & QIB_HAS_QSFP)) { in qib_init_7322_variables()
6599 if (!IS_QMH(dd) && !IS_QME(dd)) in qib_init_7322_variables()
6600 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6602 dd->unit, ppd->port); in qib_init_7322_variables()
6603 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; in qib_init_7322_variables()
6608 ppd->cpspec->no_eep = IS_QMH(dd) ? in qib_init_7322_variables()
6623 dd->rcvhdrentsize = qib_rcvhdrentsize ? in qib_init_7322_variables()
6625 dd->rcvhdrsize = qib_rcvhdrsize ? in qib_init_7322_variables()
6627 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); in qib_init_7322_variables()
6630 dd->rcvegrbufsize = max(mtu, 2048); in qib_init_7322_variables()
6631 BUG_ON(!is_power_of_2(dd->rcvegrbufsize)); in qib_init_7322_variables()
6632 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); in qib_init_7322_variables()
6634 qib_7322_tidtemplate(dd); in qib_init_7322_variables()
6640 dd->rhdrhead_intr_off = in qib_init_7322_variables()
6644 setup_timer(&dd->stats_timer, qib_get_7322_faststats, in qib_init_7322_variables()
6645 (unsigned long)dd); in qib_init_7322_variables()
6647 dd->ureg_align = 0x10000; /* 64KB alignment */ in qib_init_7322_variables()
6649 dd->piosize2kmax_dwords = dd->piosize2k >> 2; in qib_init_7322_variables()
6651 qib_7322_config_ctxts(dd); in qib_init_7322_variables()
6652 qib_set_ctxtcnt(dd); in qib_init_7322_variables()
6661 ret = init_chip_wc_pat(dd, 0); in qib_init_7322_variables()
6666 vl15off = dd->physaddr + (dd->piobufbase >> 32) + in qib_init_7322_variables()
6667 dd->piobcnt4k * dd->align4k; in qib_init_7322_variables()
6668 dd->piovl15base = ioremap_nocache(vl15off, in qib_init_7322_variables()
6669 NUM_VL15_BUFS * dd->align4k); in qib_init_7322_variables()
6670 if (!dd->piovl15base) { in qib_init_7322_variables()
6675 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ in qib_init_7322_variables()
6680 if (!dd->num_pports) { in qib_init_7322_variables()
6681 qib_dev_err(dd, "No ports enabled, giving up initialization\n"); in qib_init_7322_variables()
6685 write_7322_initregs(dd); in qib_init_7322_variables()
6686 ret = qib_create_ctxts(dd); in qib_init_7322_variables()
6687 init_7322_cntrnames(dd); in qib_init_7322_variables()
6701 if (dd->flags & QIB_HAS_SEND_DMA) { in qib_init_7322_variables()
6702 dd->cspec->sdmabufcnt = dd->piobcnt4k; in qib_init_7322_variables()
6705 dd->cspec->sdmabufcnt = 0; in qib_init_7322_variables()
6706 sbufs = dd->piobcnt4k; in qib_init_7322_variables()
6708 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - in qib_init_7322_variables()
6709 dd->cspec->sdmabufcnt; in qib_init_7322_variables()
6710 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; in qib_init_7322_variables()
6711 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ in qib_init_7322_variables()
6712 dd->last_pio = dd->cspec->lastbuf_for_pio; in qib_init_7322_variables()
6713 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? in qib_init_7322_variables()
6714 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; in qib_init_7322_variables()
6722 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) in qib_init_7322_variables()
6723 updthresh = dd->pbufsctxt - 2; in qib_init_7322_variables()
6724 dd->cspec->updthresh_dflt = updthresh; in qib_init_7322_variables()
6725 dd->cspec->updthresh = updthresh; in qib_init_7322_variables()
6728 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) in qib_init_7322_variables()
6732 dd->psxmitwait_supported = 1; in qib_init_7322_variables()
6733 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; in qib_init_7322_variables()
6735 if (!dd->ctxtcnt) in qib_init_7322_variables()
6736 dd->ctxtcnt = 1; /* for other initialization code */ in qib_init_7322_variables()
6745 struct qib_devdata *dd = ppd->dd; in qib_7322_getsendbuf() local
6749 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; in qib_7322_getsendbuf()
6752 if ((plen + 1) > dd->piosize2kmax_dwords) in qib_7322_getsendbuf()
6753 first = dd->piobcnt2k; in qib_7322_getsendbuf()
6756 last = dd->cspec->lastbuf_for_pio; in qib_7322_getsendbuf()
6758 return qib_getsendbuf_range(dd, pbufnum, first, last); in qib_7322_getsendbuf()
6784 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6788 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6792 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6798 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6810 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6817 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6822 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6826 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6830 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6834 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6838 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6842 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6846 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6850 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6910 struct qib_devdata *dd = ppd->dd; in init_sdma_7322_regs() local
6922 if (dd->num_pports) in init_sdma_7322_regs()
6923 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ in init_sdma_7322_regs()
6925 n = dd->cspec->sdmabufcnt; /* failsafe for init */ in init_sdma_7322_regs()
6926 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - in init_sdma_7322_regs()
6927 ((dd->num_pports == 1 || ppd->port == 2) ? n : in init_sdma_7322_regs()
6928 dd->cspec->sdmabufcnt); in init_sdma_7322_regs()
6949 struct qib_devdata *dd = ppd->dd; in qib_sdma_7322_gethead() local
6958 (dd->flags & QIB_HAS_SDMA_TIMEOUT); in qib_sdma_7322_gethead()
7034 static void qib_7322_initvl15_bufs(struct qib_devdata *dd) in qib_7322_initvl15_bufs() argument
7038 vl15bufs = dd->piobcnt2k + dd->piobcnt4k; in qib_7322_initvl15_bufs()
7039 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, in qib_7322_initvl15_bufs()
7046 if (rcd->dd->num_pports > 1) { in qib_7322_init_ctxt()
7054 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; in qib_7322_init_ctxt()
7061 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, in qib_7322_txchk_change() argument
7088 le64_to_cpu(dd->pioavailregs_dma[i]); in qib_7322_txchk_change()
7103 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7115 clear_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7125 qib_read_kreg32(dd, kr_scratch); in qib_7322_txchk_change()
7127 set_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7133 set_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7134 clear_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7136 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7138 for (i = dd->first_user_ctxt; in qib_7322_txchk_change()
7139 dd->cspec->updthresh != dd->cspec->updthresh_dflt in qib_7322_txchk_change()
7140 && i < dd->cfgctxts; i++) in qib_7322_txchk_change()
7141 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && in qib_7322_txchk_change()
7142 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) in qib_7322_txchk_change()
7143 < dd->cspec->updthresh_dflt) in qib_7322_txchk_change()
7145 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7146 if (i == dd->cfgctxts) { in qib_7322_txchk_change()
7147 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7148 dd->cspec->updthresh = dd->cspec->updthresh_dflt; in qib_7322_txchk_change()
7149 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7150 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7153 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7154 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7161 clear_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7162 set_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7164 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7166 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { in qib_7322_txchk_change()
7167 dd->cspec->updthresh = (rcd->piocnt / in qib_7322_txchk_change()
7169 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7170 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7173 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7174 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7176 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7184 qib_write_kreg(dd, kr_sendcheckmask + i, in qib_7322_txchk_change()
7185 dd->cspec->sendchkenable[i]); in qib_7322_txchk_change()
7188 qib_write_kreg(dd, kr_sendgrhcheckmask + i, in qib_7322_txchk_change()
7189 dd->cspec->sendgrhchk[i]); in qib_7322_txchk_change()
7190 qib_write_kreg(dd, kr_sendibpktmask + i, in qib_7322_txchk_change()
7191 dd->cspec->sendibchk[i]); in qib_7322_txchk_change()
7198 qib_read_kreg32(dd, kr_scratch); in qib_7322_txchk_change()
7203 static void writescratch(struct qib_devdata *dd, u32 val) in writescratch() argument
7205 qib_write_kreg(dd, kr_scratch, val); in writescratch()
7209 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) in qib_7322_tempsense_rd() argument
7228 struct qib_devdata *dd; in qib_init_iba7322_funcs() local
7232 dd = qib_alloc_devdata(pdev, in qib_init_iba7322_funcs()
7236 if (IS_ERR(dd)) in qib_init_iba7322_funcs()
7239 dd->f_bringup_serdes = qib_7322_bringup_serdes; in qib_init_iba7322_funcs()
7240 dd->f_cleanup = qib_setup_7322_cleanup; in qib_init_iba7322_funcs()
7241 dd->f_clear_tids = qib_7322_clear_tids; in qib_init_iba7322_funcs()
7242 dd->f_free_irq = qib_7322_free_irq; in qib_init_iba7322_funcs()
7243 dd->f_get_base_info = qib_7322_get_base_info; in qib_init_iba7322_funcs()
7244 dd->f_get_msgheader = qib_7322_get_msgheader; in qib_init_iba7322_funcs()
7245 dd->f_getsendbuf = qib_7322_getsendbuf; in qib_init_iba7322_funcs()
7246 dd->f_gpio_mod = gpio_7322_mod; in qib_init_iba7322_funcs()
7247 dd->f_eeprom_wen = qib_7322_eeprom_wen; in qib_init_iba7322_funcs()
7248 dd->f_hdrqempty = qib_7322_hdrqempty; in qib_init_iba7322_funcs()
7249 dd->f_ib_updown = qib_7322_ib_updown; in qib_init_iba7322_funcs()
7250 dd->f_init_ctxt = qib_7322_init_ctxt; in qib_init_iba7322_funcs()
7251 dd->f_initvl15_bufs = qib_7322_initvl15_bufs; in qib_init_iba7322_funcs()
7252 dd->f_intr_fallback = qib_7322_intr_fallback; in qib_init_iba7322_funcs()
7253 dd->f_late_initreg = qib_late_7322_initreg; in qib_init_iba7322_funcs()
7254 dd->f_setpbc_control = qib_7322_setpbc_control; in qib_init_iba7322_funcs()
7255 dd->f_portcntr = qib_portcntr_7322; in qib_init_iba7322_funcs()
7256 dd->f_put_tid = qib_7322_put_tid; in qib_init_iba7322_funcs()
7257 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; in qib_init_iba7322_funcs()
7258 dd->f_rcvctrl = rcvctrl_7322_mod; in qib_init_iba7322_funcs()
7259 dd->f_read_cntrs = qib_read_7322cntrs; in qib_init_iba7322_funcs()
7260 dd->f_read_portcntrs = qib_read_7322portcntrs; in qib_init_iba7322_funcs()
7261 dd->f_reset = qib_do_7322_reset; in qib_init_iba7322_funcs()
7262 dd->f_init_sdma_regs = init_sdma_7322_regs; in qib_init_iba7322_funcs()
7263 dd->f_sdma_busy = qib_sdma_7322_busy; in qib_init_iba7322_funcs()
7264 dd->f_sdma_gethead = qib_sdma_7322_gethead; in qib_init_iba7322_funcs()
7265 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; in qib_init_iba7322_funcs()
7266 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; in qib_init_iba7322_funcs()
7267 dd->f_sdma_update_tail = qib_sdma_update_7322_tail; in qib_init_iba7322_funcs()
7268 dd->f_sendctrl = sendctrl_7322_mod; in qib_init_iba7322_funcs()
7269 dd->f_set_armlaunch = qib_set_7322_armlaunch; in qib_init_iba7322_funcs()
7270 dd->f_set_cntr_sample = qib_set_cntr_7322_sample; in qib_init_iba7322_funcs()
7271 dd->f_iblink_state = qib_7322_iblink_state; in qib_init_iba7322_funcs()
7272 dd->f_ibphys_portstate = qib_7322_phys_portstate; in qib_init_iba7322_funcs()
7273 dd->f_get_ib_cfg = qib_7322_get_ib_cfg; in qib_init_iba7322_funcs()
7274 dd->f_set_ib_cfg = qib_7322_set_ib_cfg; in qib_init_iba7322_funcs()
7275 dd->f_set_ib_loopback = qib_7322_set_loopback; in qib_init_iba7322_funcs()
7276 dd->f_get_ib_table = qib_7322_get_ib_table; in qib_init_iba7322_funcs()
7277 dd->f_set_ib_table = qib_7322_set_ib_table; in qib_init_iba7322_funcs()
7278 dd->f_set_intr_state = qib_7322_set_intr_state; in qib_init_iba7322_funcs()
7279 dd->f_setextled = qib_setup_7322_setextled; in qib_init_iba7322_funcs()
7280 dd->f_txchk_change = qib_7322_txchk_change; in qib_init_iba7322_funcs()
7281 dd->f_update_usrhead = qib_update_7322_usrhead; in qib_init_iba7322_funcs()
7282 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; in qib_init_iba7322_funcs()
7283 dd->f_xgxs_reset = qib_7322_mini_pcs_reset; in qib_init_iba7322_funcs()
7284 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; in qib_init_iba7322_funcs()
7285 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; in qib_init_iba7322_funcs()
7286 dd->f_sdma_init_early = qib_7322_sdma_init_early; in qib_init_iba7322_funcs()
7287 dd->f_writescratch = writescratch; in qib_init_iba7322_funcs()
7288 dd->f_tempsense_rd = qib_7322_tempsense_rd; in qib_init_iba7322_funcs()
7290 dd->f_notify_dca = qib_7322_notify_dca; in qib_init_iba7322_funcs()
7298 ret = qib_pcie_ddinit(dd, pdev, ent); in qib_init_iba7322_funcs()
7303 ret = qib_init_7322_variables(dd); in qib_init_iba7322_funcs()
7307 if (qib_mini_init || !dd->num_pports) in qib_init_iba7322_funcs()
7316 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); in qib_init_iba7322_funcs()
7319 irq_table[i].port <= dd->num_pports) || in qib_init_iba7322_funcs()
7321 dd->rcd[i - ARRAY_SIZE(irq_table)])) in qib_init_iba7322_funcs()
7325 actual_cnt -= dd->num_pports; in qib_init_iba7322_funcs()
7328 dd->cspec->msix_entries = kzalloc(tabsize * in qib_init_iba7322_funcs()
7330 if (!dd->cspec->msix_entries) in qib_init_iba7322_funcs()
7333 if (qib_pcie_params(dd, 8, &tabsize)) in qib_init_iba7322_funcs()
7334 qib_dev_err(dd, in qib_init_iba7322_funcs()
7337 dd->cspec->num_msix_entries = tabsize; in qib_init_iba7322_funcs()
7340 qib_setup_7322_interrupt(dd, 1); in qib_init_iba7322_funcs()
7343 qib_write_kreg(dd, kr_hwdiagctrl, 0); in qib_init_iba7322_funcs()
7346 qib_devinfo(dd->pcidev, "DCA enabled\n"); in qib_init_iba7322_funcs()
7347 dd->flags |= QIB_DCA_ENABLED; in qib_init_iba7322_funcs()
7348 qib_setup_dca(dd); in qib_init_iba7322_funcs()
7354 qib_pcie_ddcleanup(dd); in qib_init_iba7322_funcs()
7356 qib_free_devdata(dd); in qib_init_iba7322_funcs()
7357 dd = ERR_PTR(ret); in qib_init_iba7322_funcs()
7359 return dd; in qib_init_iba7322_funcs()
7383 struct qib_devdata *dd = ppd->dd; in set_txdds() local
7395 regidx += (dd->palign / sizeof(u64)); in set_txdds()
7401 qib_write_kreg(dd, regidx, pack_ent); in set_txdds()
7403 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_txdds()
7676 *sdr_dds = txdds_sdr + ppd->dd->board_atten; in find_best_ent()
7677 *ddr_dds = txdds_ddr + ppd->dd->board_atten; in find_best_ent()
7678 *qdr_dds = txdds_qdr + ppd->dd->board_atten; in find_best_ent()
7705 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && in find_best_ent()
7710 ppd->dd->unit, ppd->port, idx); in find_best_ent()
7732 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) in init_txdds_table()
7771 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, in ahb_mod() argument
7779 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); in ahb_mod()
7782 qib_write_kreg(dd, KR_AHB_ACC, acc); in ahb_mod()
7785 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7790 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); in ahb_mod()
7801 qib_write_kreg(dd, KR_AHB_TRANS, trans); in ahb_mod()
7804 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7809 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", in ahb_mod()
7814 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7824 qib_write_kreg(dd, KR_AHB_TRANS, trans); in ahb_mod()
7827 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7832 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", in ahb_mod()
7839 qib_write_kreg(dd, KR_AHB_ACC, prev_acc); in ahb_mod()
7846 struct qib_devdata *dd = ppd->dd; in ibsd_wr_allchans() local
7851 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, in ibsd_wr_allchans()
7853 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in ibsd_wr_allchans()
7865 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7869 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7879 if (ppd->dd->cspec->r1) in serdes_7322_init()
7911 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_old()
7915 le_val = IS_QME(ppd->dd) ? 0 : 1; in serdes_7322_init_old()
7919 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_old()
7926 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7927 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7928 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7929 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7932 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7933 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7934 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7935 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7938 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_old()
7954 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_old()
7964 ppd->dd->cspec->r1 ? in serdes_7322_init_old()
7974 if (!ppd->dd->cspec->r1) { in serdes_7322_init_old()
7992 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_new()
8034 if (!ppd->dd->cspec->r1) { in serdes_7322_init_new()
8060 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
8061 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
8062 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
8063 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
8066 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8067 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8068 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8069 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8072 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_new()
8096 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8109 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8124 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_new()
8134 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_new()
8155 ppd->dd->cspec->r1 ? in serdes_7322_init_new()
8185 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_code()
8193 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8196 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8203 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8205 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8207 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8209 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8270 if (!ppd->dd->cspec->r1) in force_h1()
8292 static int qib_r_grab(struct qib_devdata *dd) in qib_r_grab() argument
8296 qib_write_kreg(dd, kr_r_access, val); in qib_r_grab()
8297 qib_read_kreg32(dd, kr_scratch); in qib_r_grab()
8304 static int qib_r_wait_for_rdy(struct qib_devdata *dd) in qib_r_wait_for_rdy() argument
8310 val = qib_read_kreg32(dd, kr_r_access); in qib_r_wait_for_rdy()
8317 static int qib_r_shift(struct qib_devdata *dd, int bisten, in qib_r_shift() argument
8325 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8339 qib_write_kreg(dd, kr_r_access, val); in qib_r_shift()
8340 qib_read_kreg32(dd, kr_scratch); in qib_r_shift()
8341 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8347 qib_write_kreg(dd, kr_r_access, val); in qib_r_shift()
8348 qib_read_kreg32(dd, kr_scratch); in qib_r_shift()
8349 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8357 static int qib_r_update(struct qib_devdata *dd, int bisten) in qib_r_update() argument
8363 ret = qib_r_wait_for_rdy(dd); in qib_r_update()
8365 qib_write_kreg(dd, kr_r_access, val); in qib_r_update()
8366 qib_read_kreg32(dd, kr_scratch); in qib_r_update()
8469 struct qib_devdata *dd = ppd->dd; in setup_7322_link_recovery() local
8471 if (!ppd->dd->cspec->r1) in setup_7322_link_recovery()
8474 dd->cspec->recovery_ports_initted++; in setup_7322_link_recovery()
8477 if (!both && dd->cspec->recovery_ports_initted == 1) { in setup_7322_link_recovery()
8485 if (qib_r_grab(dd) < 0 || in setup_7322_link_recovery()
8486 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || in setup_7322_link_recovery()
8487 qib_r_update(dd, BISTEN_ETM) < 0 || in setup_7322_link_recovery()
8488 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || in setup_7322_link_recovery()
8489 qib_r_update(dd, BISTEN_AT) < 0 || in setup_7322_link_recovery()
8490 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, in setup_7322_link_recovery()
8492 qib_r_update(dd, BISTEN_PORT_SEL) < 0 || in setup_7322_link_recovery()
8493 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || in setup_7322_link_recovery()
8494 qib_r_update(dd, BISTEN_AT) < 0 || in setup_7322_link_recovery()
8495 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || in setup_7322_link_recovery()
8496 qib_r_update(dd, BISTEN_ETM) < 0) in setup_7322_link_recovery()
8497 qib_dev_err(dd, "Failed IB link recovery setup\n"); in setup_7322_link_recovery()
8502 struct qib_devdata *dd = ppd->dd; in check_7322_rxe_status() local
8505 if (dd->cspec->recovery_ports_initted != 1) in check_7322_rxe_status()
8507 qib_write_kreg(dd, kr_control, dd->control | in check_7322_rxe_status()
8509 (void)qib_read_kreg64(dd, kr_scratch); in check_7322_rxe_status()
8511 fmask = qib_read_kreg64(dd, kr_act_fmask); in check_7322_rxe_status()
8518 ppd->dd->cspec->stay_in_freeze = 1; in check_7322_rxe_status()
8519 qib_7322_set_intr_state(ppd->dd, 0); in check_7322_rxe_status()
8520 qib_write_kreg(dd, kr_fmask, 0ULL); in check_7322_rxe_status()
8521 qib_dev_err(dd, "HCA unusable until powercycled\n"); in check_7322_rxe_status()
8525 qib_write_kreg(ppd->dd, kr_hwerrclear, in check_7322_rxe_status()
8529 qib_write_kreg(dd, kr_control, dd->control); in check_7322_rxe_status()
8530 qib_read_kreg32(dd, kr_scratch); in check_7322_rxe_status()
8537 qib_read_kreg32(dd, kr_scratch); in check_7322_rxe_status()