Lines Matching refs:adap

(Each entry shows the source line number, the matching code, and the enclosing function; a trailing "local" or "argument" marks how the symbol is bound at that line.)

226 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
227 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
247 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
252 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
294 void t4_os_portmod_changed(const struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
300 const struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
334 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash() local
340 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
344 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
351 struct adapter *adap = pi->adapter; in cxgb4_mac_sync() local
360 ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
373 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
383 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync() local
391 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
399 ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
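cxgb4_mac_sync()/cxgb4_mac_unsync() above have exactly the shape of the sync/unsync callbacks consumed by __dev_uc_sync()/__dev_mc_sync(): sync allocates a hardware MAC filter and remembers the address on adap->mac_hlist, while unsync walks that list and frees the filter. A minimal sketch of the pattern follows; the demo_* names and the bare global list are illustrative, not the driver's exact code.

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

/* One node per MAC address currently programmed into the hardware. */
struct demo_mac_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
};

static LIST_HEAD(demo_mac_list);	/* stands in for adap->mac_hlist */

static int demo_mac_sync(struct net_device *dev, const u8 *mac)
{
	/* GFP_ATOMIC: ndo_set_rx_mode runs under netif_addr_lock_bh */
	struct demo_mac_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;
	/* real driver: t4_alloc_mac_filt() programs the filter here */
	ether_addr_copy(e->addr, mac);
	list_add_tail(&e->list, &demo_mac_list);
	return 0;
}

static int demo_mac_unsync(struct net_device *dev, const u8 *mac)
{
	struct demo_mac_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &demo_mac_list, list) {
		if (ether_addr_equal(e->addr, mac)) {
			/* real driver: t4_free_mac_filt() releases the filter */
			list_del(&e->list);
			kfree(e);
		}
	}
	return 0;
}

/* Typically wired up from ndo_set_rx_mode: */
static void demo_set_rxmode(struct net_device *dev)
{
	__dev_uc_sync(dev, demo_mac_sync, demo_mac_unsync);
	__dev_mc_sync(dev, demo_mac_sync, demo_mac_unsync);
}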
463 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
466 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
470 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
499 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
510 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
540 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
555 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
559 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
563 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
567 filter_rpl(q->adap, p); in fwevtq_handler()
569 dev_err(q->adap->pdev_dev, in fwevtq_handler()
591 struct adapter *adap = cookie; in t4_nondata_intr() local
592 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
595 adap->swintr = 1; in t4_nondata_intr()
596 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
598 if (adap->flags & MASTER_PF) in t4_nondata_intr()
599 t4_slow_intr_handler(adap); in t4_nondata_intr()
606 static void name_msix_vecs(struct adapter *adap) in name_msix_vecs() argument
608 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); in name_msix_vecs()
611 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); in name_msix_vecs()
614 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", in name_msix_vecs()
615 adap->port[0]->name); in name_msix_vecs()
618 for_each_port(adap, j) { in name_msix_vecs()
619 struct net_device *d = adap->port[j]; in name_msix_vecs()
623 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", in name_msix_vecs()
628 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
630 struct sge *s = &adap->sge; in request_msix_queue_irqs()
634 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, in request_msix_queue_irqs()
635 adap->msix_info[1].desc, &s->fw_evtq); in request_msix_queue_irqs()
640 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
642 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
652 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
654 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in request_msix_queue_irqs()
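The tail of request_msix_queue_irqs() shows the standard unwind-on-partial-failure idiom: if request_irq() fails for queue N, every vector already requested is released in reverse order before the error is returned, so the caller sees all-or-nothing. A freestanding sketch, with demo_* names invented for illustration:

#include <linux/interrupt.h>

#define DEMO_NQUEUES 8

struct demo_queue { int id; };

struct demo_adapter {
	unsigned int vec[DEMO_NQUEUES];	/* MSI-X vector numbers */
	char desc[DEMO_NQUEUES][32];	/* names shown in /proc/interrupts */
	struct demo_queue queue[DEMO_NQUEUES];
};

static irqreturn_t demo_queue_intr(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int demo_request_queue_irqs(struct demo_adapter *adap)
{
	int i, err;

	for (i = 0; i < DEMO_NQUEUES; i++) {
		err = request_irq(adap->vec[i], demo_queue_intr, 0,
				  adap->desc[i], &adap->queue[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* release in reverse order */
		free_irq(adap->vec[i], &adap->queue[i]);
	return err;
}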
658 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
661 struct sge *s = &adap->sge; in free_msix_queue_irqs()
663 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in free_msix_queue_irqs()
665 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
718 static int setup_rss(struct adapter *adap) in setup_rss() argument
722 for_each_port(adap, i) { in setup_rss()
723 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
748 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
752 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
753 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
761 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
763 if (adap->flags & FULL_INIT_DONE) { in disable_interrupts()
764 t4_intr_disable(adap); in disable_interrupts()
765 if (adap->flags & USING_MSIX) { in disable_interrupts()
766 free_msix_queue_irqs(adap); in disable_interrupts()
767 free_irq(adap->msix_info[0].vec, adap); in disable_interrupts()
769 free_irq(adap->pdev->irq, adap); in disable_interrupts()
771 quiesce_rx(adap); in disable_interrupts()
778 static void enable_rx(struct adapter *adap) in enable_rx() argument
782 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
783 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
791 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in enable_rx()
798 static int setup_fw_sge_queues(struct adapter *adap) in setup_fw_sge_queues() argument
800 struct sge *s = &adap->sge; in setup_fw_sge_queues()
806 if (adap->flags & USING_MSIX) in setup_fw_sge_queues()
807 adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ in setup_fw_sge_queues()
809 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
813 adap->msi_idx = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
816 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
817 adap->msi_idx, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
829 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
832 struct sge *s = &adap->sge; in setup_sge_queues()
836 if (is_uld(adap)) in setup_sge_queues()
839 for_each_port(adap, i) { in setup_sge_queues()
840 struct net_device *dev = adap->port[i]; in setup_sge_queues()
846 if (adap->msi_idx > 0) in setup_sge_queues()
847 adap->msi_idx++; in setup_sge_queues()
848 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
849 adap->msi_idx, &q->fl, in setup_sge_queues()
852 t4_get_tp_ch_map(adap, in setup_sge_queues()
860 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
868 for_each_port(adap, i) { in setup_sge_queues()
875 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
881 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
882 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
883 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
889 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
892 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
896 t4_free_sge_resources(adap); in setup_sge_queues()
990 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
999 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1006 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1014 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1036 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1038 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1042 t4_setup_debugfs(adap); in setup_debugfs()
1208 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1210 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1211 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1213 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1214 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1215 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1216 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1218 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1227 struct adapter *adap; in process_tid_release_list() local
1229 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1231 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1232 while (adap->tid_release_head) { in process_tid_release_list()
1233 void **p = adap->tid_release_head; in process_tid_release_list()
1237 adap->tid_release_head = *p; in process_tid_release_list()
1239 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1245 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1246 t4_ofld_send(adap, skb); in process_tid_release_list()
1247 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1249 adap->tid_release_task_busy = false; in process_tid_release_list()
1250 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
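cxgb4_queue_tid_release() and process_tid_release_list() above implement an intrusive, lock-protected free list with a tagged head pointer: each tid_tab slot doubles as the "next" link, and because the slots are pointer-aligned, the channel number rides in the two low bits of the head (the `(uintptr_t)p | chan` at line 1213). A user-space sketch of the tagging idiom, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define CHAN_MASK 0x3u		/* pointer alignment frees the 2 low bits */

static void **head;		/* tagged head: slot address | channel */
static void *tid_tab[16];	/* each slot doubles as the next-link */

static void push_tid(unsigned int tid, unsigned int chan)
{
	void **p = &tid_tab[tid];

	*p = head;		/* link to the old (still tagged) head */
	head = (void **)((uintptr_t)p | chan);
}

static void pop_all(void)
{
	while (head) {
		unsigned int chan = (uintptr_t)head & CHAN_MASK;
		void **p = (void **)((uintptr_t)head & ~(uintptr_t)CHAN_MASK);

		head = *p;	/* advance to the next tagged entry */
		*p = NULL;
		printf("release tid %td on chan %u\n", p - tid_tab, chan);
	}
}

int main(void)
{
	push_tid(3, 1);
	push_tid(7, 2);
	pop_all();		/* LIFO: tid 7/chan 2, then tid 3/chan 1 */
	return 0;
}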
1261 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1284 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1295 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1341 if (is_offload(adap)) { in tid_init()
1345 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1370 struct adapter *adap; in cxgb4_create_server() local
1378 adap = netdev2adap(dev); in cxgb4_create_server()
1386 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1390 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1411 struct adapter *adap; in cxgb4_create_server6() local
1419 adap = netdev2adap(dev); in cxgb4_create_server6()
1429 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1433 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
1442 struct adapter *adap; in cxgb4_remove_server() local
1446 adap = netdev2adap(dev); in cxgb4_remove_server()
1457 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
1592 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
1595 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
1596 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
1597 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
1635 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
1637 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1638 t4_tp_get_tcp_stats(adap, v4, v6); in cxgb4_get_tcp_stats()
1639 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1646 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
1648 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
1649 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
1657 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
1659 return t4_sge_ctxt_flush(adap, adap->mbox); in cxgb4_flush_eq_cache()
1663 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
1665 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
1669 spin_lock(&adap->win0_lock); in read_eq_indices()
1670 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
1673 spin_unlock(&adap->win0_lock); in read_eq_indices()
1684 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
1688 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
1701 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
1706 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
1716 struct adapter *adap; in cxgb4_read_tpte() local
1722 adap = netdev2adap(dev); in cxgb4_read_tpte()
1724 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
1732 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
1734 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
1736 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
1753 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
1754 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
1770 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
1771 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
1772 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
1776 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
1785 struct adapter *adap; in cxgb4_read_sge_timestamp() local
1787 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
1788 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
1789 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
1846 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
1851 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
1852 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
1853 if (is_t4(adap->params.chip)) { in drain_db_fifo()
1877 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
1885 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
1893 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
1897 for_each_ethrxq(&adap->sge, i) in disable_dbs()
1898 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
1899 if (is_offload(adap)) { in disable_dbs()
1901 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
1904 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
1911 for_each_port(adap, i) in disable_dbs()
1912 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
1915 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
1919 for_each_ethrxq(&adap->sge, i) in enable_dbs()
1920 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
1921 if (is_offload(adap)) { in enable_dbs()
1923 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
1926 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
1929 enable_txq_db(adap, &txq->q); in enable_dbs()
1933 for_each_port(adap, i) in enable_dbs()
1934 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
1937 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
1941 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
1942 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
1947 struct adapter *adap; in process_db_full() local
1949 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
1951 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
1952 enable_dbs(adap); in process_db_full()
1953 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
1954 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
1955 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
1959 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
1963 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
1969 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
1981 if (is_t4(adap->params.chip)) in sync_txq_pidx()
1986 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
1994 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
1997 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2001 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2002 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2003 if (is_offload(adap)) { in recover_all_queues()
2005 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2007 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2010 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2014 for_each_port(adap, i) in recover_all_queues()
2015 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2020 struct adapter *adap; in process_db_drop() local
2022 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2024 if (is_t4(adap->params.chip)) { in process_db_drop()
2025 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2026 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2027 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2028 recover_all_queues(adap); in process_db_drop()
2029 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2030 enable_dbs(adap); in process_db_drop()
2031 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2032 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2033 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2040 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2043 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2047 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2050 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2053 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2054 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2057 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2059 if (is_t4(adap->params.chip)) { in t4_db_full()
2060 disable_dbs(adap); in t4_db_full()
2061 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2062 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2064 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2068 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2070 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2071 disable_dbs(adap); in t4_db_dropped()
2072 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2074 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
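t4_db_full()/t4_db_dropped() run in hard-IRQ context, so they only mask or note the condition and punt to adap->workq; process_db_full()/process_db_drop() then recover the struct adapter with container_of() and do the slow FIFO draining in process context. A minimal sketch of that deferral pattern (demo_* names are placeholders):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct workqueue_struct *workq;
	struct work_struct db_full_task;
};

/* Process context: free to sleep, drain FIFOs, unmask the interrupt. */
static void demo_process_db_full(struct work_struct *work)
{
	struct demo_adapter *adap =
		container_of(work, struct demo_adapter, db_full_task);

	/* ... drain_db_fifo(), enable_dbs(), re-enable the interrupt ... */
	(void)adap;
}

/* Hard-IRQ context: do the minimum, defer the rest. */
static void demo_db_full(struct demo_adapter *adap)
{
	/* mask the doorbell-full interrupt first, then: */
	queue_work(adap->workq, &adap->db_full_task);
}

/* During probe (destroy_workqueue() on teardown): */
static int demo_init(struct demo_adapter *adap)
{
	adap->workq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 1);
	if (!adap->workq)
		return -ENOMEM;
	INIT_WORK(&adap->db_full_task, demo_process_db_full);
	return 0;
}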
2085 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2090 list_del(&adap->list_node); in detach_ulds()
2093 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2094 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2104 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2110 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2111 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2124 struct adapter *adap; in cxgb4_inet6addr_handler() local
2130 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2133 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2137 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2171 static void update_clip(const struct adapter *adap) in update_clip() argument
2180 dev = adap->port[i]; in update_clip()
2203 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2208 err = setup_sge_queues(adap); in cxgb_up()
2211 err = setup_rss(adap); in cxgb_up()
2215 if (adap->flags & USING_MSIX) { in cxgb_up()
2216 name_msix_vecs(adap); in cxgb_up()
2217 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, in cxgb_up()
2218 adap->msix_info[0].desc, adap); in cxgb_up()
2221 err = request_msix_queue_irqs(adap); in cxgb_up()
2223 free_irq(adap->msix_info[0].vec, adap); in cxgb_up()
2227 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2228 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, in cxgb_up()
2229 adap->port[0]->name, adap); in cxgb_up()
2234 enable_rx(adap); in cxgb_up()
2235 t4_sge_start(adap); in cxgb_up()
2236 t4_intr_enable(adap); in cxgb_up()
2237 adap->flags |= FULL_INIT_DONE; in cxgb_up()
2240 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2242 update_clip(adap); in cxgb_up()
2245 INIT_LIST_HEAD(&adap->mac_hlist); in cxgb_up()
2249 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2251 t4_free_sge_resources(adap); in cxgb_up()
2316 struct adapter *adap; in cxgb4_create_server_filter() local
2320 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2323 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2324 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2328 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2337 clear_filter(adap, f); in cxgb4_create_server_filter()
2349 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2355 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2369 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2370 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2372 clear_filter(adap, f); in cxgb4_create_server_filter()
2384 struct adapter *adap; in cxgb4_remove_server_filter() local
2386 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2389 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2390 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2392 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2396 return delete_filter(adap, stid); in cxgb4_remove_server_filter()
2594 static void fill_vf_station_mac_addr(struct adapter *adap) in fill_vf_station_mac_addr() argument
2602 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in fill_vf_station_mac_addr()
2604 na = adap->params.vpd.na; in fill_vf_station_mac_addr()
2619 for (i = 0; i < adap->num_vfs; i++) { in fill_vf_station_mac_addr()
2620 macaddr[5] = adap->pf * 16 + i; in fill_vf_station_mac_addr()
2621 ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr); in fill_vf_station_mac_addr()
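Here the last byte of each VF's station MAC is overwritten with adap->pf * 16 + i, giving every VF a distinct address derived from the VPD base MAC (room for 16 VFs per PF): e.g. PF 0/VF 3 ends in 0x03 while PF 1/VF 3 ends in 0x13.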
2629 struct adapter *adap = pi->adapter; in cxgb_set_vf_mac() local
2642 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); in cxgb_set_vf_mac()
2644 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb_set_vf_mac()
2652 struct adapter *adap = pi->adapter; in cxgb_get_vf_config() local
2654 if (vf >= adap->num_vfs) in cxgb_get_vf_config()
2657 ivi->max_tx_rate = adap->vfinfo[vf].tx_rate; in cxgb_get_vf_config()
2659 ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); in cxgb_get_vf_config()
2679 struct adapter *adap = pi->adapter; in cxgb_set_vf_rate() local
2686 if (vf >= adap->num_vfs) in cxgb_set_vf_rate()
2690 dev_err(adap->pdev_dev, in cxgb_set_vf_rate()
2698 dev_err(adap->pdev_dev, in cxgb_set_vf_rate()
2704 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb_set_vf_rate()
2709 dev_err(adap->pdev_dev, in cxgb_set_vf_rate()
2721 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, in cxgb_set_vf_rate()
2729 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb_set_vf_rate()
2733 dev_info(adap->pdev_dev, in cxgb_set_vf_rate()
2741 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb_set_vf_rate()
2744 dev_err(adap->pdev_dev, in cxgb_set_vf_rate()
2749 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb_set_vf_rate()
2750 adap->pf, vf, class_id); in cxgb_set_vf_rate()
2751 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb_set_vf_rate()
2780 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
2782 if (adap->flags & USING_MSIX) { in cxgb_netpoll()
2784 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
2789 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
2796 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate() local
2809 if (!(adap->flags & FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
2810 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2821 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2834 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2869 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2896 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc() local
2898 if (!(adap->flags & FULL_INIT_DONE)) { in cxgb_setup_tc()
2899 dev_err(adap->pdev_dev, in cxgb_setup_tc()
2972 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
2976 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
2982 t4_shutdown_adapter(adap); in t4_fatal_err()
2983 for_each_port(adap, port) { in t4_fatal_err()
2984 struct net_device *dev = adap->port[port]; in t4_fatal_err()
2995 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
2998 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3000 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3002 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); in setup_memwin()
3005 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3007 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3011 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3013 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3014 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3015 t4_write_reg(adap, in setup_memwin_rdma()
3018 t4_write_reg(adap, in setup_memwin_rdma()
3020 adap->vres.ocq.start); in setup_memwin_rdma()
3021 t4_read_reg(adap, in setup_memwin_rdma()
3026 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
3036 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
3042 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
3046 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
3053 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
3059 t4_sge_init(adap); in adap_init1()
3062 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
3063 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
3064 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
3065 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
3066 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
3069 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
3070 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
3071 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
3075 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3077 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3079 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3083 if (is_offload(adap)) { in adap_init1()
3084 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
3089 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
3097 return t4_early_init(adap, adap->pf); in adap_init1()
3221 static int adap_init0_phy(struct adapter *adap) in adap_init0_phy() argument
3229 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
3231 dev_warn(adap->pdev_dev, in adap_init0_phy()
3242 adap->pdev_dev); in adap_init0_phy()
3250 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
3256 t4_phy_fw_ver(adap, &cur_phy_fw_ver); in adap_init0_phy()
3257 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
3267 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, in adap_init0_phy()
3271 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
3279 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
3568 static int adap_init0(struct adapter *adap) in adap_init0() argument
3580 ret = t4_init_devlog_params(adap); in adap_init0()
3585 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
3588 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
3592 if (ret == adap->mbox) in adap_init0()
3593 adap->flags |= MASTER_PF; in adap_init0()
3603 t4_get_version_info(adap); in adap_init0()
3604 ret = t4_check_fw_version(adap); in adap_init0()
3608 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
3618 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3620 dev_err(adap->pdev_dev, in adap_init0()
3622 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3633 adap->pdev_dev); in adap_init0()
3635 dev_err(adap->pdev_dev, in adap_init0()
3644 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
3662 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
3674 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
3678 adap->params.nports = hweight32(port_vec); in adap_init0()
3679 adap->params.portvec = port_vec; in adap_init0()
3685 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
3687 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
3689 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
3697 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
3704 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
3713 ret = adap_init0_config(adap, reset); in adap_init0()
3715 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
3720 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
3730 ret = t4_sge_init(adap); in adap_init0()
3734 if (is_bypass_device(adap->pdev->device)) in adap_init0()
3735 adap->params.bypass = 1; in adap_init0()
3756 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
3759 adap->sge.egr_start = val[0]; in adap_init0()
3760 adap->l2t_start = val[1]; in adap_init0()
3761 adap->l2t_end = val[2]; in adap_init0()
3762 adap->tids.ftid_base = val[3]; in adap_init0()
3763 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
3764 adap->sge.ingr_start = val[5]; in adap_init0()
3774 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3777 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
3778 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
3780 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
3781 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
3782 if (!adap->sge.egr_map) { in adap_init0()
3787 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
3788 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
3789 if (!adap->sge.ingr_map) { in adap_init0()
3797 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3799 if (!adap->sge.starving_fl) { in adap_init0()
3804 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3806 if (!adap->sge.txq_maperr) { in adap_init0()
3812 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3814 if (!adap->sge.blocked_fl) { in adap_init0()
3822 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3825 adap->clipt_start = val[0]; in adap_init0()
3826 adap->clipt_end = val[1]; in adap_init0()
3832 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
3837 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3842 adap->flags |= FW_OFLD_CONN; in adap_init0()
3843 adap->tids.aftid_base = val[0]; in adap_init0()
3844 adap->tids.aftid_end = val[1]; in adap_init0()
3854 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
3862 if (is_t4(adap->params.chip)) { in adap_init0()
3863 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
3866 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
3868 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
3873 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
3875 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
3885 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
3898 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
3902 adap->tids.ntids = val[0]; in adap_init0()
3903 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
3904 adap->tids.stid_base = val[1]; in adap_init0()
3905 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
3915 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
3916 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
3917 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
3918 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
3919 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
3920 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
3921 adap->tids.ftid_base; in adap_init0()
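The split above keeps roughly one third of the original filter region for ordinary filters and hands the remaining two thirds over as server filter TIDs. For illustration only: if nftids started at 496, DIV_ROUND_UP(496, 3) = 166, so sftid_base lands 166 entries past ftid_base, nsftids becomes 330, and nftids shrinks to 166.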
3923 adap->vres.ddp.start = val[3]; in adap_init0()
3924 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
3925 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
3927 adap->params.offload = 1; in adap_init0()
3928 adap->num_ofld_uld += 1; in adap_init0()
3937 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
3941 adap->vres.stag.start = val[0]; in adap_init0()
3942 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
3943 adap->vres.rq.start = val[2]; in adap_init0()
3944 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
3945 adap->vres.pbl.start = val[4]; in adap_init0()
3946 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
3954 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
3958 adap->vres.qp.start = val[0]; in adap_init0()
3959 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
3960 adap->vres.cq.start = val[2]; in adap_init0()
3961 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
3962 adap->vres.ocq.start = val[4]; in adap_init0()
3963 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
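Note the recurring size = val[hi] - val[lo] + 1 throughout this block: the firmware reports each resource range as an inclusive [start, end] pair, so the +1 converts the end index into a count.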
3967 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
3970 adap->params.max_ordird_qp = 8; in adap_init0()
3971 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
3974 adap->params.max_ordird_qp = val[0]; in adap_init0()
3975 adap->params.max_ird_adapter = val[1]; in adap_init0()
3977 dev_info(adap->pdev_dev, in adap_init0()
3979 adap->params.max_ordird_qp, in adap_init0()
3980 adap->params.max_ird_adapter); in adap_init0()
3981 adap->num_ofld_uld += 2; in adap_init0()
3986 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
3990 adap->vres.iscsi.start = val[0]; in adap_init0()
3991 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
3993 adap->num_ofld_uld += 2; in adap_init0()
3998 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4004 adap->vres.ncrypto_fc = val[0]; in adap_init0()
4006 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; in adap_init0()
4007 adap->num_uld += 1; in adap_init0()
4017 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
4039 if (adap->params.mtus[i] == 1492) { in adap_init0()
4040 adap->params.mtus[i] = 1488; in adap_init0()
4044 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
4045 adap->params.b_wnd); in adap_init0()
4047 t4_init_sge_params(adap); in adap_init0()
4048 adap->flags |= FW_OK; in adap_init0()
4049 t4_init_tp_params(adap); in adap_init0()
4058 kfree(adap->sge.egr_map); in adap_init0()
4059 kfree(adap->sge.ingr_map); in adap_init0()
4060 kfree(adap->sge.starving_fl); in adap_init0()
4061 kfree(adap->sge.txq_maperr); in adap_init0()
4063 kfree(adap->sge.blocked_fl); in adap_init0()
4066 t4_fw_bye(adap, adap->mbox); in adap_init0()
4076 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
4078 if (!adap) in eeh_err_detected()
4082 adap->flags &= ~FW_OK; in eeh_err_detected()
4083 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
4084 spin_lock(&adap->stats_lock); in eeh_err_detected()
4085 for_each_port(adap, i) { in eeh_err_detected()
4086 struct net_device *dev = adap->port[i]; in eeh_err_detected()
4092 spin_unlock(&adap->stats_lock); in eeh_err_detected()
4093 disable_interrupts(adap); in eeh_err_detected()
4094 if (adap->flags & FULL_INIT_DONE) in eeh_err_detected()
4095 cxgb_down(adap); in eeh_err_detected()
4097 if ((adap->flags & DEV_ENABLED)) { in eeh_err_detected()
4099 adap->flags &= ~DEV_ENABLED; in eeh_err_detected()
4109 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
4111 if (!adap) { in eeh_slot_reset()
4117 if (!(adap->flags & DEV_ENABLED)) { in eeh_slot_reset()
4123 adap->flags |= DEV_ENABLED; in eeh_slot_reset()
4131 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
4133 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
4135 adap->flags |= FW_OK; in eeh_slot_reset()
4136 if (adap_init1(adap, &c)) in eeh_slot_reset()
4139 for_each_port(adap, i) { in eeh_slot_reset()
4140 struct port_info *p = adap2pinfo(adap, i); in eeh_slot_reset()
4142 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
4150 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
4151 adap->params.b_wnd); in eeh_slot_reset()
4152 setup_memwin(adap); in eeh_slot_reset()
4153 if (cxgb_up(adap)) in eeh_slot_reset()
4161 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
4163 if (!adap) in eeh_resume()
4167 for_each_port(adap, i) { in eeh_resume()
4168 struct net_device *dev = adap->port[i]; in eeh_resume()
4205 static void cfg_queues(struct adapter *adap) in cfg_queues() argument
4207 struct sge *s = &adap->sge; in cfg_queues()
4215 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { in cfg_queues()
4216 adap->params.offload = 0; in cfg_queues()
4217 adap->params.crypto = 0; in cfg_queues()
4220 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
4226 if (adap->params.nports * 8 > MAX_ETH_QSETS) { in cfg_queues()
4227 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", in cfg_queues()
4228 MAX_ETH_QSETS, adap->params.nports * 8); in cfg_queues()
4232 for_each_port(adap, i) { in cfg_queues()
4233 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4245 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; in cfg_queues()
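The q10g formula gives every non-10G port one queue set and splits the remainder evenly among the 10G ports: if MAX_ETH_QSETS were 32 (an assumed value, for illustration) with 4 ports of which 2 are 10G, q10g = (32 - 2) / 2 = 15.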
4252 for_each_port(adap, i) { in cfg_queues()
4253 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4264 if (is_uld(adap)) { in cfg_queues()
4272 s->ofldqsets = roundup(i, adap->params.nports); in cfg_queues()
4274 s->ofldqsets = adap->params.nports; in cfg_queues()
4281 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
4291 if (!is_t4(adap->params.chip)) in cfg_queues()
4294 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
4295 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
4302 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
4307 while (n < adap->sge.ethqsets) in reduce_ethqs()
4308 for_each_port(adap, i) { in reduce_ethqs()
4309 pi = adap2pinfo(adap, i); in reduce_ethqs()
4312 adap->sge.ethqsets--; in reduce_ethqs()
4313 if (adap->sge.ethqsets <= n) in reduce_ethqs()
4319 for_each_port(adap, i) { in reduce_ethqs()
4320 pi = adap2pinfo(adap, i); in reduce_ethqs()
4326 static int get_msix_info(struct adapter *adap) in get_msix_info() argument
4331 if (is_offload(adap)) in get_msix_info()
4332 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; in get_msix_info()
4333 if (is_pci_uld(adap)) in get_msix_info()
4334 max_ingq += MAX_OFLD_QSETS * adap->num_uld; in get_msix_info()
4343 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), in get_msix_info()
4345 if (!adap->msix_bmap_ulds.msix_bmap) { in get_msix_info()
4349 spin_lock_init(&adap->msix_bmap_ulds.lock); in get_msix_info()
4350 adap->msix_info_ulds = msix_info; in get_msix_info()
4355 static void free_msix_info(struct adapter *adap) in free_msix_info() argument
4357 if (!(adap->num_uld && adap->num_ofld_uld)) in free_msix_info()
4360 kfree(adap->msix_info_ulds); in free_msix_info()
4361 kfree(adap->msix_bmap_ulds.msix_bmap); in free_msix_info()
4367 static int enable_msix(struct adapter *adap) in enable_msix() argument
4371 struct sge *s = &adap->sge; in enable_msix()
4372 unsigned int nchan = adap->params.nports; in enable_msix()
4376 if (is_pci_uld(adap)) in enable_msix()
4377 max_ingq += (MAX_OFLD_QSETS * adap->num_uld); in enable_msix()
4378 if (is_offload(adap)) in enable_msix()
4379 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); in enable_msix()
4386 if (get_msix_info(adap)) { in enable_msix()
4387 adap->params.offload = 0; in enable_msix()
4388 adap->params.crypto = 0; in enable_msix()
4395 if (is_offload(adap)) { in enable_msix()
4396 want += adap->num_ofld_uld * s->ofldqsets; in enable_msix()
4397 ofld_need = adap->num_ofld_uld * nchan; in enable_msix()
4399 if (is_pci_uld(adap)) { in enable_msix()
4400 want += adap->num_uld * s->ofldqsets; in enable_msix()
4401 uld_need = adap->num_uld * nchan; in enable_msix()
4407 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; in enable_msix()
4409 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; in enable_msix()
4411 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
4413 dev_info(adap->pdev_dev, "not enough MSI-X vectors left," in enable_msix()
4427 reduce_ethqs(adap, i); in enable_msix()
4429 if (is_uld(adap)) { in enable_msix()
4437 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
4438 if (is_uld(adap)) { in enable_msix()
4440 adap->msix_info_ulds[j].vec = entries[i].vector; in enable_msix()
4441 adap->msix_info_ulds[j].idx = i; in enable_msix()
4443 adap->msix_bmap_ulds.mapsize = j; in enable_msix()
4445 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " in enable_msix()
4455 static int init_rss(struct adapter *adap) in init_rss() argument
4460 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
4464 for_each_port(adap, i) { in init_rss()
4465 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
4474 static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap, in cxgb4_get_pcie_dev_link_caps() argument
4486 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP, in cxgb4_get_pcie_dev_link_caps()
4488 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2, in cxgb4_get_pcie_dev_link_caps()
4513 static void cxgb4_check_pcie_caps(struct adapter *adap) in cxgb4_check_pcie_caps() argument
4524 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) { in cxgb4_check_pcie_caps()
4525 dev_warn(adap->pdev_dev, in cxgb4_check_pcie_caps()
4530 if (pcie_get_minimum_link(adap->pdev, &speed, &width) || in cxgb4_check_pcie_caps()
4532 dev_warn(adap->pdev_dev, in cxgb4_check_pcie_caps()
4537 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n", in cxgb4_check_pcie_caps()
4539 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n", in cxgb4_check_pcie_caps()
4542 dev_info(adap->pdev_dev, in cxgb4_check_pcie_caps()
4567 const struct adapter *adap = pi->adapter; in print_port_info() local
4569 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) in print_port_info()
4571 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) in print_port_info()
4573 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) in print_port_info()
4599 dev->name, adap->params.vpd.id, adap->name, buf); in print_port_info()
4684 struct adapter *adap = pci_get_drvdata(pdev); in config_mgmt_dev() local
4690 snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); in config_mgmt_dev()
4697 pi->adapter = adap; in config_mgmt_dev()
4698 pi->tx_chan = adap->pf % adap->params.nports; in config_mgmt_dev()
4701 adap->port[0] = netdev; in config_mgmt_dev()
4704 err = register_netdev(adap->port[0]); in config_mgmt_dev()
4707 free_netdev(adap->port[0]); in config_mgmt_dev()
4708 adap->port[0] = NULL; in config_mgmt_dev()
4716 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_iov_configure() local
4721 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
4748 if (adap->port[0]) { in cxgb4_iov_configure()
4749 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
4750 adap->port[0] = NULL; in cxgb4_iov_configure()
4753 kfree(adap->vfinfo); in cxgb4_iov_configure()
4754 adap->vfinfo = NULL; in cxgb4_iov_configure()
4755 adap->num_vfs = 0; in cxgb4_iov_configure()
4764 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
4770 adap->vfinfo = kcalloc(adap->num_vfs, in cxgb4_iov_configure()
4772 if (adap->vfinfo) in cxgb4_iov_configure()
4773 fill_vf_station_mac_addr(adap); in cxgb4_iov_configure()