Lines matching refs:bp — every listed line references the driver's device-state pointer bp (a struct bnx2x *).
34 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
36 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) in bnx2x_add_all_napi_cnic() argument
44 for_each_rx_queue_cnic(bp, i) { in bnx2x_add_all_napi_cnic()
45 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), in bnx2x_add_all_napi_cnic()
47 napi_hash_add(&bnx2x_fp(bp, i, napi)); in bnx2x_add_all_napi_cnic()
51 static void bnx2x_add_all_napi(struct bnx2x *bp) in bnx2x_add_all_napi() argument
56 for_each_eth_queue(bp, i) { in bnx2x_add_all_napi()
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), in bnx2x_add_all_napi()
59 napi_hash_add(&bnx2x_fp(bp, i, napi)); in bnx2x_add_all_napi()
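
Both registration helpers above follow the standard NAPI pattern: one napi_struct per queue, registered with netif_napi_add() and opted into busy polling with napi_hash_add(). A minimal sketch of that pattern, with hypothetical my_* names (not bnx2x code):

#include <linux/netdevice.h>

#define MY_NAPI_WEIGHT 64

/* Poll up to 'budget' packets; re-arm only when the queue is drained. */
static int my_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... reap completions, count them in 'done' ... */
        if (done < budget)
                napi_complete(napi);    /* a real driver re-enables its IRQ here */
        return done;
}

static void my_add_all_napi(struct net_device *dev,
                            struct napi_struct *napis, int nq)
{
        int i;

        for (i = 0; i < nq; i++) {
                netif_napi_add(dev, &napis[i], my_poll, MY_NAPI_WEIGHT);
                napi_hash_add(&napis[i]);       /* opt in to busy polling */
        }
}
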
63 static int bnx2x_calc_num_queues(struct bnx2x *bp) in bnx2x_calc_num_queues() argument
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); in bnx2x_calc_num_queues()
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) in bnx2x_move_fp() argument
90 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
91 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; in bnx2x_move_fp()
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; in bnx2x_move_fp()
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; in bnx2x_move_fp()
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; in bnx2x_move_fp()
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; in bnx2x_move_fp()
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * in bnx2x_move_fp()
125 (bp)->max_cos; in bnx2x_move_fp()
126 if (from == FCOE_IDX(bp)) { in bnx2x_move_fp()
131 memcpy(&bp->bnx2x_txq[new_txdata_index], in bnx2x_move_fp()
132 &bp->bnx2x_txq[old_txdata_index], in bnx2x_move_fp()
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; in bnx2x_move_fp()
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) in bnx2x_fill_fw_str() argument
147 if (IS_PF(bp)) { in bnx2x_fill_fw_str()
151 bnx2x_get_ext_phy_fw_version(&bp->link_params, in bnx2x_fill_fw_str()
153 strlcpy(buf, bp->fw_ver, buf_len); in bnx2x_fill_fw_str()
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), in bnx2x_fill_fw_str()
156 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fill_fw_str()
157 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fill_fw_str()
158 (bp->common.bc_ver & 0xff), in bnx2x_fill_fw_str()
161 bnx2x_vf_fill_fw_str(bp, buf, buf_len); in bnx2x_fill_fw_str()
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) in bnx2x_shrink_eth_fp() argument
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_shrink_eth_fp()
178 for (cos = 1; cos < bp->max_cos; cos++) { in bnx2x_shrink_eth_fp()
180 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp()
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, in bnx2x_free_tx_pkt() argument
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), in bnx2x_free_tx_pkt()
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in bnx2x_free_tx_pkt()
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) in bnx2x_tx_int() argument
281 if (unlikely(bp->panic)) in bnx2x_tx_int()
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
298 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, in bnx2x_tx_int()
334 (bp->state == BNX2X_STATE_OPEN) && in bnx2x_tx_int()
335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) in bnx2x_tx_int()
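
The two conditions above sit inside the classic TX stop/wake handshake: the xmit path stops the queue when descriptors run low, and the completion path re-checks available space and wakes it. A simplified sketch of the completion side, under hypothetical my_* names (bnx2x additionally re-checks under __netif_tx_lock()):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_complete_wake(struct net_device *dev, int qidx, int avail)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

        smp_mb();       /* pairs with the barrier in the xmit stop path */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
            avail >= MAX_SKB_FRAGS + 2)
                netif_tx_wake_queue(txq);
}
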
356 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod() local
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp, in bnx2x_get_rxhash() argument
413 if ((bp->dev->features & NETIF_F_RXHASH) && in bnx2x_get_rxhash()
432 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start() local
445 mapping = dma_map_single(&bp->pdev->dev, in bnx2x_tpa_start()
454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_tpa_start()
478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); in bnx2x_tpa_start()
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
557 mapping = dma_map_page(&bp->pdev->dev, page, 0, in bnx2x_alloc_rx_sge()
559 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_sge()
574 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
626 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
628 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
633 dma_unmap_page(&bp->pdev->dev, in bnx2x_fill_frag_skb()
684 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ip_csum() argument
696 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ipv6_csum() argument
708 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_gro_csum() argument
712 gro_func(bp, skb); in bnx2x_gro_csum()
717 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
724 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); in bnx2x_gro_receive()
727 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); in bnx2x_gro_receive()
739 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
765 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), in bnx2x_tpa_stop()
784 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_tpa_stop()
787 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
791 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
809 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
812 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
824 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, in bnx2x_alloc_rx_data()
827 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_data()
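
bnx2x_tpa_start(), bnx2x_alloc_rx_sge() and bnx2x_alloc_rx_data() above all share the same map-then-verify idiom: every dma_map_single()/dma_map_page() result is checked with dma_mapping_error() before the descriptor is published. A minimal sketch with hypothetical names:

#include <linux/dma-mapping.h>

static int my_map_rx_buf(struct device *dev, void *data, size_t len,
                         dma_addr_t *mapping)
{
        *mapping = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, *mapping)))
                return -ENOMEM; /* caller frees 'data' and bumps a drop stat */
        return 0;
}
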
868 struct bnx2x *bp = fp->bp; in bnx2x_rx_int() local
876 if (unlikely(bp->panic)) in bnx2x_rx_int()
906 if (unlikely(bp->panic)) in bnx2x_rx_int()
985 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
988 if (bp->panic) in bnx2x_rx_int()
998 dma_sync_single_for_cpu(&bp->pdev->dev, in bnx2x_rx_int()
1009 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1016 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && in bnx2x_rx_int()
1018 skb = netdev_alloc_skb_ip_align(bp->dev, len); in bnx2x_rx_int()
1022 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1028 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1030 dma_unmap_single(&bp->pdev->dev, in bnx2x_rx_int()
1037 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1045 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1053 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_rx_int()
1056 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); in bnx2x_rx_int()
1061 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_rx_int()
1063 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1070 bnx2x_set_rx_ts(bp, skb); in bnx2x_rx_int()
1111 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1123 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int() local
1130 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1133 if (unlikely(bp->panic)) in bnx2x_msix_fp_int()
1142 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1148 void bnx2x_acquire_phy_lock(struct bnx2x *bp) in bnx2x_acquire_phy_lock() argument
1150 mutex_lock(&bp->port.phy_mutex); in bnx2x_acquire_phy_lock()
1152 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_acquire_phy_lock()
1155 void bnx2x_release_phy_lock(struct bnx2x *bp) in bnx2x_release_phy_lock() argument
1157 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_release_phy_lock()
1159 mutex_unlock(&bp->port.phy_mutex); in bnx2x_release_phy_lock()
1163 u16 bnx2x_get_mf_speed(struct bnx2x *bp) in bnx2x_get_mf_speed() argument
1165 u16 line_speed = bp->link_vars.line_speed; in bnx2x_get_mf_speed()
1166 if (IS_MF(bp)) { in bnx2x_get_mf_speed()
1167 u16 maxCfg = bnx2x_extract_max_cfg(bp, in bnx2x_get_mf_speed()
1168 bp->mf_config[BP_VN(bp)]); in bnx2x_get_mf_speed()
1173 if (IS_MF_SI(bp)) in bnx2x_get_mf_speed()
1194 static void bnx2x_fill_report_data(struct bnx2x *bp, in bnx2x_fill_report_data() argument
1199 if (IS_PF(bp)) { in bnx2x_fill_report_data()
1201 data->line_speed = bnx2x_get_mf_speed(bp); in bnx2x_fill_report_data()
1204 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) in bnx2x_fill_report_data()
1208 if (!BNX2X_NUM_ETH_QUEUES(bp)) in bnx2x_fill_report_data()
1213 if (bp->link_vars.duplex == DUPLEX_FULL) in bnx2x_fill_report_data()
1218 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) in bnx2x_fill_report_data()
1223 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_fill_report_data()
1227 *data = bp->vf_link_vars; in bnx2x_fill_report_data()
1241 void bnx2x_link_report(struct bnx2x *bp) in bnx2x_link_report() argument
1243 bnx2x_acquire_phy_lock(bp); in bnx2x_link_report()
1244 __bnx2x_link_report(bp); in bnx2x_link_report()
1245 bnx2x_release_phy_lock(bp); in bnx2x_link_report()
1256 void __bnx2x_link_report(struct bnx2x *bp) in __bnx2x_link_report() argument
1261 if (IS_PF(bp) && !CHIP_IS_E1(bp)) in __bnx2x_link_report()
1262 bnx2x_read_mf_cfg(bp); in __bnx2x_link_report()
1265 bnx2x_fill_report_data(bp, &cur_data); in __bnx2x_link_report()
1268 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || in __bnx2x_link_report()
1270 &bp->last_reported_link.link_report_flags) && in __bnx2x_link_report()
1275 bp->link_cnt++; in __bnx2x_link_report()
1280 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); in __bnx2x_link_report()
1283 if (IS_PF(bp)) in __bnx2x_link_report()
1284 bnx2x_iov_link_update(bp); in __bnx2x_link_report()
1288 netif_carrier_off(bp->dev); in __bnx2x_link_report()
1289 netdev_err(bp->dev, "NIC Link is Down\n"); in __bnx2x_link_report()
1295 netif_carrier_on(bp->dev); in __bnx2x_link_report()
1321 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in __bnx2x_link_report()
1344 static void bnx2x_free_tpa_pool(struct bnx2x *bp, in bnx2x_free_tpa_pool() argument
1359 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_tpa_pool()
1367 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) in bnx2x_init_rx_rings_cnic() argument
1371 for_each_rx_queue_cnic(bp, j) { in bnx2x_init_rx_rings_cnic()
1372 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic()
1381 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1386 void bnx2x_init_rx_rings(struct bnx2x *bp) in bnx2x_init_rx_rings() argument
1388 int func = BP_FUNC(bp); in bnx2x_init_rx_rings()
1393 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1394 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1397 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1401 for (i = 0; i < MAX_AGG_QS(bp); i++) { in bnx2x_init_rx_rings()
1412 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1430 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1437 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1439 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1440 MAX_AGG_QS(bp)); in bnx2x_init_rx_rings()
1452 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1453 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1462 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1468 if (CHIP_IS_E1(bp)) { in bnx2x_init_rx_rings()
1469 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1472 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1482 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue() local
1492 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), in bnx2x_free_tx_skbs_queue()
1498 netdev_get_tx_queue(bp->dev, in bnx2x_free_tx_skbs_queue()
1503 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_tx_skbs_cnic() argument
1507 for_each_tx_queue_cnic(bp, i) { in bnx2x_free_tx_skbs_cnic()
1508 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1512 static void bnx2x_free_tx_skbs(struct bnx2x *bp) in bnx2x_free_tx_skbs() argument
1516 for_each_eth_queue(bp, i) { in bnx2x_free_tx_skbs()
1517 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1523 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds() local
1536 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_rx_bds()
1545 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_rx_skbs_cnic() argument
1549 for_each_rx_queue_cnic(bp, j) { in bnx2x_free_rx_skbs_cnic()
1550 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1554 static void bnx2x_free_rx_skbs(struct bnx2x *bp) in bnx2x_free_rx_skbs() argument
1558 for_each_eth_queue(bp, j) { in bnx2x_free_rx_skbs()
1559 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs()
1564 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1568 static void bnx2x_free_skbs_cnic(struct bnx2x *bp) in bnx2x_free_skbs_cnic() argument
1570 bnx2x_free_tx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1571 bnx2x_free_rx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1574 void bnx2x_free_skbs(struct bnx2x *bp) in bnx2x_free_skbs() argument
1576 bnx2x_free_tx_skbs(bp); in bnx2x_free_skbs()
1577 bnx2x_free_rx_skbs(bp); in bnx2x_free_skbs()
1580 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) in bnx2x_update_max_mf_config() argument
1583 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; in bnx2x_update_max_mf_config()
1585 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { in bnx2x_update_max_mf_config()
1593 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); in bnx2x_update_max_mf_config()
1603 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) in bnx2x_free_msix_irqs() argument
1611 if (IS_PF(bp)) { in bnx2x_free_msix_irqs()
1612 free_irq(bp->msix_table[offset].vector, bp->dev); in bnx2x_free_msix_irqs()
1614 bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1618 if (CNIC_SUPPORT(bp)) { in bnx2x_free_msix_irqs()
1624 for_each_eth_queue(bp, i) { in bnx2x_free_msix_irqs()
1628 i, bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1630 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1634 void bnx2x_free_irq(struct bnx2x *bp) in bnx2x_free_irq() argument
1636 if (bp->flags & USING_MSIX_FLAG && in bnx2x_free_irq()
1637 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_free_irq()
1638 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); in bnx2x_free_irq()
1641 if (IS_PF(bp)) in bnx2x_free_irq()
1644 bnx2x_free_msix_irqs(bp, nvecs); in bnx2x_free_irq()
1646 free_irq(bp->dev->irq, bp->dev); in bnx2x_free_irq()
1650 int bnx2x_enable_msix(struct bnx2x *bp) in bnx2x_enable_msix() argument
1655 if (IS_PF(bp)) { in bnx2x_enable_msix()
1656 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1658 bp->msix_table[0].entry); in bnx2x_enable_msix()
1663 if (CNIC_SUPPORT(bp)) { in bnx2x_enable_msix()
1664 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1666 msix_vec, bp->msix_table[msix_vec].entry); in bnx2x_enable_msix()
1671 for_each_eth_queue(bp, i) { in bnx2x_enable_msix()
1672 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1681 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], in bnx2x_enable_msix()
1682 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); in bnx2x_enable_msix()
1689 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); in bnx2x_enable_msix()
1697 bp->flags |= USING_SINGLE_MSIX_FLAG; in bnx2x_enable_msix()
1700 bp->num_ethernet_queues = 1; in bnx2x_enable_msix()
1701 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1714 bp->num_ethernet_queues -= diff; in bnx2x_enable_msix()
1715 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1718 bp->num_queues); in bnx2x_enable_msix()
1721 bp->flags |= USING_MSIX_FLAG; in bnx2x_enable_msix()
1728 bp->flags |= DISABLE_MSI_FLAG; in bnx2x_enable_msix()
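
The allocation flow above is the standard pci_enable_msix_range() shape: ask for the full table, and if even the minimum cannot be granted, retry for a single vector before giving up on MSI-X entirely. A hedged sketch (illustrative names, not the driver's exact error handling):

#include <linux/pci.h>

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
                          int minvec, int maxvec)
{
        /* returns the number of vectors granted, or a negative errno */
        int rc = pci_enable_msix_range(pdev, tbl, minvec, maxvec);

        if (rc == -ENOSPC)      /* can't meet minvec: try one vector */
                rc = pci_enable_msix_range(pdev, tbl, 1, 1);
        return rc;              /* caller falls back to MSI/INTx if rc < 0 */
}
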
1733 static int bnx2x_req_msix_irqs(struct bnx2x *bp) in bnx2x_req_msix_irqs() argument
1738 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1739 rc = request_irq(bp->msix_table[offset++].vector, in bnx2x_req_msix_irqs()
1741 bp->dev->name, bp->dev); in bnx2x_req_msix_irqs()
1748 if (CNIC_SUPPORT(bp)) in bnx2x_req_msix_irqs()
1751 for_each_eth_queue(bp, i) { in bnx2x_req_msix_irqs()
1752 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs()
1754 bp->dev->name, i); in bnx2x_req_msix_irqs()
1756 rc = request_irq(bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1760 bp->msix_table[offset].vector, rc); in bnx2x_req_msix_irqs()
1761 bnx2x_free_msix_irqs(bp, offset); in bnx2x_req_msix_irqs()
1768 i = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_req_msix_irqs()
1769 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1770 offset = 1 + CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1771 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1773 bp->msix_table[0].vector, in bnx2x_req_msix_irqs()
1774 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1775 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1777 offset = CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1778 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1780 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1781 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
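
The per-queue loop above follows the usual request-or-unwind pattern: on the first request_irq() failure, every vector already requested is freed before returning. A compact sketch under hypothetical names (a real driver passes a distinct name per queue):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int my_req_queue_irqs(struct msix_entry *tbl, int n,
                             irq_handler_t handler, void **cookies)
{
        int i, rc;

        for (i = 0; i < n; i++) {
                rc = request_irq(tbl[i].vector, handler, 0, "myq", cookies[i]);
                if (rc) {
                        while (--i >= 0)        /* unwind what succeeded */
                                free_irq(tbl[i].vector, cookies[i]);
                        return rc;
                }
        }
        return 0;
}
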
1786 int bnx2x_enable_msi(struct bnx2x *bp) in bnx2x_enable_msi() argument
1790 rc = pci_enable_msi(bp->pdev); in bnx2x_enable_msi()
1795 bp->flags |= USING_MSI_FLAG; in bnx2x_enable_msi()
1800 static int bnx2x_req_irq(struct bnx2x *bp) in bnx2x_req_irq() argument
1805 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) in bnx2x_req_irq()
1810 if (bp->flags & USING_MSIX_FLAG) in bnx2x_req_irq()
1811 irq = bp->msix_table[0].vector; in bnx2x_req_irq()
1813 irq = bp->pdev->irq; in bnx2x_req_irq()
1815 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); in bnx2x_req_irq()
1818 static int bnx2x_setup_irqs(struct bnx2x *bp) in bnx2x_setup_irqs() argument
1821 if (bp->flags & USING_MSIX_FLAG && in bnx2x_setup_irqs()
1822 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_setup_irqs()
1823 rc = bnx2x_req_msix_irqs(bp); in bnx2x_setup_irqs()
1827 rc = bnx2x_req_irq(bp); in bnx2x_setup_irqs()
1832 if (bp->flags & USING_MSI_FLAG) { in bnx2x_setup_irqs()
1833 bp->dev->irq = bp->pdev->irq; in bnx2x_setup_irqs()
1834 netdev_info(bp->dev, "using MSI IRQ %d\n", in bnx2x_setup_irqs()
1835 bp->dev->irq); in bnx2x_setup_irqs()
1837 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_irqs()
1838 bp->dev->irq = bp->msix_table[0].vector; in bnx2x_setup_irqs()
1839 netdev_info(bp->dev, "using MSIX IRQ %d\n", in bnx2x_setup_irqs()
1840 bp->dev->irq); in bnx2x_setup_irqs()
1847 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) in bnx2x_napi_enable_cnic() argument
1851 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_enable_cnic()
1852 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable_cnic()
1853 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable_cnic()
1857 static void bnx2x_napi_enable(struct bnx2x *bp) in bnx2x_napi_enable() argument
1861 for_each_eth_queue(bp, i) { in bnx2x_napi_enable()
1862 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable()
1863 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable()
1867 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) in bnx2x_napi_disable_cnic() argument
1871 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_disable_cnic()
1872 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable_cnic()
1873 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable_cnic()
1878 static void bnx2x_napi_disable(struct bnx2x *bp) in bnx2x_napi_disable() argument
1882 for_each_eth_queue(bp, i) { in bnx2x_napi_disable()
1883 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable()
1884 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable()
1889 void bnx2x_netif_start(struct bnx2x *bp) in bnx2x_netif_start() argument
1891 if (netif_running(bp->dev)) { in bnx2x_netif_start()
1892 bnx2x_napi_enable(bp); in bnx2x_netif_start()
1893 if (CNIC_LOADED(bp)) in bnx2x_netif_start()
1894 bnx2x_napi_enable_cnic(bp); in bnx2x_netif_start()
1895 bnx2x_int_enable(bp); in bnx2x_netif_start()
1896 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_netif_start()
1897 netif_tx_wake_all_queues(bp->dev); in bnx2x_netif_start()
1901 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) in bnx2x_netif_stop() argument
1903 bnx2x_int_disable_sync(bp, disable_hw); in bnx2x_netif_stop()
1904 bnx2x_napi_disable(bp); in bnx2x_netif_stop()
1905 if (CNIC_LOADED(bp)) in bnx2x_netif_stop()
1906 bnx2x_napi_disable_cnic(bp); in bnx2x_netif_stop()
1912 struct bnx2x *bp = netdev_priv(dev); in bnx2x_select_queue() local
1914 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { in bnx2x_select_queue()
1928 return bnx2x_fcoe_tx(bp, txq_index); in bnx2x_select_queue()
1932 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); in bnx2x_select_queue()
1935 void bnx2x_set_num_queues(struct bnx2x *bp) in bnx2x_set_num_queues() argument
1938 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); in bnx2x_set_num_queues()
1941 if (IS_MF_STORAGE_ONLY(bp)) in bnx2x_set_num_queues()
1942 bp->num_ethernet_queues = 1; in bnx2x_set_num_queues()
1945 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ in bnx2x_set_num_queues()
1946 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_num_queues()
1948 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); in bnx2x_set_num_queues()
1973 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) in bnx2x_set_real_num_queues() argument
1977 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; in bnx2x_set_real_num_queues()
1978 rx = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_set_real_num_queues()
1981 if (include_cnic && !NO_FCOE(bp)) { in bnx2x_set_real_num_queues()
1986 rc = netif_set_real_num_tx_queues(bp->dev, tx); in bnx2x_set_real_num_queues()
1991 rc = netif_set_real_num_rx_queues(bp->dev, rx); in bnx2x_set_real_num_queues()
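
The tx count above is queues times max_cos because each ethernet queue carries one TX ring per class of service; both totals are then published to the stack. A minimal sketch of that final step:

#include <linux/netdevice.h>

static int my_set_real_queues(struct net_device *dev,
                              unsigned int tx, unsigned int rx)
{
        int rc = netif_set_real_num_tx_queues(dev, tx);

        if (rc)
                return rc;
        return netif_set_real_num_rx_queues(dev, rx);
}
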
2003 static void bnx2x_set_rx_buf_size(struct bnx2x *bp) in bnx2x_set_rx_buf_size() argument
2007 for_each_queue(bp, i) { in bnx2x_set_rx_buf_size()
2008 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size()
2021 mtu = bp->dev->mtu; in bnx2x_set_rx_buf_size()
2035 static int bnx2x_init_rss(struct bnx2x *bp) in bnx2x_init_rss() argument
2038 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_init_rss()
2043 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) in bnx2x_init_rss()
2044 bp->rss_conf_obj.ind_table[i] = in bnx2x_init_rss()
2045 bp->fp->cl_id + in bnx2x_init_rss()
2056 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); in bnx2x_init_rss()
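
The loop above fills the RSS indirection table by spreading entries over the ethernet queues, offset by the first queue's client id. Generically the idiom is a round-robin modulo (the kernel's ethtool_rxfh_indir_default() helper computes exactly index % n_rx_rings); a hedged sketch with illustrative variable names:

/* Spread T indirection-table buckets round-robin over 'nq' RSS queues,
 * offset by the base client id. */
for (i = 0; i < T; i++)
        ind_table[i] = base_cl_id + (i % nq);
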
2059 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, in bnx2x_rss() argument
2088 if (!CHIP_IS_E1x(bp)) in bnx2x_rss()
2106 if (IS_PF(bp)) in bnx2x_rss()
2107 return bnx2x_config_rss(bp, ¶ms); in bnx2x_rss()
2109 return bnx2x_vfpf_config_rss(bp, ¶ms); in bnx2x_rss()
2112 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) in bnx2x_init_hw() argument
2119 func_params.f_obj = &bp->func_obj; in bnx2x_init_hw()
2124 return bnx2x_func_state_change(bp, &func_params); in bnx2x_init_hw()
2131 void bnx2x_squeeze_objects(struct bnx2x *bp) in bnx2x_squeeze_objects() argument
2136 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_squeeze_objects()
2147 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2155 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2161 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_squeeze_objects()
2168 netif_addr_lock_bh(bp->dev); in bnx2x_squeeze_objects()
2169 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); in bnx2x_squeeze_objects()
2175 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2180 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2184 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2186 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2190 #define LOAD_ERROR_EXIT(bp, label) \ argument
2192 (bp)->state = BNX2X_STATE_ERROR; \
2196 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2198 bp->cnic_loaded = false; \
2202 #define LOAD_ERROR_EXIT(bp, label) \ argument
2204 (bp)->state = BNX2X_STATE_ERROR; \
2205 (bp)->panic = 1; \
2208 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2210 bp->cnic_loaded = false; \
2211 (bp)->panic = 1; \
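
Both macro pairs above (the second pair is the STOP_ON_ERROR build, which also sets the panic flag) wrap the usual goto-unwind idiom; since the listing only shows lines containing 'bp', each macro's closing jump is filtered out. A hedged reconstruction of the shape:

#define LOAD_ERROR_EXIT(bp, label)                      \
        do {                                            \
                (bp)->state = BNX2X_STATE_ERROR;        \
                goto label;                             \
        } while (0)

Callers in bnx2x_nic_load() then unwind through labelled cleanup blocks (load_error0..load_error3) in reverse order of setup.
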
2216 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) in bnx2x_free_fw_stats_mem() argument
2218 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_fw_stats_mem()
2219 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_fw_stats_mem()
2223 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) in bnx2x_alloc_fw_stats_mem() argument
2226 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; in bnx2x_alloc_fw_stats_mem()
2229 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; in bnx2x_alloc_fw_stats_mem()
2236 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; in bnx2x_alloc_fw_stats_mem()
2243 if (IS_SRIOV(bp)) in bnx2x_alloc_fw_stats_mem()
2244 vf_headroom = bnx2x_vf_headroom(bp); in bnx2x_alloc_fw_stats_mem()
2252 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + in bnx2x_alloc_fw_stats_mem()
2253 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? in bnx2x_alloc_fw_stats_mem()
2257 bp->fw_stats_num, vf_headroom, num_groups); in bnx2x_alloc_fw_stats_mem()
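
The divide-plus-remainder pair above is round-up division: one extra query group whenever the count does not divide evenly by STATS_QUERY_CMD_COUNT. The kernel helper DIV_ROUND_UP() expresses the same computation in one step (shown here only as an equivalence, not a proposed change):

/* (n / d) + ((n % d) ? 1 : 0) == DIV_ROUND_UP(n, d) for n, d > 0 */
num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
                          STATS_QUERY_CMD_COUNT);
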
2258 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + in bnx2x_alloc_fw_stats_mem()
2269 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + in bnx2x_alloc_fw_stats_mem()
2275 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, in bnx2x_alloc_fw_stats_mem()
2276 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2277 if (!bp->fw_stats) in bnx2x_alloc_fw_stats_mem()
2281 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; in bnx2x_alloc_fw_stats_mem()
2282 bp->fw_stats_req_mapping = bp->fw_stats_mapping; in bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) in bnx2x_alloc_fw_stats_mem()
2284 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2285 bp->fw_stats_data_mapping = bp->fw_stats_mapping + in bnx2x_alloc_fw_stats_mem()
2286 bp->fw_stats_req_sz; in bnx2x_alloc_fw_stats_mem()
2289 U64_HI(bp->fw_stats_req_mapping), in bnx2x_alloc_fw_stats_mem()
2290 U64_LO(bp->fw_stats_req_mapping)); in bnx2x_alloc_fw_stats_mem()
2292 U64_HI(bp->fw_stats_data_mapping), in bnx2x_alloc_fw_stats_mem()
2293 U64_LO(bp->fw_stats_data_mapping)); in bnx2x_alloc_fw_stats_mem()
2297 bnx2x_free_fw_stats_mem(bp); in bnx2x_alloc_fw_stats_mem()
2303 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) in bnx2x_nic_load_request() argument
2308 bp->fw_seq = in bnx2x_nic_load_request()
2309 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_nic_load_request()
2311 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_nic_load_request()
2314 bp->fw_drv_pulse_wr_seq = in bnx2x_nic_load_request()
2315 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & in bnx2x_nic_load_request()
2317 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); in bnx2x_nic_load_request()
2321 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) in bnx2x_nic_load_request()
2325 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); in bnx2x_nic_load_request()
2347 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) in bnx2x_compare_fw_ver() argument
2359 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); in bnx2x_compare_fw_ver()
2379 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) in bnx2x_nic_load_no_mcp() argument
2381 int path = BP_PATH(bp); in bnx2x_nic_load_no_mcp()
2400 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) in bnx2x_nic_load_pmf() argument
2405 bp->port.pmf = 1; in bnx2x_nic_load_pmf()
2412 bp->port.pmf = 0; in bnx2x_nic_load_pmf()
2415 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); in bnx2x_nic_load_pmf()
2418 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) in bnx2x_nic_load_afex_dcc() argument
2422 (bp->common.shmem2_base)) { in bnx2x_nic_load_afex_dcc()
2423 if (SHMEM2_HAS(bp, dcc_support)) in bnx2x_nic_load_afex_dcc()
2424 SHMEM2_WR(bp, dcc_support, in bnx2x_nic_load_afex_dcc()
2427 if (SHMEM2_HAS(bp, afex_driver_support)) in bnx2x_nic_load_afex_dcc()
2428 SHMEM2_WR(bp, afex_driver_support, in bnx2x_nic_load_afex_dcc()
2433 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load_afex_dcc()
2445 static void bnx2x_bz_fp(struct bnx2x *bp, int index) in bnx2x_bz_fp() argument
2447 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp()
2461 fp->bp = bp; in bnx2x_bz_fp()
2464 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2471 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2474 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2475 BNX2X_NUM_ETH_QUEUES(bp) + index]; in bnx2x_bz_fp()
2480 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || in bnx2x_bz_fp()
2481 (bp->flags & GRO_ENABLE_FLAG && in bnx2x_bz_fp()
2482 bnx2x_mtu_allows_gro(bp->dev->mtu))); in bnx2x_bz_fp()
2483 if (bp->flags & TPA_ENABLE_FLAG) in bnx2x_bz_fp()
2485 else if (bp->flags & GRO_ENABLE_FLAG) in bnx2x_bz_fp()
2493 int bnx2x_load_cnic(struct bnx2x *bp) in bnx2x_load_cnic() argument
2495 int i, rc, port = BP_PORT(bp); in bnx2x_load_cnic()
2499 mutex_init(&bp->cnic_mutex); in bnx2x_load_cnic()
2501 if (IS_PF(bp)) { in bnx2x_load_cnic()
2502 rc = bnx2x_alloc_mem_cnic(bp); in bnx2x_load_cnic()
2505 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2509 rc = bnx2x_alloc_fp_mem_cnic(bp); in bnx2x_load_cnic()
2512 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2516 rc = bnx2x_set_real_num_queues(bp, 1); in bnx2x_load_cnic()
2519 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2523 bnx2x_add_all_napi_cnic(bp); in bnx2x_load_cnic()
2525 bnx2x_napi_enable_cnic(bp); in bnx2x_load_cnic()
2527 rc = bnx2x_init_hw_func_cnic(bp); in bnx2x_load_cnic()
2529 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); in bnx2x_load_cnic()
2531 bnx2x_nic_init_cnic(bp); in bnx2x_load_cnic()
2533 if (IS_PF(bp)) { in bnx2x_load_cnic()
2535 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); in bnx2x_load_cnic()
2538 for_each_cnic_queue(bp, i) { in bnx2x_load_cnic()
2539 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2542 LOAD_ERROR_EXIT(bp, load_error_cnic2); in bnx2x_load_cnic()
2548 bnx2x_set_rx_mode_inner(bp); in bnx2x_load_cnic()
2551 bnx2x_get_iscsi_info(bp); in bnx2x_load_cnic()
2552 bnx2x_setup_cnic_irq_info(bp); in bnx2x_load_cnic()
2553 bnx2x_setup_cnic_info(bp); in bnx2x_load_cnic()
2554 bp->cnic_loaded = true; in bnx2x_load_cnic()
2555 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_load_cnic()
2556 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); in bnx2x_load_cnic()
2565 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); in bnx2x_load_cnic()
2568 bnx2x_napi_disable_cnic(bp); in bnx2x_load_cnic()
2570 if (bnx2x_set_real_num_queues(bp, 0)) in bnx2x_load_cnic()
2574 bnx2x_free_fp_mem_cnic(bp); in bnx2x_load_cnic()
2575 bnx2x_free_mem_cnic(bp); in bnx2x_load_cnic()
2581 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) in bnx2x_nic_load() argument
2583 int port = BP_PORT(bp); in bnx2x_nic_load()
2588 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); in bnx2x_nic_load()
2591 if (unlikely(bp->panic)) { in bnx2x_nic_load()
2597 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; in bnx2x_nic_load()
2600 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); in bnx2x_nic_load()
2602 &bp->last_reported_link.link_report_flags); in bnx2x_nic_load()
2604 if (IS_PF(bp)) in bnx2x_nic_load()
2606 bnx2x_ilt_set_info(bp); in bnx2x_nic_load()
2613 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); in bnx2x_nic_load()
2614 for_each_queue(bp, i) in bnx2x_nic_load()
2615 bnx2x_bz_fp(bp, i); in bnx2x_nic_load()
2616 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + in bnx2x_nic_load()
2617 bp->num_cnic_queues) * in bnx2x_nic_load()
2620 bp->fcoe_init = false; in bnx2x_nic_load()
2623 bnx2x_set_rx_buf_size(bp); in bnx2x_nic_load()
2625 if (IS_PF(bp)) { in bnx2x_nic_load()
2626 rc = bnx2x_alloc_mem(bp); in bnx2x_nic_load()
2636 rc = bnx2x_alloc_fp_mem(bp); in bnx2x_nic_load()
2639 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2643 if (bnx2x_alloc_fw_stats_mem(bp)) in bnx2x_nic_load()
2644 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2647 if (IS_VF(bp)) { in bnx2x_nic_load()
2648 rc = bnx2x_vfpf_init(bp); in bnx2x_nic_load()
2650 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2657 rc = bnx2x_set_real_num_queues(bp, 0); in bnx2x_nic_load()
2660 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2667 bnx2x_setup_tc(bp->dev, bp->max_cos); in bnx2x_nic_load()
2670 bnx2x_add_all_napi(bp); in bnx2x_nic_load()
2672 bnx2x_napi_enable(bp); in bnx2x_nic_load()
2674 if (IS_PF(bp)) { in bnx2x_nic_load()
2676 bnx2x_set_pf_load(bp); in bnx2x_nic_load()
2679 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2681 rc = bnx2x_nic_load_request(bp, &load_code); in bnx2x_nic_load()
2683 LOAD_ERROR_EXIT(bp, load_error1); in bnx2x_nic_load()
2686 rc = bnx2x_compare_fw_ver(bp, load_code, true); in bnx2x_nic_load()
2688 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2689 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2692 load_code = bnx2x_nic_load_no_mcp(bp, port); in bnx2x_nic_load()
2696 bnx2x_nic_load_pmf(bp, load_code); in bnx2x_nic_load()
2699 bnx2x__init_func_obj(bp); in bnx2x_nic_load()
2702 rc = bnx2x_init_hw(bp, load_code); in bnx2x_nic_load()
2705 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2706 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2710 bnx2x_pre_irq_nic_init(bp); in bnx2x_nic_load()
2713 rc = bnx2x_setup_irqs(bp); in bnx2x_nic_load()
2716 if (IS_PF(bp)) in bnx2x_nic_load()
2717 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2718 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2722 if (IS_PF(bp)) { in bnx2x_nic_load()
2724 bnx2x_post_irq_nic_init(bp, load_code); in bnx2x_nic_load()
2726 bnx2x_init_bp_objs(bp); in bnx2x_nic_load()
2727 bnx2x_iov_nic_init(bp); in bnx2x_nic_load()
2730 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load()
2731 bnx2x_nic_load_afex_dcc(bp, load_code); in bnx2x_nic_load()
2732 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; in bnx2x_nic_load()
2733 rc = bnx2x_func_start(bp); in bnx2x_nic_load()
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2738 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2742 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2743 load_code = bnx2x_fw_command(bp, in bnx2x_nic_load()
2748 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2753 bnx2x_update_coalesce(bp); in bnx2x_nic_load()
2757 rc = bnx2x_setup_leading(bp); in bnx2x_nic_load()
2760 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2764 for_each_nondefault_eth_queue(bp, i) { in bnx2x_nic_load()
2765 if (IS_PF(bp)) in bnx2x_nic_load()
2766 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2768 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2771 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2776 rc = bnx2x_init_rss(bp); in bnx2x_nic_load()
2779 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2783 bp->state = BNX2X_STATE_OPEN; in bnx2x_nic_load()
2786 if (IS_PF(bp)) in bnx2x_nic_load()
2787 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_nic_load()
2789 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2793 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2796 if (IS_PF(bp) && bp->pending_max) { in bnx2x_nic_load()
2797 bnx2x_update_max_mf_config(bp, bp->pending_max); in bnx2x_nic_load()
2798 bp->pending_max = 0; in bnx2x_nic_load()
2801 if (bp->port.pmf) { in bnx2x_nic_load()
2802 rc = bnx2x_initial_phy_init(bp, load_mode); in bnx2x_nic_load()
2804 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2806 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; in bnx2x_nic_load()
2811 bnx2x_set_rx_mode_inner(bp); in bnx2x_nic_load()
2813 if (bp->flags & PTP_SUPPORTED) { in bnx2x_nic_load()
2814 bnx2x_init_ptp(bp); in bnx2x_nic_load()
2815 bnx2x_configure_ptp_filters(bp); in bnx2x_nic_load()
2821 netif_tx_wake_all_queues(bp->dev); in bnx2x_nic_load()
2825 netif_tx_start_all_queues(bp->dev); in bnx2x_nic_load()
2831 bp->state = BNX2X_STATE_DIAG; in bnx2x_nic_load()
2838 if (bp->port.pmf) in bnx2x_nic_load()
2839 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); in bnx2x_nic_load()
2841 bnx2x__link_status_update(bp); in bnx2x_nic_load()
2844 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_nic_load()
2846 if (CNIC_ENABLED(bp)) in bnx2x_nic_load()
2847 bnx2x_load_cnic(bp); in bnx2x_nic_load()
2849 if (IS_PF(bp)) in bnx2x_nic_load()
2850 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); in bnx2x_nic_load()
2852 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_load()
2855 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_load()
2856 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_load()
2862 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { in bnx2x_nic_load()
2864 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_nic_load()
2869 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) in bnx2x_nic_load()
2870 bnx2x_dcbx_init(bp, false); in bnx2x_nic_load()
2878 if (IS_PF(bp)) { in bnx2x_nic_load()
2879 bnx2x_int_disable_sync(bp, 1); in bnx2x_nic_load()
2882 bnx2x_squeeze_objects(bp); in bnx2x_nic_load()
2886 bnx2x_free_skbs(bp); in bnx2x_nic_load()
2887 for_each_rx_queue(bp, i) in bnx2x_nic_load()
2888 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2891 bnx2x_free_irq(bp); in bnx2x_nic_load()
2893 if (IS_PF(bp) && !BP_NOMCP(bp)) { in bnx2x_nic_load()
2894 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); in bnx2x_nic_load()
2895 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); in bnx2x_nic_load()
2898 bp->port.pmf = 0; in bnx2x_nic_load()
2900 bnx2x_napi_disable(bp); in bnx2x_nic_load()
2901 bnx2x_del_all_napi(bp); in bnx2x_nic_load()
2904 if (IS_PF(bp)) in bnx2x_nic_load()
2905 bnx2x_clear_pf_load(bp); in bnx2x_nic_load()
2907 bnx2x_free_fw_stats_mem(bp); in bnx2x_nic_load()
2908 bnx2x_free_fp_mem(bp); in bnx2x_nic_load()
2909 bnx2x_free_mem(bp); in bnx2x_nic_load()
2915 int bnx2x_drain_tx_queues(struct bnx2x *bp) in bnx2x_drain_tx_queues() argument
2920 for_each_tx_queue(bp, i) { in bnx2x_drain_tx_queues()
2921 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues()
2924 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
2932 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) in bnx2x_nic_unload() argument
2940 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_unload()
2942 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_unload()
2943 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_unload()
2947 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_nic_unload()
2948 (bp->state == BNX2X_STATE_CLOSED || in bnx2x_nic_unload()
2949 bp->state == BNX2X_STATE_ERROR)) { in bnx2x_nic_unload()
2957 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_nic_unload()
2958 bp->is_leader = 0; in bnx2x_nic_unload()
2959 bnx2x_release_leader_lock(bp); in bnx2x_nic_unload()
2973 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) in bnx2x_nic_unload()
2980 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_nic_unload()
2984 bnx2x_iov_channel_down(bp); in bnx2x_nic_unload()
2986 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
2987 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); in bnx2x_nic_unload()
2990 bnx2x_tx_disable(bp); in bnx2x_nic_unload()
2991 netdev_reset_tc(bp->dev); in bnx2x_nic_unload()
2993 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_nic_unload()
2995 del_timer_sync(&bp->timer); in bnx2x_nic_unload()
2997 if (IS_PF(bp)) { in bnx2x_nic_unload()
2999 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bnx2x_nic_unload()
3000 bnx2x_drv_pulse(bp); in bnx2x_nic_unload()
3001 bnx2x_stats_handle(bp, STATS_EVENT_STOP); in bnx2x_nic_unload()
3002 bnx2x_save_statistics(bp); in bnx2x_nic_unload()
3006 bnx2x_drain_tx_queues(bp); in bnx2x_nic_unload()
3011 if (IS_VF(bp)) in bnx2x_nic_unload()
3012 bnx2x_vfpf_close_vf(bp); in bnx2x_nic_unload()
3015 bnx2x_chip_cleanup(bp, unload_mode, keep_link); in bnx2x_nic_unload()
3018 bnx2x_send_unload_req(bp, unload_mode); in bnx2x_nic_unload()
3026 if (!CHIP_IS_E1x(bp)) in bnx2x_nic_unload()
3027 bnx2x_pf_disable(bp); in bnx2x_nic_unload()
3030 bnx2x_netif_stop(bp, 1); in bnx2x_nic_unload()
3032 bnx2x_del_all_napi(bp); in bnx2x_nic_unload()
3033 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3034 bnx2x_del_all_napi_cnic(bp); in bnx2x_nic_unload()
3036 bnx2x_free_irq(bp); in bnx2x_nic_unload()
3039 bnx2x_send_unload_done(bp, false); in bnx2x_nic_unload()
3046 if (IS_PF(bp)) in bnx2x_nic_unload()
3047 bnx2x_squeeze_objects(bp); in bnx2x_nic_unload()
3050 bp->sp_state = 0; in bnx2x_nic_unload()
3052 bp->port.pmf = 0; in bnx2x_nic_unload()
3055 bp->sp_rtnl_state = 0; in bnx2x_nic_unload()
3059 bnx2x_free_skbs(bp); in bnx2x_nic_unload()
3060 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3061 bnx2x_free_skbs_cnic(bp); in bnx2x_nic_unload()
3062 for_each_rx_queue(bp, i) in bnx2x_nic_unload()
3063 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3065 bnx2x_free_fp_mem(bp); in bnx2x_nic_unload()
3066 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3067 bnx2x_free_fp_mem_cnic(bp); in bnx2x_nic_unload()
3069 if (IS_PF(bp)) { in bnx2x_nic_unload()
3070 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3071 bnx2x_free_mem_cnic(bp); in bnx2x_nic_unload()
3073 bnx2x_free_mem(bp); in bnx2x_nic_unload()
3075 bp->state = BNX2X_STATE_CLOSED; in bnx2x_nic_unload()
3076 bp->cnic_loaded = false; in bnx2x_nic_unload()
3079 if (IS_PF(bp)) in bnx2x_nic_unload()
3080 bnx2x_update_mng_version(bp); in bnx2x_nic_unload()
3085 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { in bnx2x_nic_unload()
3086 bnx2x_set_reset_in_progress(bp); in bnx2x_nic_unload()
3090 bnx2x_set_reset_global(bp); in bnx2x_nic_unload()
3096 if (IS_PF(bp) && in bnx2x_nic_unload()
3097 !bnx2x_clear_pf_load(bp) && in bnx2x_nic_unload()
3098 bnx2x_reset_is_done(bp, BP_PATH(bp))) in bnx2x_nic_unload()
3099 bnx2x_disable_close_the_gate(bp); in bnx2x_nic_unload()
3106 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) in bnx2x_set_power_state() argument
3111 if (!bp->pdev->pm_cap) { in bnx2x_set_power_state()
3116 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); in bnx2x_set_power_state()
3120 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3132 if (atomic_read(&bp->pdev->enable_cnt) != 1) in bnx2x_set_power_state()
3135 if (CHIP_REV_IS_SLOW(bp)) in bnx2x_set_power_state()
3141 if (bp->wol) in bnx2x_set_power_state()
3144 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3153 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); in bnx2x_set_power_state()
3168 struct bnx2x *bp = fp->bp; in bnx2x_poll() local
3172 if (unlikely(bp->panic)) { in bnx2x_poll()
3182 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3228 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3245 struct bnx2x *bp = fp->bp; in bnx2x_low_latency_recv() local
3248 if ((bp->state == BNX2X_STATE_CLOSED) || in bnx2x_low_latency_recv()
3249 (bp->state == BNX2X_STATE_ERROR) || in bnx2x_low_latency_recv()
3250 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) in bnx2x_low_latency_recv()
3269 static u16 bnx2x_tx_split(struct bnx2x *bp, in bnx2x_tx_split() argument
3328 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_xmit_type() argument
3346 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { in bnx2x_xmit_type()
3379 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_pkt_req_lin() argument
3497 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_enc() argument
3531 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_e2() argument
3553 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_sbd_csum() argument
3574 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum() argument
3717 struct bnx2x *bp = netdev_priv(dev); in bnx2x_start_xmit() local
3731 u32 xmit_type = bnx2x_xmit_type(bp, skb); in bnx2x_start_xmit()
3739 if (unlikely(bp->panic)) in bnx2x_start_xmit()
3746 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); in bnx2x_start_xmit()
3748 txdata = &bp->bnx2x_txq[txq_index]; in bnx2x_start_xmit()
3759 if (unlikely(bnx2x_tx_avail(bp, txdata) < in bnx2x_start_xmit()
3766 bnx2x_fp_qstats(bp, txdata->parent_fp); in bnx2x_start_xmit()
3771 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
3798 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { in bnx2x_start_xmit()
3800 bp->lin_cnt++; in bnx2x_start_xmit()
3810 mapping = dma_map_single(&bp->pdev->dev, skb->data, in bnx2x_start_xmit()
3812 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
3844 if (!(bp->flags & TX_TIMESTAMPING_EN)) { in bnx2x_start_xmit()
3846 } else if (bp->ptp_tx_skb) { in bnx2x_start_xmit()
3851 bp->ptp_tx_skb = skb_get(skb); in bnx2x_start_xmit()
3852 bp->ptp_tx_start = jiffies; in bnx2x_start_xmit()
3853 schedule_work(&bp->ptp_task); in bnx2x_start_xmit()
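
The lines above implement a one-outstanding-timestamp policy: if TX timestamping is disabled or a PTP frame is already in flight the request is skipped, otherwise the skb is pinned with skb_get() and a worker retrieves the hardware timestamp later. A hedged sketch of the pattern around a hypothetical private struct:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct my_priv {
        struct sk_buff *ptp_tx_skb;     /* at most one in flight */
        struct work_struct ptp_task;
};

static void my_maybe_tx_tstamp(struct my_priv *p, struct sk_buff *skb)
{
        if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || p->ptp_tx_skb)
                return;

        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        p->ptp_tx_skb = skb_get(skb);   /* hold a ref for the worker */
        schedule_work(&p->ptp_task);    /* worker reads the HW timestamp */
}
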
3879 if (IS_VF(bp)) in bnx2x_start_xmit()
3896 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); in bnx2x_start_xmit()
3898 if (!CHIP_IS_E1x(bp)) { in bnx2x_start_xmit()
3906 hlen = bnx2x_set_pbd_csum_enc(bp, skb, in bnx2x_start_xmit()
3940 hlen = bnx2x_set_pbd_csum_e2(bp, skb, in bnx2x_start_xmit()
3949 if (IS_VF(bp)) { in bnx2x_start_xmit()
3961 if (bp->flags & TX_SWITCHING) in bnx2x_start_xmit()
3986 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); in bnx2x_start_xmit()
4017 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, in bnx2x_start_xmit()
4021 if (!CHIP_IS_E1x(bp)) in bnx2x_start_xmit()
4042 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, in bnx2x_start_xmit()
4044 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
4056 bnx2x_free_tx_pkt(bp, txdata, in bnx2x_start_xmit()
4139 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); in bnx2x_start_xmit()
4145 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { in bnx2x_start_xmit()
4153 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
4154 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) in bnx2x_start_xmit()
4173 struct bnx2x *bp = netdev_priv(dev); in bnx2x_setup_tc() local
4185 if (num_tc > bp->max_cos) { in bnx2x_setup_tc()
4187 num_tc, bp->max_cos); in bnx2x_setup_tc()
4199 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); in bnx2x_setup_tc()
4202 prio, bp->prio_to_cos[prio]); in bnx2x_setup_tc()
4216 for (cos = 0; cos < bp->max_cos; cos++) { in bnx2x_setup_tc()
4217 count = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_setup_tc()
4218 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); in bnx2x_setup_tc()
4232 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mac_addr() local
4240 if (IS_MF_STORAGE_ONLY(bp)) { in bnx2x_change_mac_addr()
4246 rc = bnx2x_set_eth_mac(bp, false); in bnx2x_change_mac_addr()
4254 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_change_mac_addr()
4259 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) in bnx2x_free_fp_mem_at() argument
4261 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); in bnx2x_free_fp_mem_at()
4262 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at()
4272 if (!CHIP_IS_E1x(bp)) in bnx2x_free_fp_mem_at()
4274 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4279 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4285 if (!skip_rx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4289 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); in bnx2x_free_fp_mem_at()
4290 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), in bnx2x_free_fp_mem_at()
4291 bnx2x_fp(bp, fp_index, rx_desc_mapping), in bnx2x_free_fp_mem_at()
4294 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), in bnx2x_free_fp_mem_at()
4295 bnx2x_fp(bp, fp_index, rx_comp_mapping), in bnx2x_free_fp_mem_at()
4300 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); in bnx2x_free_fp_mem_at()
4301 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), in bnx2x_free_fp_mem_at()
4302 bnx2x_fp(bp, fp_index, rx_sge_mapping), in bnx2x_free_fp_mem_at()
4307 if (!skip_tx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4325 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) in bnx2x_free_fp_mem_cnic() argument
4328 for_each_cnic_queue(bp, i) in bnx2x_free_fp_mem_cnic()
4329 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem_cnic()
4332 void bnx2x_free_fp_mem(struct bnx2x *bp) in bnx2x_free_fp_mem() argument
4335 for_each_eth_queue(bp, i) in bnx2x_free_fp_mem()
4336 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem()
4339 static void set_sb_shortcuts(struct bnx2x *bp, int index) in set_sb_shortcuts() argument
4341 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); in set_sb_shortcuts()
4342 if (!CHIP_IS_E1x(bp)) { in set_sb_shortcuts()
4343 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4345 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4348 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4350 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4359 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds() local
4370 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4389 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4412 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) in bnx2x_alloc_fp_mem_at() argument
4415 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at()
4420 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { in bnx2x_alloc_fp_mem_at()
4422 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4423 } else if (!bp->rx_ring_size) { in bnx2x_alloc_fp_mem_at()
4424 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); in bnx2x_alloc_fp_mem_at()
4426 if (CHIP_IS_E3(bp)) { in bnx2x_alloc_fp_mem_at()
4427 u32 cfg = SHMEM_RD(bp, in bnx2x_alloc_fp_mem_at()
4428 dev_info.port_hw_config[BP_PORT(bp)]. in bnx2x_alloc_fp_mem_at()
4438 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : in bnx2x_alloc_fp_mem_at()
4441 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4443 rx_ring_size = bp->rx_ring_size; in bnx2x_alloc_fp_mem_at()
4448 sb = &bnx2x_fp(bp, index, status_blk); in bnx2x_alloc_fp_mem_at()
4452 if (!CHIP_IS_E1x(bp)) { in bnx2x_alloc_fp_mem_at()
4453 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4458 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4469 set_sb_shortcuts(bp, index); in bnx2x_alloc_fp_mem_at()
4472 if (!skip_tx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4494 if (!skip_rx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4496 bnx2x_fp(bp, index, rx_buf_ring) = in bnx2x_alloc_fp_mem_at()
4498 if (!bnx2x_fp(bp, index, rx_buf_ring)) in bnx2x_alloc_fp_mem_at()
4500 bnx2x_fp(bp, index, rx_desc_ring) = in bnx2x_alloc_fp_mem_at()
4501 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), in bnx2x_alloc_fp_mem_at()
4503 if (!bnx2x_fp(bp, index, rx_desc_ring)) in bnx2x_alloc_fp_mem_at()
4507 bnx2x_fp(bp, index, rx_comp_ring) = in bnx2x_alloc_fp_mem_at()
4508 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), in bnx2x_alloc_fp_mem_at()
4510 if (!bnx2x_fp(bp, index, rx_comp_ring)) in bnx2x_alloc_fp_mem_at()
4514 bnx2x_fp(bp, index, rx_page_ring) = in bnx2x_alloc_fp_mem_at()
4517 if (!bnx2x_fp(bp, index, rx_page_ring)) in bnx2x_alloc_fp_mem_at()
4519 bnx2x_fp(bp, index, rx_sge_ring) = in bnx2x_alloc_fp_mem_at()
4520 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), in bnx2x_alloc_fp_mem_at()
4522 if (!bnx2x_fp(bp, index, rx_sge_ring)) in bnx2x_alloc_fp_mem_at()
4549 bnx2x_free_fp_mem_at(bp, index); in bnx2x_alloc_fp_mem_at()
4555 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) in bnx2x_alloc_fp_mem_cnic() argument
4557 if (!NO_FCOE(bp)) in bnx2x_alloc_fp_mem_cnic()
4559 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) in bnx2x_alloc_fp_mem_cnic()
4568 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) in bnx2x_alloc_fp_mem() argument
4577 if (bnx2x_alloc_fp_mem_at(bp, 0)) in bnx2x_alloc_fp_mem()
4581 for_each_nondefault_eth_queue(bp, i) in bnx2x_alloc_fp_mem()
4582 if (bnx2x_alloc_fp_mem_at(bp, i)) in bnx2x_alloc_fp_mem()
4586 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { in bnx2x_alloc_fp_mem()
4587 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; in bnx2x_alloc_fp_mem()
4590 bnx2x_shrink_eth_fp(bp, delta); in bnx2x_alloc_fp_mem()
4591 if (CNIC_SUPPORT(bp)) in bnx2x_alloc_fp_mem()
4598 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); in bnx2x_alloc_fp_mem()
4599 bp->num_ethernet_queues -= delta; in bnx2x_alloc_fp_mem()
4600 bp->num_queues = bp->num_ethernet_queues + in bnx2x_alloc_fp_mem()
4601 bp->num_cnic_queues; in bnx2x_alloc_fp_mem()
4603 bp->num_queues + delta, bp->num_queues); in bnx2x_alloc_fp_mem()
4609 void bnx2x_free_mem_bp(struct bnx2x *bp) in bnx2x_free_mem_bp() argument
4613 for (i = 0; i < bp->fp_array_size; i++) in bnx2x_free_mem_bp()
4614 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4615 kfree(bp->fp); in bnx2x_free_mem_bp()
4616 kfree(bp->sp_objs); in bnx2x_free_mem_bp()
4617 kfree(bp->fp_stats); in bnx2x_free_mem_bp()
4618 kfree(bp->bnx2x_txq); in bnx2x_free_mem_bp()
4619 kfree(bp->msix_table); in bnx2x_free_mem_bp()
4620 kfree(bp->ilt); in bnx2x_free_mem_bp()
4623 int bnx2x_alloc_mem_bp(struct bnx2x *bp) in bnx2x_alloc_mem_bp() argument
4636 msix_table_size = bp->igu_sb_cnt; in bnx2x_alloc_mem_bp()
4637 if (IS_PF(bp)) in bnx2x_alloc_mem_bp()
4642 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4643 bp->fp_array_size = fp_array_size; in bnx2x_alloc_mem_bp()
4644 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); in bnx2x_alloc_mem_bp()
4646 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4649 for (i = 0; i < bp->fp_array_size; i++) { in bnx2x_alloc_mem_bp()
4657 bp->fp = fp; in bnx2x_alloc_mem_bp()
4660 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), in bnx2x_alloc_mem_bp()
4662 if (!bp->sp_objs) in bnx2x_alloc_mem_bp()
4666 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), in bnx2x_alloc_mem_bp()
4668 if (!bp->fp_stats) in bnx2x_alloc_mem_bp()
4673 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4676 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), in bnx2x_alloc_mem_bp()
4678 if (!bp->bnx2x_txq) in bnx2x_alloc_mem_bp()
4685 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
4691 bp->ilt = ilt; in bnx2x_alloc_mem_bp()
4695 bnx2x_free_mem_bp(bp); in bnx2x_alloc_mem_bp()
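
bnx2x_alloc_mem_bp() above allocates each array with kcalloc() and funnels every failure into one bnx2x_free_mem_bp() call, which works because kfree(NULL) is a no-op. A compact sketch of that shape with hypothetical names:

#include <linux/slab.h>

struct my_ctx {
        long *a;
        long *b;
};

static int my_alloc_arrays(struct my_ctx *c, int n)
{
        c->a = kcalloc(n, sizeof(*c->a), GFP_KERNEL);
        c->b = kcalloc(n, sizeof(*c->b), GFP_KERNEL);
        if (!c->a || !c->b)
                goto err;
        return 0;
err:
        kfree(c->b);    /* kfree(NULL) is safe, so one unwind path suffices */
        kfree(c->a);
        c->a = c->b = NULL;
        return -ENOMEM;
}
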
4701 struct bnx2x *bp = netdev_priv(dev); in bnx2x_reload_if_running() local
4706 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); in bnx2x_reload_if_running()
4707 return bnx2x_nic_load(bp, LOAD_NORMAL); in bnx2x_reload_if_running()
4710 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) in bnx2x_get_cur_phy_idx() argument
4713 if (bp->link_params.num_phys <= 1) in bnx2x_get_cur_phy_idx()
4716 if (bp->link_vars.link_up) { in bnx2x_get_cur_phy_idx()
4719 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bnx2x_get_cur_phy_idx()
4720 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) in bnx2x_get_cur_phy_idx()
4724 switch (bnx2x_phy_selection(&bp->link_params)) { in bnx2x_get_cur_phy_idx()
4739 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) in bnx2x_get_link_cfg_idx() argument
4741 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); in bnx2x_get_link_cfg_idx()
4748 if (bp->link_params.multi_phy_config & in bnx2x_get_link_cfg_idx()
4761 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fcoe_get_wwn() local
4762 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_fcoe_get_wwn()
4785 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mtu() local
4787 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_change_mtu()
4810 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fix_features() local
4819 if (bp->disable_tpa) in bnx2x_fix_features()
4827 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_features() local
4828 u32 flags = bp->flags; in bnx2x_set_features()
4843 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { in bnx2x_set_features()
4844 bp->link_params.loopback_mode = LOOPBACK_BMAC; in bnx2x_set_features()
4848 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { in bnx2x_set_features()
4849 bp->link_params.loopback_mode = LOOPBACK_NONE; in bnx2x_set_features()
4854 changes = flags ^ bp->flags; in bnx2x_set_features()
4861 if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) in bnx2x_set_features()
4867 bp->flags = flags; in bnx2x_set_features()
4870 if (bp->recovery_state == BNX2X_RECOVERY_DONE) in bnx2x_set_features()
4880 struct bnx2x *bp = netdev_priv(dev); in bnx2x_tx_timeout() local
4883 if (!bp->panic) in bnx2x_tx_timeout()
4888 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); in bnx2x_tx_timeout()
4894 struct bnx2x *bp; in bnx2x_suspend() local
4900 bp = netdev_priv(dev); in bnx2x_suspend()
4913 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_suspend()
4915 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); in bnx2x_suspend()
4925 struct bnx2x *bp; in bnx2x_resume() local
4932 bp = netdev_priv(dev); in bnx2x_resume()
4934 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_resume()
4948 bnx2x_set_power_state(bp, PCI_D0); in bnx2x_resume()
4951 rc = bnx2x_nic_load(bp, LOAD_OPEN); in bnx2x_resume()
4958 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, in bnx2x_set_ctx_validation() argument
4968 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
4972 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
4976 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, in storm_memset_hc_timeout() argument
4982 REG_WR8(bp, addr, ticks); in storm_memset_hc_timeout()
4988 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, in storm_memset_hc_disable() argument
4995 u8 flags = REG_RD8(bp, addr); in storm_memset_hc_disable()
4999 REG_WR8(bp, addr, flags); in storm_memset_hc_disable()
5005 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, in bnx2x_update_coalesce_sb_index() argument
5008 int port = BP_PORT(bp); in bnx2x_update_coalesce_sb_index()
5011 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); in bnx2x_update_coalesce_sb_index()
5014 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); in bnx2x_update_coalesce_sb_index()
5017 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, in bnx2x_schedule_sp_rtnl() argument
5021 set_bit(flag, &bp->sp_rtnl_state); in bnx2x_schedule_sp_rtnl()
5025 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_schedule_sp_rtnl()
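
This final helper shows the deferral pattern used throughout the driver: record what needs doing as a bit in an atomic state word, then kick a single delayed worker (here the sp_rtnl task, which runs under rtnl). A hedged sketch of the shape:

#include <linux/bitops.h>
#include <linux/workqueue.h>

static void my_schedule_flag(unsigned long *state, int flag,
                             struct delayed_work *task)
{
        smp_mb__before_atomic();
        set_bit(flag, state);           /* publish the request... */
        smp_mb__after_atomic();
        schedule_delayed_work(task, 0); /* ...then wake the worker */
}
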