Lines Matching refs:fp

121 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
453 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
521 struct bnx2x_fastpath *fp, in bnx2x_update_rx_prod() argument
543 REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, in bnx2x_update_rx_prod()
550 fp->index, bd_prod, rx_comp_prod, rx_sge_prod); in bnx2x_update_rx_prod()
559 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
657 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) in bnx2x_update_fpsb_idx() argument
660 fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; in bnx2x_update_fpsb_idx()
794 static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) in bnx2x_has_tx_work() argument
797 for_each_cos_in_tx_queue(fp, cos) in bnx2x_has_tx_work()
798 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_has_tx_work()
803 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) in bnx2x_has_rx_work() argument
809 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); in bnx2x_has_rx_work()
812 return (fp->rx_comp_cons != rx_cons_sb); in bnx2x_has_rx_work()
827 struct bnx2x_fastpath *fp, u16 index) in bnx2x_free_rx_sge() argument
829 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_free_rx_sge()
831 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_free_rx_sge()
903 static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) in bnx2x_clear_sge_mask_next_elems() argument
911 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); in bnx2x_clear_sge_mask_next_elems()
917 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) in bnx2x_init_sge_ring_bit_mask() argument
920 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); in bnx2x_init_sge_ring_bit_mask()
926 bnx2x_clear_sge_mask_next_elems(fp); in bnx2x_init_sge_ring_bit_mask()
934 static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, in bnx2x_reuse_rx_data() argument
937 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_reuse_rx_data()
938 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_reuse_rx_data()
939 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; in bnx2x_reuse_rx_data()
940 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_reuse_rx_data()
1015 struct bnx2x_fastpath *fp, int last) in bnx2x_free_rx_sge_range() argument
1019 if (fp->disable_tpa) in bnx2x_free_rx_sge_range()
1023 bnx2x_free_rx_sge(bp, fp, i); in bnx2x_free_rx_sge_range()
1026 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_rx_bd() argument
1033 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; in bnx2x_set_next_page_rx_bd()
1035 cpu_to_le32(U64_HI(fp->rx_desc_mapping + in bnx2x_set_next_page_rx_bd()
1038 cpu_to_le32(U64_LO(fp->rx_desc_mapping + in bnx2x_set_next_page_rx_bd()
1046 static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) in bnx2x_stats_id() argument
1048 struct bnx2x *bp = fp->bp; in bnx2x_stats_id()
1051 if (IS_FCOE_FP(fp)) in bnx2x_stats_id()
1053 return fp->cl_id; in bnx2x_stats_id()
1055 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; in bnx2x_stats_id()
1058 static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, in bnx2x_init_vlan_mac_fp_objs() argument
1061 struct bnx2x *bp = fp->bp; in bnx2x_init_vlan_mac_fp_objs()
1064 bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, in bnx2x_init_vlan_mac_fp_objs()
1065 fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), in bnx2x_init_vlan_mac_fp_objs()
1118 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, in bnx2x_init_bp_objs()
1133 bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, in bnx2x_init_bp_objs()
1134 bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), in bnx2x_init_bp_objs()
1141 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) in bnx2x_fp_qzone_id() argument
1143 if (CHIP_IS_E1x(fp->bp)) in bnx2x_fp_qzone_id()
1144 return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; in bnx2x_fp_qzone_id()
1146 return fp->cl_id; in bnx2x_fp_qzone_id()
1149 u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
1154 struct bnx2x_fastpath *fp) in bnx2x_init_txdata() argument
1159 txdata->parent_fp = fp; in bnx2x_init_txdata()
1160 txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size; in bnx2x_init_txdata()
1187 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); in bnx2x_init_fcoe_fp() local
1198 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, in bnx2x_init_fcoe_fp()
1199 fp); in bnx2x_init_fcoe_fp()
1201 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); in bnx2x_init_fcoe_fp()
1204 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); in bnx2x_init_fcoe_fp()
1207 bnx2x_rx_ustorm_prods_offset(fp); in bnx2x_init_fcoe_fp()
1214 BUG_ON(fp->max_cos != 1); in bnx2x_init_fcoe_fp()
1216 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, in bnx2x_init_fcoe_fp()
1217 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_fcoe_fp()
1222 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_fcoe_fp()
1223 fp->igu_sb_id); in bnx2x_init_fcoe_fp()
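
The matches at lines 794-812 show the work-check pattern used on the fastpath poll path: each fastpath compares the completion-consumer index it has already processed against the consumer index the hardware writes into the status block. The fragment below is a minimal standalone sketch of that comparison only; struct fastpath_sketch and le16_to_host are simplified stand-ins for illustration, not the driver's actual struct bnx2x_fastpath layout or the kernel's le16_to_cpu() helper.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the two fields referenced at lines 803-812;
 * the real struct bnx2x_fastpath carries many more members. */
struct fastpath_sketch {
	uint16_t rx_comp_cons;          /* last completion index the driver consumed */
	volatile uint16_t *rx_cons_sb;  /* consumer index the HW writes into the status block */
};

/* Stand-in for le16_to_cpu(); assumes a little-endian host for this sketch. */
uint16_t le16_to_host(uint16_t v)
{
	return v;
}

/* Mirrors the check at line 812: RX work is pending whenever the
 * hardware's status-block consumer index has advanced past the index
 * the driver has already processed. */
bool fastpath_has_rx_work(const struct fastpath_sketch *fp)
{
	uint16_t rx_cons_sb = le16_to_host(*fp->rx_cons_sb);

	return fp->rx_comp_cons != rx_cons_sb;
}

The TX side (lines 794-798) follows the same idea per traffic class: bnx2x_has_tx_work() walks each cos queue and asks bnx2x_tx_queue_has_work() whether that queue's consumer index still trails its producer.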