Lines Matching refs:fp

89 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
90 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
179 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp() local
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
345 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, in bnx2x_update_last_max_sge() argument
348 u16 last_max = fp->last_max_sge; in bnx2x_update_last_max_sge()
351 fp->last_max_sge = idx; in bnx2x_update_last_max_sge()
354 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, in bnx2x_update_sge_prod() argument
358 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod()
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bnx2x_update_sge_prod()
375 prefetch((void *)(fp->sge_mask)); in bnx2x_update_sge_prod()
376 bnx2x_update_last_max_sge(fp, in bnx2x_update_sge_prod()
379 last_max = RX_SGE(fp->last_max_sge); in bnx2x_update_sge_prod()
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bnx2x_update_sge_prod()
389 if (likely(fp->sge_mask[i])) in bnx2x_update_sge_prod()
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bnx2x_update_sge_prod()
397 fp->rx_sge_prod += delta; in bnx2x_update_sge_prod()
399 bnx2x_clear_sge_mask_next_elems(fp); in bnx2x_update_sge_prod()
404 fp->last_max_sge, fp->rx_sge_prod); in bnx2x_update_sge_prod()
430 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, in bnx2x_tpa_start() argument
434 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start()
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_tpa_start()
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_tpa_start()
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_tpa_start()
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; in bnx2x_tpa_start()
449 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_start()
458 bnx2x_reuse_rx_data(fp, cons, prod); in bnx2x_tpa_start()
481 if (fp->mode == TPA_MODE_GRO) { in bnx2x_tpa_start()
488 fp->tpa_queue_used |= (1 << queue); in bnx2x_tpa_start()
490 fp->tpa_queue_used); in bnx2x_tpa_start()
547 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
550 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_alloc_rx_sge()
551 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_alloc_rx_sge()
552 struct bnx2x_alloc_pool *pool = &fp->page_pool; in bnx2x_alloc_rx_sge()
586 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
601 if (fp->mode == TPA_MODE_GRO) { in bnx2x_fill_frag_skb()
628 if (fp->mode == TPA_MODE_GRO) in bnx2x_fill_frag_skb()
633 rx_pg = &fp->rx_page_ring[sge_idx]; in bnx2x_fill_frag_skb()
638 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
640 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
648 if (fp->mode == TPA_MODE_LRO) in bnx2x_fill_frag_skb()
676 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) in bnx2x_frag_free() argument
678 if (fp->rx_frag_size) in bnx2x_frag_free()
684 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) in bnx2x_frag_alloc() argument
686 if (fp->rx_frag_size) { in bnx2x_frag_alloc()
691 return napi_alloc_frag(fp->rx_frag_size); in bnx2x_frag_alloc()
694 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc()
731 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
750 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_gro_receive()
751 napi_gro_receive(&fp->napi, skb); in bnx2x_gro_receive()
754 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
776 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); in bnx2x_tpa_stop()
781 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_stop()
783 skb = build_skb(data, fp->rx_frag_size); in bnx2x_tpa_stop()
787 if (pad + len > fp->rx_buf_size) { in bnx2x_tpa_stop()
789 pad, len, fp->rx_buf_size); in bnx2x_tpa_stop()
791 bnx2x_frag_free(fp, new_data); in bnx2x_tpa_stop()
803 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
807 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
820 bnx2x_frag_free(fp, new_data); in bnx2x_tpa_stop()
825 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
828 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
832 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; in bnx2x_alloc_rx_data()
833 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; in bnx2x_alloc_rx_data()
836 data = bnx2x_frag_alloc(fp, gfp_mask); in bnx2x_alloc_rx_data()
841 fp->rx_buf_size, in bnx2x_alloc_rx_data()
844 bnx2x_frag_free(fp, data); in bnx2x_alloc_rx_data()
860 struct bnx2x_fastpath *fp, in bnx2x_csum_validate() argument
882 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) in bnx2x_rx_int() argument
884 struct bnx2x *bp = fp->bp; in bnx2x_rx_int()
898 bd_cons = fp->rx_bd_cons; in bnx2x_rx_int()
899 bd_prod = fp->rx_bd_prod; in bnx2x_rx_int()
901 sw_comp_cons = fp->rx_comp_cons; in bnx2x_rx_int()
902 sw_comp_prod = fp->rx_comp_prod; in bnx2x_rx_int()
905 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
909 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); in bnx2x_rx_int()
954 bnx2x_sp_event(fp, cqe); in bnx2x_rx_int()
958 rx_buf = &fp->rx_buf_ring[bd_cons]; in bnx2x_rx_int()
966 if (fp->mode == TPA_MODE_DISABLED && in bnx2x_rx_int()
979 bnx2x_tpa_start(fp, queue, in bnx2x_rx_int()
986 tpa_info = &fp->tpa_info[queue]; in bnx2x_rx_int()
994 if (fp->mode == TPA_MODE_GRO) in bnx2x_rx_int()
1001 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
1008 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); in bnx2x_rx_int()
1025 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1034 skb = napi_alloc_skb(&fp->napi, len); in bnx2x_rx_int()
1038 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1042 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1044 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1048 fp->rx_buf_size, in bnx2x_rx_int()
1050 skb = build_skb(data, fp->rx_frag_size); in bnx2x_rx_int()
1052 bnx2x_frag_free(fp, data); in bnx2x_rx_int()
1053 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1061 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1063 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1078 bnx2x_csum_validate(skb, cqe, fp, in bnx2x_rx_int()
1079 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1081 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_rx_int()
1093 napi_gro_receive(&fp->napi, skb); in bnx2x_rx_int()
1112 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
1116 fp->rx_bd_cons = bd_cons; in bnx2x_rx_int()
1117 fp->rx_bd_prod = bd_prod_fw; in bnx2x_rx_int()
1118 fp->rx_comp_cons = sw_comp_cons; in bnx2x_rx_int()
1119 fp->rx_comp_prod = sw_comp_prod; in bnx2x_rx_int()
1122 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1123 fp->rx_sge_prod); in bnx2x_rx_int()
1130 struct bnx2x_fastpath *fp = fp_cookie; in bnx2x_msix_fp_int() local
1131 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int()
1136 fp->index, fp->fw_sb_id, fp->igu_sb_id); in bnx2x_msix_fp_int()
1138 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1146 for_each_cos_in_tx_queue(fp, cos) in bnx2x_msix_fp_int()
1147 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_msix_fp_int()
1149 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_msix_fp_int()
1150 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1339 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_sgl() argument
1346 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; in bnx2x_set_next_page_sgl()
1348 cpu_to_le32(U64_HI(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1352 cpu_to_le32(U64_LO(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1358 struct bnx2x_fastpath *fp, int last) in bnx2x_free_tpa_pool() argument
1363 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; in bnx2x_free_tpa_pool()
1374 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_tpa_pool()
1375 bnx2x_frag_free(fp, data); in bnx2x_free_tpa_pool()
1385 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic() local
1387 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings_cnic()
1394 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1395 fp->rx_sge_prod); in bnx2x_init_rx_rings_cnic()
1407 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1410 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1412 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_init_rx_rings()
1416 &fp->tpa_info[i]; in bnx2x_init_rx_rings()
1421 bnx2x_frag_alloc(fp, GFP_KERNEL); in bnx2x_init_rx_rings()
1425 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1426 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1434 bnx2x_set_next_page_sgl(fp); in bnx2x_init_rx_rings()
1437 bnx2x_init_sge_ring_bit_mask(fp); in bnx2x_init_rx_rings()
1443 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1450 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1452 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1454 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1461 fp->rx_sge_prod = ring_prod; in bnx2x_init_rx_rings()
1466 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1468 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings()
1475 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1476 fp->rx_sge_prod); in bnx2x_init_rx_rings()
1484 U64_LO(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1487 U64_HI(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1492 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) in bnx2x_free_tx_skbs_queue() argument
1495 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue()
1497 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_tx_skbs_queue()
1498 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_tx_skbs_queue()
1521 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1530 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1534 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) in bnx2x_free_rx_bds() argument
1536 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds()
1540 if (fp->rx_buf_ring == NULL) in bnx2x_free_rx_bds()
1544 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; in bnx2x_free_rx_bds()
1551 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_rx_bds()
1554 bnx2x_frag_free(fp, data); in bnx2x_free_rx_bds()
1563 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1572 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs() local
1574 bnx2x_free_rx_bds(fp); in bnx2x_free_rx_skbs()
1576 if (fp->mode != TPA_MODE_DISABLED) in bnx2x_free_rx_skbs()
1577 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1643 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1765 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs() local
1766 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", in bnx2x_req_msix_irqs()
1770 bnx2x_msix_fp_int, 0, fp->name, fp); in bnx2x_req_msix_irqs()
2016 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size() local
2030 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + in bnx2x_set_rx_buf_size()
2035 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); in bnx2x_set_rx_buf_size()
2037 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) in bnx2x_set_rx_buf_size()
2038 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; in bnx2x_set_rx_buf_size()
2040 fp->rx_frag_size = 0; in bnx2x_set_rx_buf_size()
2054 bp->fp->cl_id + in bnx2x_init_rss()
2465 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp() local
2467 struct napi_struct orig_napi = fp->napi; in bnx2x_bz_fp()
2468 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; in bnx2x_bz_fp()
2471 if (fp->tpa_info) in bnx2x_bz_fp()
2472 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * in bnx2x_bz_fp()
2474 memset(fp, 0, sizeof(*fp)); in bnx2x_bz_fp()
2477 fp->napi = orig_napi; in bnx2x_bz_fp()
2478 fp->tpa_info = orig_tpa_info; in bnx2x_bz_fp()
2479 fp->bp = bp; in bnx2x_bz_fp()
2480 fp->index = index; in bnx2x_bz_fp()
2481 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2482 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2485 fp->max_cos = 1; in bnx2x_bz_fp()
2488 if (IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2489 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2490 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2491 for_each_cos_in_tx_queue(fp, cos) in bnx2x_bz_fp()
2492 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2499 fp->mode = TPA_MODE_LRO; in bnx2x_bz_fp()
2501 fp->mode = TPA_MODE_GRO; in bnx2x_bz_fp()
2503 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2508 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2509 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2572 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2800 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2802 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2823 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2938 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2971 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues() local
2973 for_each_cos_in_tx_queue(fp, cos) in bnx2x_drain_tx_queues()
2974 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3222 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, in bnx2x_poll() local
3224 struct bnx2x *bp = fp->bp; in bnx2x_poll()
3234 for_each_cos_in_tx_queue(fp, cos) in bnx2x_poll()
3235 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_poll()
3236 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3238 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; in bnx2x_poll()
3245 if (IS_FCOE_FP(fp)) { in bnx2x_poll()
3248 bnx2x_update_fpsb_idx(fp); in bnx2x_poll()
3264 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { in bnx2x_poll()
3268 "Update index to %d\n", fp->fp_hc_idx); in bnx2x_poll()
3269 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3270 le16_to_cpu(fp->fp_hc_idx), in bnx2x_poll()
4357 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at() local
4364 fp->status_blk_mapping = 0; in bnx2x_free_fp_mem_at()
4381 bnx2x_free_rx_bds(fp); in bnx2x_free_fp_mem_at()
4404 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_fp_mem_at()
4405 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_fp_mem_at()
4451 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_bds() argument
4454 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds()
4458 fp->rx_comp_cons = 0; in bnx2x_alloc_rx_bds()
4465 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4476 i - failure_cnt, fp->index); in bnx2x_alloc_rx_bds()
4478 fp->rx_bd_prod = ring_prod; in bnx2x_alloc_rx_bds()
4480 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, in bnx2x_alloc_rx_bds()
4483 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4488 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_rx_cq() argument
4496 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; in bnx2x_set_next_page_rx_cq()
4498 cpu_to_le32(U64_HI(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4501 cpu_to_le32(U64_LO(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4509 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at() local
4568 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_alloc_fp_mem_at()
4569 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_alloc_fp_mem_at()
4619 bnx2x_set_next_page_rx_bd(fp); in bnx2x_alloc_fp_mem_at()
4622 bnx2x_set_next_page_rx_cq(fp); in bnx2x_alloc_fp_mem_at()
4625 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); in bnx2x_alloc_fp_mem_at()
4640 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? in bnx2x_alloc_fp_mem_at()
4708 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4709 kfree(bp->fp); in bnx2x_free_mem_bp()
4719 struct bnx2x_fastpath *fp; in bnx2x_alloc_mem_bp() local
4740 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4741 if (!fp) in bnx2x_alloc_mem_bp()
4744 fp[i].tpa_info = in bnx2x_alloc_mem_bp()
4747 if (!(fp[i].tpa_info)) in bnx2x_alloc_mem_bp()
4751 bp->fp = fp; in bnx2x_alloc_mem_bp()
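
The tail of the listing (matches 4708-4751) shows where the fp array itself is created and torn down: bnx2x_alloc_mem_bp() kcalloc()s bp->fp plus one tpa_info array per queue, and bnx2x_free_mem_bp() releases them in the opposite order. The sketch below only illustrates that ownership pattern in plain userspace C, not the driver code: the struct definitions are cut down to the fields that appear in the matches above, and the helper names (alloc_fastpaths, free_fastpaths) are invented for the example.

    #include <stdlib.h>

    struct agg_info { int state; };     /* stand-in for struct bnx2x_agg_info */

    struct fastpath {
            struct bnx2x    *bp;        /* back-pointer; the driver sets this in bnx2x_bz_fp() */
            int              index;     /* queue index, also set in bnx2x_bz_fp() */
            struct agg_info *tpa_info;  /* per-queue aggregation contexts */
    };

    struct bnx2x {
            struct fastpath *fp;            /* array of fastpath queues */
            int              fp_array_size; /* number of entries in fp[] */
    };

    /* Allocate the fp array, then one tpa_info array per entry, and unwind
     * everything on failure - the same shape as bnx2x_alloc_mem_bp(). */
    static int alloc_fastpaths(struct bnx2x *bp, int nqueues, int max_agg)
    {
            struct fastpath *fp = calloc(nqueues, sizeof(*fp));
            int i;

            if (!fp)
                    return -1;

            for (i = 0; i < nqueues; i++) {
                    fp[i].tpa_info = calloc(max_agg, sizeof(*fp[i].tpa_info));
                    if (!fp[i].tpa_info)
                            goto err;
            }

            bp->fp = fp;
            bp->fp_array_size = nqueues;
            return 0;

    err:
            /* Free the per-queue arrays built so far, then the fp array. */
            while (i--)
                    free(fp[i].tpa_info);
            free(fp);
            return -1;
    }

    /* Mirrors the kfree() pair in bnx2x_free_mem_bp(): per-queue tpa_info
     * first, then the fp array itself. */
    static void free_fastpaths(struct bnx2x *bp)
    {
            int i;

            for (i = 0; i < bp->fp_array_size; i++)
                    free(bp->fp[i].tpa_info);
            free(bp->fp);
            bp->fp = NULL;
    }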