Lines matching refs: cqe — qede RX fast path (drivers/net/ethernet/qlogic/qede/qede_fp.c). The matches cover the TPA (Transparent Packet Aggregation) start/continue/end handlers, the IP-fragment and XDP checks, jumbo-frame reassembly, and the top-level completion-queue-entry dispatch.
651 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
653 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
661 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
662 cqe->header_len; in qede_set_gro_params()
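
The two lines above compute the GSO segment size: the first-BD length minus the header length is the payload each coalesced segment carried. A standalone sketch of that arithmetic, using a simplified stand-in for the CQE (the real eth_fast_path_rx_tpa_start_cqe layout comes from the firmware headers) and a portable equivalent of le16_to_cpu():

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in, not the real hardware layout. */
    struct tpa_start_sketch {
            uint8_t len_on_first_bd[2];     /* __le16 on the wire */
            uint8_t header_len;             /* single byte, no swap needed */
    };

    /* Portable equivalent of le16_to_cpu() for this sketch. */
    static uint16_t le16(const uint8_t b[2])
    {
            return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
    }

    /* gso_size = first-BD length minus the packet headers, i.e. the
     * per-segment payload the aggregation was built from. */
    static uint16_t gso_size(const struct tpa_start_sketch *c)
    {
            return (uint16_t)(le16(c->len_on_first_bd) - c->header_len);
    }

    int main(void)
    {
            struct tpa_start_sketch c = { { 0xEA, 0x05 }, 66 }; /* 1514, 66 */
            printf("gso_size = %u\n", gso_size(&c));            /* 1448 */
            return 0;
    }
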
834 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
836 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
841 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
844 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
865 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
868 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
872 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
875 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
878 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
879 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
880 le16_to_cpu(cqe->bw_ext_bd_len_list[0])); in qede_tpa_start()
882 if (unlikely(cqe->bw_ext_bd_len_list[1])) { in qede_tpa_start()
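
Lines 865-868 take the stripped VLAN tag only when a parsing-flags bit says one was present. A minimal sketch of that shift-and-mask idiom; the VLAN_TAG_EXIST_* names and the shift value are illustrative placeholders, not the firmware-defined PARSING_FLAGS_* constants the driver uses:

    #include <stdint.h>

    /* Placeholder bit position for the "VLAN tag present" flag. */
    #define VLAN_TAG_EXIST_SHIFT 8
    #define VLAN_TAG_EXIST_MASK  0x1u

    /* pars_flags and vlan_tag are assumed already le16_to_cpu()-converted,
     * matching lines 865-868 above. Returns 1 if a tag was taken. */
    int take_vlan_tag(uint16_t pars_flags, uint16_t vlan_tag, uint16_t *dst)
    {
            if ((pars_flags >> VLAN_TAG_EXIST_SHIFT) & VLAN_TAG_EXIST_MASK) {
                    *dst = vlan_tag;
                    return 1;
            }
            return 0;
    }
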
960 struct eth_fast_path_rx_tpa_cont_cqe *cqe) in qede_tpa_cont() argument
964 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
965 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
966 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
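
The continuation CQE carries a short, zero-terminated array of per-BD lengths, and the loop at line 964 stops at the first zero entry. A sketch of the same sentinel loop that just sums the bytes instead of calling qede_fill_frag_skb(); LEN_LIST_SIZE is an illustrative bound (the real array size is fixed by the firmware headers), added defensively here since a fully populated list has no sentinel:

    #include <stdint.h>
    #include <stddef.h>

    #define LEN_LIST_SIZE 6   /* illustrative; firmware headers fix the real size */

    /* Sum the zero-terminated per-BD lengths, mirroring
     * for (i = 0; cqe->len_list[i]; i++) above. Entries are host-order
     * here; the driver converts each with le16_to_cpu(). */
    uint32_t total_cont_bytes(const uint16_t len_list[LEN_LIST_SIZE])
    {
            uint32_t total = 0;
            size_t i;

            for (i = 0; i < LEN_LIST_SIZE && len_list[i]; i++)
                    total += len_list[i];
            return total;
    }
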
975 struct eth_fast_path_rx_tpa_end_cqe *cqe) in qede_tpa_end() argument
982 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
989 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
990 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
991 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
1000 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
1003 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
1004 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
1007 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
1016 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
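
Lines 1000-1007 are end-of-aggregation sanity checks: the BD count reported by the CQE must match what the driver filled (the first BD plus frag_id frags), and the assembled skb length must equal total_packet_len; num_of_coalesced_segs then becomes the GRO segment count. A condensed sketch of those two invariants, with values assumed already byte-swapped:

    #include <stdint.h>
    #include <stdbool.h>

    /* Mirrors the "should never fire" checks at lines 1000-1007; on a
     * mismatch the driver logs and drops the aggregation. */
    bool tpa_end_consistent(uint8_t num_of_bds, uint8_t frag_id,
                            uint16_t total_packet_len, uint32_t skb_len)
    {
            if (num_of_bds != (uint8_t)(frag_id + 1))
                    return false;
            if (skb_len != total_packet_len)
                    return false;
            return true;
    }
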
1065 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, in qede_pkt_is_ip_fragmented() argument
1068 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
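
qede_pkt_is_ip_fragmented tests fragment bits in two places: the tunnel parsing flags stored in the CQE (for the outer header) and the ordinary parsing flags extracted earlier (for the inner one). A sketch of the shape of that check; both *_FRAG_BIT masks are placeholders for the ETH_TUNNEL_PARSING_FLAGS_* / PARSING_FLAGS_* definitions in the firmware headers:

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder bit positions, not the real firmware constants. */
    #define TUN_IPV4_FRAG_BIT (1u << 1)
    #define IPV4_FRAG_BIT     (1u << 2)

    /* Fragmented in either the outer (tunnel) or inner header counts. */
    bool pkt_is_ip_fragmented_sketch(uint8_t tun_pars_flg, uint16_t flag)
    {
            return (tun_pars_flg & TUN_IPV4_FRAG_BIT) || (flag & IPV4_FRAG_BIT);
    }
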
1085 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_xdp() argument
1164 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
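
When the XDP verdict keeps the frame out of the stack, every buffer descriptor belonging to the packet has to be re-posted, which is what the qede_recycle_rx_bd_ring(rxq, cqe->bd_num) call at line 1164 does. A toy version over a mock ring, advancing the software producer once per recycled BD (the real helper also re-maps the page into the producer slot):

    #include <stdint.h>

    struct mock_rx_ring {
            uint16_t sw_rx_prod;    /* producer index, wraps via mask */
            uint16_t mask;          /* ring size - 1, power of two */
    };

    /* Re-post 'bd_num' consumed buffers, one producer step each. */
    void recycle_rx_bds(struct mock_rx_ring *ring, uint8_t bd_num)
    {
            while (bd_num--)
                    ring->sw_rx_prod = (uint16_t)((ring->sw_rx_prod + 1) &
                                                  ring->mask);
    }
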
1173 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_build_jumbo() argument
1176 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1184 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
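
A jumbo packet spans cqe->bd_num buffer descriptors: the CQE's pkt_len covers the whole packet, and the loop at line 1184 attaches the bd_num - 1 trailing buffers as page frags. A sketch of the per-frag length bookkeeping, assuming a fixed RX_BUF_SIZE per BD (illustrative; the real buffer size is a per-queue field):

    #include <stdint.h>
    #include <stdio.h>

    #define RX_BUF_SIZE 2048u   /* illustrative per-BD buffer size */

    /* Walk the trailing BDs of a jumbo packet: each frag takes a full
     * buffer except possibly the last, which takes the remainder. */
    void walk_jumbo_frags(uint16_t pkt_len, uint8_t bd_num,
                          uint16_t first_bd_len)
    {
            uint32_t left = (uint32_t)pkt_len - first_bd_len;
            uint8_t num_frags;

            for (num_frags = (uint8_t)(bd_num - 1); num_frags > 0; num_frags--) {
                    uint32_t cur = left < RX_BUF_SIZE ? left : RX_BUF_SIZE;

                    printf("frag of %u bytes\n", cur);
                    left -= cur;
            }
    }
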
1227 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe() argument
1232 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1235 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1238 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
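
qede_rx_process_tpa_cqe is a three-way dispatch over union eth_rx_cqe: the same pointer is viewed as the start, continuation, or end variant according to the type tag, and only the end case can complete a packet (hence the lone return at line 1238). A compact sketch of that tagged-union pattern with mock types:

    #include <stdio.h>

    /* Mock type tag and union echoing the TPA members of eth_rx_cqe. */
    enum cqe_type_sketch { TPA_START, TPA_CONT, TPA_END };

    union rx_cqe_sketch {
            struct { int agg_index; } tpa_start;
            struct { int agg_index; } tpa_cont;
            struct { int agg_index; } tpa_end;
    };

    /* The type tag decides which member is valid: the hardware wrote
     * exactly one of the layouts into the ring entry. */
    void process_tpa_sketch(const union rx_cqe_sketch *cqe,
                            enum cqe_type_sketch type)
    {
            switch (type) {
            case TPA_START: /* qede_tpa_start(...) */
                    printf("start agg %d\n", cqe->tpa_start.agg_index);
                    break;
            case TPA_CONT:  /* qede_tpa_cont(...) */
                    printf("cont agg %d\n", cqe->tpa_cont.agg_index);
                    break;
            case TPA_END:   /* qede_tpa_end(...) completes the packet */
                    printf("end agg %d\n", cqe->tpa_end.agg_index);
                    break;
            }
    }
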
1252 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
1259 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1260 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1266 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; in qede_rx_process_cqe()
1273 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); in qede_rx_process_cqe()
1281 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1292 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1332 qede_ptp_record_rx_ts(edev, cqe, skb); in qede_rx_process_cqe()
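
qede_rx_process_cqe ties the section together: consume one entry from the completion ring, read the type through the fast_path_regular view (the variants share that leading field), then branch to the slow path, the TPA handlers above, or regular processing, where XDP, jumbo reassembly, and the PTP RX timestamp at line 1332 all happen. A skeleton of that dispatch with mock names; the "packets completed" return value is an assumption for the sketch:

    /* Mock kinds standing in for the ETH_RX_CQE_TYPE_* values. */
    enum cqe_kind { CQE_SLOW_PATH, CQE_TPA, CQE_REGULAR };

    /* Stands in for the entry qed_chain_consume() hands back. */
    struct mock_cqe { enum cqe_kind type; };

    /* Returns how many packets this CQE completed (0 or 1). */
    int process_one_cqe(const struct mock_cqe *cqe)
    {
            switch (cqe->type) {
            case CQE_SLOW_PATH:
                    /* forward to the slow-path handler; no packet for NAPI */
                    return 0;
            case CQE_TPA:
                    /* qede_rx_process_tpa_cqe(): only TPA end yields a packet */
                    return 1;
            case CQE_REGULAR:
                    /* run XDP, build the skb (jumbo-aware), set csum/rxhash,
                     * record the PTP RX timestamp, hand the skb up */
                    return 1;
            }
            return 0;
    }
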