Searched refs:segs (Results 1 – 25 of 40) sorted by relevance

/drivers/net/ethernet/intel/ice/
ice_flow.c
627 static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) in ice_flow_val_hdrs() argument
633 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && in ice_flow_val_hdrs()
634 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) in ice_flow_val_hdrs()
638 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && in ice_flow_val_hdrs()
639 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) in ice_flow_val_hdrs()
667 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? in ice_flow_calc_seg_sz()
671 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) in ice_flow_calc_seg_sz()
673 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) in ice_flow_calc_seg_sz()
675 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) in ice_flow_calc_seg_sz()
677 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) in ice_flow_calc_seg_sz()
[all …]
ice_flow.h
374 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX]; member
390 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
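
The ice_flow_val_hdrs() hits above rest on a bit trick: masking the segment's header bitmap down to its L3 (or L4) bits and requiring the result to be a power of two enforces at most one protocol per layer. A minimal userspace sketch of that check, with illustrative flag names standing in for the ICE_FLOW_SEG_HDR_* constants:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the ICE_FLOW_SEG_HDR_* bits. */
#define HDR_IPV4 0x1
#define HDR_IPV6 0x2
#define L3_MASK  (HDR_IPV4 | HDR_IPV6)
#define HDR_TCP  0x4
#define HDR_UDP  0x8
#define L4_MASK  (HDR_TCP | HDR_UDP)

static bool is_power_of_2(uint32_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Valid only if at most one L3 bit and at most one L4 bit is set. */
static bool seg_hdrs_valid(uint32_t hdrs)
{
        if ((hdrs & L3_MASK) && !is_power_of_2(hdrs & L3_MASK))
                return false;
        if ((hdrs & L4_MASK) && !is_power_of_2(hdrs & L4_MASK))
                return false;
        return true;
}
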
/drivers/infiniband/sw/rdmavt/
mr.c
379 mr->mr.map[m]->segs[n].vaddr = vaddr; in rvt_reg_user_mr()
380 mr->mr.map[m]->segs[n].length = PAGE_SIZE; in rvt_reg_user_mr()
569 mr->mr.map[m]->segs[n].vaddr = (void *)addr; in rvt_set_page()
570 mr->mr.map[m]->segs[n].length = ps; in rvt_set_page()
599 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; in rvt_map_mr_sg()
787 while (off >= mr->map[m]->segs[n].length) { in rvt_lkey_ok()
788 off -= mr->map[m]->segs[n].length; in rvt_lkey_ok()
797 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_lkey_ok()
798 isge->length = mr->map[m]->segs[n].length - off; in rvt_lkey_ok()
894 while (off >= mr->map[m]->segs[n].length) { in rvt_rkey_ok()
[all …]
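
The rvt_lkey_ok()/rvt_rkey_ok() hits above all walk the same two-level structure: a memory region is an array of maps, each map an array of (vaddr, length) segments, and a byte offset is resolved by skipping whole segments. A hedged sketch of that walk, with an assumed per-map segment count (rdmavt's is RVT_SEGSZ):

#include <stddef.h>

#define SEGS_PER_MAP 8 /* assumption; rdmavt uses RVT_SEGSZ */

struct seg { void *vaddr; size_t length; };
struct seg_map { struct seg segs[SEGS_PER_MAP]; };

/* Resolve a byte offset into (map m, segment n) coordinates and
 * return the address; *remaining gets the bytes left in that
 * segment. The caller must have validated that off is in range. */
static void *resolve_offset(struct seg_map **map, size_t off,
                            size_t *remaining)
{
        size_t m = 0, n = 0;

        while (off >= map[m]->segs[n].length) {
                off -= map[m]->segs[n].length;
                if (++n >= SEGS_PER_MAP) {
                        m++;
                        n = 0;
                }
        }
        *remaining = map[m]->segs[n].length - off;
        return (char *)map[m]->segs[n].vaddr + off;
}
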
/drivers/net/wireguard/
device.c
178 struct sk_buff *segs = skb_gso_segment(skb, 0); in wg_xmit() local
180 if (IS_ERR(segs)) { in wg_xmit()
181 ret = PTR_ERR(segs); in wg_xmit()
185 skb = segs; in wg_xmit()
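
The wg_xmit() hit above is the canonical software-GSO fallback: skb_gso_segment() returns either an ERR_PTR or a list of MTU-sized packets that replaces the original skb. A hedged kernel-style sketch of that shape; xmit_one() is a hypothetical per-packet handler and the error path is simplified:

#include <linux/err.h>
#include <linux/skbuff.h>

void xmit_one(struct sk_buff *skb); /* hypothetical handler */

static int xmit_gso(struct sk_buff *skb)
{
        struct sk_buff *segs, *seg, *next;

        segs = skb_gso_segment(skb, 0);
        if (IS_ERR(segs)) {
                kfree_skb(skb);
                return PTR_ERR(segs);
        }
        consume_skb(skb); /* the segment list replaces the original */

        skb_list_walk_safe(segs, seg, next) {
                skb_mark_not_on_list(seg);
                xmit_one(seg);
        }
        return 0;
}
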
/drivers/net/ethernet/broadcom/bnxt/
bnxt_coredump.c
52 info->segs = le16_to_cpu(*((__le16 *)(resp + in bnxt_hwrm_dbg_dma_data()
54 if (!info->segs) { in bnxt_hwrm_dbg_dma_data()
59 info->dest_buf_size = info->segs * in bnxt_hwrm_dbg_dma_data()
113 coredump->total_segs = info.segs; in bnxt_hwrm_dbg_coredump_list()
bnxt_coredump.h
90 u16 segs; member
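
The bnxt_hwrm_dbg_dma_data() hit above reads the segment count out of a firmware response as a little-endian u16 and sizes the destination buffer from it, bailing out when the count is zero. A userspace sketch of that parse; the field offset and per-segment record size are stated assumptions, not the driver's values:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SEG_COUNT_OFF 4   /* assumption: offset of the count field */
#define SEG_REC_SIZE  16  /* assumption: bytes per segment record */

/* Decode a little-endian u16 regardless of host endianness. */
static uint16_t le16_to_host(const uint8_t *b)
{
        return (uint16_t)(b[0] | (b[1] << 8));
}

/* Returns the buffer size needed for all segments, 0 if none. */
static size_t coredump_buf_size(const uint8_t *resp)
{
        uint16_t segs = le16_to_host(resp + SEG_COUNT_OFF);

        return (size_t)segs * SEG_REC_SIZE;
}
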
/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
270 for (seg = 0; seg < sg->segs; seg++, seg_addr++) in otx2_free_rcv_seg()
336 if (cqe->sg.segs) in otx2_check_rcv_errors()
373 for (seg = 0; seg < sg->segs; seg++, seg_addr++) { in otx2_rcv_pkt_handler()
610 sg->segs = 0; in otx2_sqe_add_sg()
626 sg->segs++; in otx2_sqe_add_sg()
844 sg->segs = 0; in otx2_sqe_tso_add_sg()
857 sg->segs++; in otx2_sqe_tso_add_sg()
1210 if (cqe->sg.segs > 1) { in otx2_cleanup_rx_cqes()
1333 sg->segs = 1; in otx2_xdp_sqe_add_sg()
otx2_struct.h
153 u64 segs : 2; member
228 u64 segs : 2; member
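
The otx2_struct.h hits above declare segs as a 2-bit field of a 64-bit descriptor word, which is why the TX path resets sg->segs to 0 and increments it per fragment: each scatter/gather sub-descriptor can name at most three segments. An illustrative layout under that constraint; only the segs width is taken from the header, the other fields are stand-ins:

#include <stdint.h>

struct sg_subdesc {
        uint64_t seg1_size : 16; /* stand-in */
        uint64_t seg2_size : 16; /* stand-in */
        uint64_t seg3_size : 16; /* stand-in */
        uint64_t rsvd      : 12;
        uint64_t segs      : 2;  /* 0..3 valid segments follow */
        uint64_t subdc     : 2;  /* stand-in: sub-descriptor opcode */
};
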
/drivers/block/
xen-blkfront.c
1211 int i, j, segs; in blkif_free_ring() local
1252 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free_ring()
1255 for (j = 0; j < segs; j++) { in blkif_free_ring()
1270 for (j = 0; j < INDIRECT_GREFS(segs); j++) { in blkif_free_ring()
2013 unsigned int segs; in blkif_recover() local
2019 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; in blkif_recover()
2020 blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); in blkif_recover()
2040 BUG_ON(req->nr_phys_segments > segs); in blkif_recover()
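
The blkif_recover() hit above uses the GCC "?:" shorthand: "a ? : b" evaluates to a when it is non-zero, else b. The segment budget is therefore the negotiated indirect maximum when one exists and the protocol's fixed per-request limit otherwise, and BUG_ON() catches any queued request that no longer fits. A sketch of that selection:

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* fixed protocol limit */

static unsigned int segment_budget(unsigned int max_indirect_segments)
{
        /* Equivalent to the driver's "max_indirect_segments ? : ..." */
        return max_indirect_segments ? max_indirect_segments
                                     : BLKIF_MAX_SEGMENTS_PER_REQUEST;
}
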
/drivers/media/dvb-frontends/
mb86a20s.c
1428 const struct linear_segments *segs; in mb86a20s_get_blk_error_layer_CNR() local
1480 segs = cnr_qpsk_table; in mb86a20s_get_blk_error_layer_CNR()
1484 segs = cnr_16qam_table; in mb86a20s_get_blk_error_layer_CNR()
1489 segs = cnr_64qam_table; in mb86a20s_get_blk_error_layer_CNR()
1493 cnr = interpolate_value(mer, segs, segs_len); in mb86a20s_get_blk_error_layer_CNR()
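
The mb86a20s hits above pick a calibration table per modulation (QPSK, 16QAM, 64QAM) and map the measured MER through it. A hedged sketch of what a piecewise-linear interpolate_value() over (x, y) breakpoints can look like; the real tables, their ordering, and their contents live in the driver:

#include <stdint.h>

struct linear_segments { uint32_t x, y; };

/* Lookup over breakpoints sorted by ascending x (an assumption
 * here); clamps outside the table's range. */
static uint32_t interpolate_value(uint32_t v,
                                  const struct linear_segments *segs,
                                  unsigned int len)
{
        unsigned int i;

        if (v <= segs[0].x)
                return segs[0].y;
        if (v >= segs[len - 1].x)
                return segs[len - 1].y;

        for (i = 1; i < len; i++) {
                if (v <= segs[i].x) {
                        int64_t dy = (int64_t)segs[i].y - segs[i - 1].y;
                        uint32_t dx = segs[i].x - segs[i - 1].x;

                        return (uint32_t)(segs[i - 1].y +
                               dy * (int64_t)(v - segs[i - 1].x) / dx);
                }
        }
        return segs[len - 1].y; /* unreachable */
}
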
/drivers/net/
tap.c
344 struct sk_buff *segs = __skb_gso_segment(skb, features, false); in tap_handle_frame() local
347 if (IS_ERR(segs)) { in tap_handle_frame()
352 if (!segs) { in tap_handle_frame()
361 skb_list_walk_safe(segs, skb, next) { in tap_handle_frame()
/drivers/staging/octeon/
ethernet-tx.c
254 pko_command.s.segs = 1; in cvm_oct_xmit()
283 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; in cvm_oct_xmit()
/drivers/scsi/
vmw_pvscsi.c
363 int segs = scsi_dma_map(cmd); in pvscsi_map_buffers() local
365 if (segs == -ENOMEM) { in pvscsi_map_buffers()
369 } else if (segs > 1) { in pvscsi_map_buffers()
370 pvscsi_create_sg(ctx, sg, segs); in pvscsi_map_buffers()
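
The pvscsi_map_buffers() hit above shows the scsi_dma_map() contract: a negative errno on mapping failure, otherwise the number of DMA segments, where only counts above one require building a scatter/gather list. A hedged kernel-style sketch; the two helpers are hypothetical:

#include <scsi/scsi_cmnd.h>

void build_sg_list(struct scsi_cmnd *cmd, int segs); /* hypothetical */
void use_single_buffer(struct scsi_cmnd *cmd);       /* hypothetical */

static int map_buffers(struct scsi_cmnd *cmd)
{
        int segs = scsi_dma_map(cmd);

        if (segs < 0)           /* e.g. -ENOMEM: let the caller retry */
                return segs;
        if (segs > 1)
                build_sg_list(cmd, segs);
        else if (segs == 1)
                use_single_buffer(cmd);
        return 0;
}
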
st.c
3898 int segs, max_segs, b_size, order, got; in enlarge_buffer() local
3932 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; in enlarge_buffer()
3933 segs < max_segs && got < new_size;) { in enlarge_buffer()
3946 STbuffer->reserved_pages[segs] = page; in enlarge_buffer()
3947 segs++; in enlarge_buffer()
/drivers/infiniband/hw/qib/
qib_ud.c
192 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ud_loopback()
194 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ud_loopback()
qib_verbs.c
167 sge.mr->map[sge.m]->segs[sge.n].vaddr; in qib_count_sge()
169 sge.mr->map[sge.m]->segs[sge.n].length; in qib_count_sge()
200 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_from_sge()
202 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_from_sge()
/drivers/net/wireless/realtek/rtw88/
main.c
373 const struct rtw_fwcd_segs *segs = chip->fwcd_segs; in rtw_fwcd_prep() local
377 if (segs) { in rtw_fwcd_prep()
378 prep_size += segs->num * sizeof(struct rtw_fwcd_hdr); in rtw_fwcd_prep()
380 for (i = 0; i < segs->num; i++) in rtw_fwcd_prep()
381 prep_size += segs->segs[i]; in rtw_fwcd_prep()
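
The rtw_fwcd_prep() hit above sizes the firmware-coredump buffer as one header per segment plus the sum of the per-segment payload lengths. A userspace sketch of that accumulation; the header layout is an illustrative stand-in for struct rtw_fwcd_hdr:

#include <stddef.h>
#include <stdint.h>

struct fwcd_hdr { uint32_t type, size; }; /* illustrative layout */

struct fwcd_segs {
        uint8_t num;
        uint32_t segs[]; /* per-segment payload sizes */
};

static size_t fwcd_prep_size(const struct fwcd_segs *segs)
{
        size_t total = 0;
        uint8_t i;

        if (!segs)
                return 0;
        total += (size_t)segs->num * sizeof(struct fwcd_hdr);
        for (i = 0; i < segs->num; i++)
                total += segs->segs[i];
        return total;
}
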
/drivers/net/ethernet/qlogic/qed/
qed_cxt.c
196 struct qed_tid_seg *segs = p_cfg->tid_seg; in qed_cxt_tm_iids() local
202 iids->pf_tids[j] += segs[j].count; in qed_cxt_tm_iids()
208 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; in qed_cxt_tm_iids()
226 struct qed_tid_seg *segs; in qed_cxt_qm_iids() local
234 segs = p_mngr->conn_cfg[type].tid_seg; in qed_cxt_qm_iids()
239 iids->tids += segs[j].count; in qed_cxt_qm_iids()
245 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; in qed_cxt_qm_iids()
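
The qed_cxt hits above budget task IDs by summing the PF task segments and then adding the dedicated VF segment stored one slot past them, at index NUM_TASK_PF_SEGMENTS. A sketch of that accumulation; the segment count here is illustrative (see qed_cxt.h for the real value):

#include <stdint.h>

#define NUM_TASK_PF_SEGMENTS 4 /* illustrative; see qed_cxt.h */

struct tid_seg { uint32_t count; };

/* segs[] must hold NUM_TASK_PF_SEGMENTS PF entries plus one VF entry. */
static void count_tids(const struct tid_seg *segs,
                       uint32_t *pf_tids, uint32_t *vf_tids)
{
        unsigned int j;

        for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                *pf_tids += segs[j].count;
        *vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
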
/drivers/net/ethernet/netronome/nfp/nfdk/
dp.c
42 u32 segs, hdrlen, l3_offset, l4_offset; in nfp_nfdk_tx_tso() local
56 segs = skb_shinfo(skb)->gso_segs; in nfp_nfdk_tx_tso()
64 txd.lso_totsegs = segs; in nfp_nfdk_tx_tso()
66 txbuf->pkt_cnt = segs; in nfp_nfdk_tx_tso()
/drivers/net/ethernet/intel/e1000/
e1000.h
131 unsigned short segs; member
e1000_main.c
2832 unsigned int f, bytecount, segs; in e1000_tx_map() local
2940 segs = skb_shinfo(skb)->gso_segs ?: 1; in e1000_tx_map()
2942 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
2945 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
3852 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
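
The e1000_tx_map() hit above (igbvf's igbvf_clean_tx_irq() below repeats it) is stats arithmetic for TSO: an offloaded skb leaves the wire as gso_segs packets, each carrying its own header copy, so the byte count is the skb's length plus (segs - 1) replicated headers, with skb_headlen() standing in for the header size. A sketch of that formula:

#include <stdint.h>

/* Bytes actually sent for a (possibly TSO) skb: each extra segment
 * repeats the headers, approximated by the linear head length. */
static uint32_t tx_bytecount(uint32_t skb_len, uint32_t headlen,
                             uint16_t gso_segs)
{
        uint16_t segs = gso_segs ? gso_segs : 1; /* "?: 1" in the driver */

        return (uint32_t)(segs - 1) * headlen + skb_len;
}
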
/drivers/net/ethernet/sun/
sunvnet_common.c
1226 struct sk_buff *segs, *curr, *next; in vnet_handle_offloads() local
1277 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); in vnet_handle_offloads()
1278 if (IS_ERR(segs)) in vnet_handle_offloads()
1285 skb_list_walk_safe(segs, curr, next) { in vnet_handle_offloads()
/drivers/net/ethernet/intel/e1000e/
De1000.h139 unsigned int segs; member
/drivers/net/ethernet/intel/igbvf/
netdev.c
809 unsigned int segs, bytecount; in igbvf_clean_tx_irq() local
812 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
814 bytecount = ((segs - 1) * skb_headlen(skb)) + in igbvf_clean_tx_irq()
816 total_packets += segs; in igbvf_clean_tx_irq()
/drivers/net/ethernet/myricom/myri10ge/
myri10ge.c
2892 struct sk_buff *segs, *curr, *next; in myri10ge_sw_tso() local
2897 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); in myri10ge_sw_tso()
2898 if (IS_ERR(segs)) in myri10ge_sw_tso()
2901 skb_list_walk_safe(segs, curr, next) { in myri10ge_sw_tso()
