Searched refs:frag_size (Results 1 – 25 of 41) sorted by relevance

/drivers/media/pci/netup_unidvb/
netup_unidvb_spi.c
111 u32 frag_size = (tr_size > sizeof(spi->regs->data)) ? in netup_spi_transfer() local
117 frag_offset + frag_size == t->len) { in netup_spi_transfer()
123 frag_size); in netup_spi_transfer()
126 0, frag_size); in netup_spi_transfer()
129 writew((frag_size & 0x3ff) | in netup_spi_transfer()
143 spi->regs->data, frag_size); in netup_spi_transfer()
158 tr_size -= frag_size; in netup_spi_transfer()
159 msg->actual_length += frag_size; in netup_spi_transfer()
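
The netup hits above are the canonical fixed-window chunking loop: each pass clamps the fragment to the size of the hardware data register, sends it, and advances. The same loop shape recurs below in mtk_tx_map(), qedi_split_bd() and bnx2fc_split_bd(). A minimal userspace sketch of the pattern (REG_DATA_SIZE and transfer_in_frags() are made-up stand-ins, not the driver's names):

#include <stdio.h>
#include <stddef.h>

#define REG_DATA_SIZE 1024 /* stand-in for sizeof(spi->regs->data) */

/* Send t_len bytes as register-sized fragments, flagging the last one
 * the way the driver's final-fragment test does. */
static void transfer_in_frags(size_t t_len)
{
    size_t tr_size = t_len, frag_offset = 0, actual_length = 0;

    while (tr_size) {
        size_t frag_size = (tr_size > REG_DATA_SIZE) ?
                           REG_DATA_SIZE : tr_size;
        int last = (frag_offset + frag_size == t_len);

        printf("frag at %zu: %zu bytes%s\n",
               frag_offset, frag_size, last ? " (last)" : "");
        frag_offset += frag_size;
        tr_size -= frag_size;
        actual_length += frag_size;
    }
    printf("actual_length = %zu\n", actual_length);
}

int main(void)
{
    transfer_in_frags(2500); /* 1024 + 1024 + 452 */
    return 0;
}
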
/drivers/net/wireless/intel/ipw2x00/
libipw_tx.c
189 txb->frag_size = txb_size; in libipw_alloc_txb()
246 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, in libipw_xmit() local
371 frag_size = MAX_FRAG_THRESHOLD; in libipw_xmit()
373 frag_size = ieee->fts; in libipw_xmit()
379 bytes_per_frag = frag_size - hdr_len; in libipw_xmit()
401 frag_size = bytes + hdr_len; in libipw_xmit()
404 rts_required = (frag_size > ieee->rts in libipw_xmit()
412 txb = libipw_alloc_txb(nr_frags, frag_size, in libipw_xmit()
421 txb->payload_size = frag_size * (nr_frags - 1) + in libipw_xmit()
libipw.h
490 u16 frag_size; member
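
The libipw hits are mostly the 802.11 fragmentation arithmetic: usable payload per fragment is the fragmentation threshold minus the header, and the fragment count is a ceiling division with an explicit remainder. The rtl8192u and rtl8192e staging drivers further down repeat the same computation. A standalone sketch of that math (threshold and payload values are illustrative):

#include <stdio.h>

int main(void)
{
    int frag_size = 2346; /* e.g. MAX_FRAG_THRESHOLD */
    int hdr_len = 24;     /* basic three-address 802.11 data header */
    int bytes = 6000;     /* payload to transmit */

    int bytes_per_frag = frag_size - hdr_len;
    int nr_frags = bytes / bytes_per_frag;
    int bytes_last_frag = bytes % bytes_per_frag;

    if (bytes_last_frag)
        nr_frags++;       /* trailing partial fragment */
    else
        bytes_last_frag = bytes_per_frag;

    printf("%d frags of up to %d bytes, %d in the last\n",
           nr_frags, bytes_per_frag, bytes_last_frag);
    return 0;
}
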
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
118 cpu_to_be32(priv->frag_info[i].frag_size); in mlx4_en_init_rx_desc()
475 int nr, frag_size; in mlx4_en_complete_rx_desc() local
481 frag_size = min_t(int, length, frag_info->frag_size); in mlx4_en_complete_rx_desc()
489 frag_size, priv->dma_dir); in mlx4_en_complete_rx_desc()
492 frag_size); in mlx4_en_complete_rx_desc()
505 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); in mlx4_en_complete_rx_desc()
508 release = frags->page_offset + frag_info->frag_size > PAGE_SIZE; in mlx4_en_complete_rx_desc()
518 length -= frag_size; in mlx4_en_complete_rx_desc()
774 priv->frag_info[0].frag_size, in mlx4_en_process_rx_cq()
989 priv->frag_info[0].frag_size = eff_mtu; in mlx4_en_calc_rx_buf()
[all …]
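
mlx4_en_complete_rx_desc() consumes a received packet across its preconfigured fragment slots: each slot absorbs at most its frag_size and the remainder spills into the next. A simplified model of that consumption loop (slot sizes and packet length are illustrative):

#include <stdio.h>

int main(void)
{
    int frag_size_cfg[] = { 2048, 4096, 4096 }; /* per-slot frag_size */
    int length = 7000;                          /* received bytes */

    for (int nr = 0; length > 0; nr++) {
        /* min_t(int, length, frag_info->frag_size) in the driver */
        int frag_size = length < frag_size_cfg[nr] ?
                        length : frag_size_cfg[nr];

        printf("slot %d takes %d bytes\n", nr, frag_size);
        length -= frag_size;
    }
    return 0;
}
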
/drivers/infiniband/hw/qib/
qib_user_sdma.c
71 u16 frag_size; /* frag size used by PSM */ member
352 if ((pkt->payload_size + newlen) >= pkt->frag_size) { in qib_user_sdma_page_to_frags()
353 newlen = pkt->frag_size - pkt->payload_size; in qib_user_sdma_page_to_frags()
765 if (pkt->frag_size == pkt->bytes_togo && in qib_user_sdma_init_payload()
819 u16 frag_size; in qib_user_sdma_queue_pkts() local
903 frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF; in qib_user_sdma_queue_pkts()
904 if (((frag_size ? frag_size : bytes_togo) + len) > in qib_user_sdma_queue_pkts()
910 if (frag_size) { in qib_user_sdma_queue_pkts()
913 n = npages*((2*PAGE_SIZE/frag_size)+1); in qib_user_sdma_queue_pkts()
939 pkt->frag_size = frag_size; in qib_user_sdma_queue_pkts()
[all …]
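
In the qib hits the fragment size is not configured by the driver at all; user-space PSM passes it in the top 16 bits of the first PBC word and the driver masks it out. A tiny sketch of that extraction (the PBC word value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pbc0 = 0x08000042; /* made-up host-endian PBC word 0 */
    uint16_t frag_size = (pbc0 >> 16) & 0xFFFF;

    printf("frag_size = %u\n", frag_size); /* 2048 */
    return 0;
}
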
/drivers/soc/qcom/
wcnss_ctrl.c
94 u32 frag_size; member
229 req->frag_size = NV_FRAGMENT_SIZE; in wcnss_download_nv()
235 req->frag_size = left; in wcnss_download_nv()
239 memcpy(req->fragment, data, req->frag_size); in wcnss_download_nv()
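
wcnss_download_nv() streams the NV blob as full NV_FRAGMENT_SIZE pieces followed by one remainder fragment, the other classic chunking shape. A userspace sketch, with send_fragment() as a hypothetical stand-in for the driver's SMD/rpmsg send and the fragment cap chosen for illustration:

#include <stdio.h>
#include <stddef.h>

#define NV_FRAGMENT_SIZE 3072 /* illustrative fragment cap */

/* Hypothetical stand-in for the transport send. */
static void send_fragment(const unsigned char *data, size_t frag_size)
{
    printf("sending %zu bytes from %p\n", frag_size, (const void *)data);
}

static void download(const unsigned char *data, size_t left)
{
    while (left) {
        size_t frag_size = left < NV_FRAGMENT_SIZE ?
                           left : NV_FRAGMENT_SIZE;

        send_fragment(data, frag_size);
        data += frag_size;
        left -= frag_size;
    }
}

int main(void)
{
    static unsigned char blob[7000];

    download(blob, sizeof(blob)); /* 3072 + 3072 + 856 */
    return 0;
}
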
/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
388 info->arr[0].frag_size = byte_count; in mlx5e_build_rq_frags_info()
401 int frag_size = byte_count - buf_size; in mlx5e_build_rq_frags_info() local
404 frag_size = min(frag_size, frag_size_max); in mlx5e_build_rq_frags_info()
406 info->arr[i].frag_size = frag_size; in mlx5e_build_rq_frags_info()
407 info->arr[i].frag_stride = roundup_pow_of_two(frag_size); in mlx5e_build_rq_frags_info()
409 buf_size += frag_size; in mlx5e_build_rq_frags_info()
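
mlx5e_build_rq_frags_info() fills its fragment array by capping each entry at frag_size_max and rounding the stride up to a power of two, which keeps hardware stride arithmetic cheap. A sketch of that fill loop with a reimplemented round-up (byte counts are illustrative):

#include <stdio.h>

/* Like the kernel's roundup_pow_of_two() for v > 0. */
static unsigned int roundup_pow2(unsigned int v)
{
    unsigned int p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int byte_count = 9000; /* e.g. a jumbo-MTU receive buffer */
    unsigned int frag_size_max = 4096;
    unsigned int buf_size = 0;

    for (int i = 0; buf_size < byte_count; i++) {
        unsigned int frag_size = byte_count - buf_size;

        if (frag_size > frag_size_max)
            frag_size = frag_size_max;
        printf("arr[%d]: frag_size=%u frag_stride=%u\n",
               i, frag_size, roundup_pow2(frag_size));
        buf_size += frag_size;
    }
    return 0;
}
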
/drivers/staging/rtl8192u/ieee80211/
ieee80211_tx.c
238 txb->frag_size = __cpu_to_le16(txb_size); in ieee80211_alloc_txb()
534 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; in ieee80211_xmit() local
638 frag_size = MAX_FRAG_THRESHOLD; in ieee80211_xmit()
641 frag_size = ieee->fts;//default:392 in ieee80211_xmit()
660 bytes_per_frag = frag_size - hdr_len; in ieee80211_xmit()
684 txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC); in ieee80211_xmit()
/drivers/staging/rtl8192e/
rtllib_tx.c
215 txb->frag_size = cpu_to_le16(txb_size); in rtllib_alloc_txb()
554 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; in rtllib_xmit_inter() local
707 frag_size = MAX_FRAG_THRESHOLD; in rtllib_xmit_inter()
710 frag_size = ieee->fts; in rtllib_xmit_inter()
738 bytes_per_frag = frag_size - hdr_len; in rtllib_xmit_inter()
766 txb = rtllib_alloc_txb(nr_frags, frag_size + in rtllib_xmit_inter()
/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
1109 u32 frag_size, u16 headroom, in mlx5e_build_linear_skb() argument
1112 struct sk_buff *skb = build_skb(va, frag_size); in mlx5e_build_linear_skb()
1141 u32 frag_size; in mlx5e_skb_from_cqe_linear() local
1145 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); in mlx5e_skb_from_cqe_linear()
1148 frag_size, DMA_FROM_DEVICE); in mlx5e_skb_from_cqe_linear()
1157 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); in mlx5e_skb_from_cqe_linear()
1158 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); in mlx5e_skb_from_cqe_linear()
1193 min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); in mlx5e_skb_from_cqe_nonlinear()
1456 u32 frag_size; in mlx5e_skb_from_cqe_mpwrq_linear() local
1466 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); in mlx5e_skb_from_cqe_mpwrq_linear()
[all …]
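
The mlx5 RX path sizes the build_skb() buffer as headroom plus packet bytes plus the skb_shared_info tail, each cache-line aligned; that is the shape of the MLX5_SKB_FRAG_SZ() computation (its exact definition lives in the driver headers). A sketch under assumed constants, where the cache-line width and shared-info size are illustrative, not the kernel's values:

#include <stdio.h>

#define CACHE_BYTES 64   /* illustrative SMP_CACHE_BYTES */
#define SHINFO_SIZE 320  /* illustrative sizeof(struct skb_shared_info) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int rx_headroom = 192, cqe_bcnt = 1500;
    unsigned int frag_size = ALIGN_UP(rx_headroom + cqe_bcnt, CACHE_BYTES) +
                             ALIGN_UP(SHINFO_SIZE, CACHE_BYTES);

    printf("build_skb() gets frag_size = %u\n", frag_size); /* 2048 */
    return 0;
}
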
/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/
chcr_ktls.c
26 u32 frag_size, skb_linear_data_len = skb_headlen(skb); in chcr_get_nfrags_to_send() local
35 frag_size = min(len, skb_linear_data_len - start); in chcr_get_nfrags_to_send()
40 frag_size = skb_frag_size(frag); in chcr_get_nfrags_to_send()
41 while (start >= frag_size) { in chcr_get_nfrags_to_send()
42 start -= frag_size; in chcr_get_nfrags_to_send()
45 frag_size = skb_frag_size(frag); in chcr_get_nfrags_to_send()
47 frag_size = min(len, skb_frag_size(frag) - start); in chcr_get_nfrags_to_send()
49 len -= frag_size; in chcr_get_nfrags_to_send()
53 frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); in chcr_get_nfrags_to_send()
54 len -= frag_size; in chcr_get_nfrags_to_send()
[all …]
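
chcr_get_nfrags_to_send() answers: how many fragments does the byte range (start, len) of an skb touch? Skip whole fragments until start lands inside one, take the partial piece, then count pieces until len is exhausted. cxgb4_write_partial_sgl() below walks fragments the same way. A simplified userspace model (struct fake_skb is a stand-in for the skb plus its skb_shared_info, not a kernel type):

#include <stdio.h>

struct fake_skb {
    unsigned int linear_len;  /* skb_headlen() */
    unsigned int nr_frags;
    unsigned int frag_len[8]; /* skb_frag_size() per frag */
};

static int nfrags_to_send(const struct fake_skb *skb,
                          unsigned int start, unsigned int len)
{
    unsigned int frag_idx = 0, frag_size;
    int nfrags = 0;

    if (start < skb->linear_len) {
        /* range begins in the linear area */
        frag_size = len < skb->linear_len - start ?
                    len : skb->linear_len - start;
        len -= frag_size;
        nfrags++;
    } else {
        /* skip whole frags until start falls inside one */
        start -= skb->linear_len;
        while (start >= skb->frag_len[frag_idx]) {
            start -= skb->frag_len[frag_idx];
            frag_idx++;
        }
        frag_size = skb->frag_len[frag_idx] - start;
        if (frag_size > len)
            frag_size = len;
        len -= frag_size;
        frag_idx++;
        nfrags++;
    }
    /* count the remaining whole (or final partial) frags */
    while (len) {
        frag_size = skb->frag_len[frag_idx] < len ?
                    skb->frag_len[frag_idx] : len;
        len -= frag_size;
        frag_idx++;
        nfrags++;
    }
    return nfrags;
}

int main(void)
{
    struct fake_skb skb = { 100, 3, { 1000, 1000, 500 } };

    /* 50 linear bytes, then 1150 across two frags -> 3 */
    printf("%d\n", nfrags_to_send(&skb, 50, 1200));
    return 0;
}
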
/drivers/net/ethernet/mediatek/
mtk_eth_soc.c
800 static inline int mtk_max_buf_size(int frag_size) in mtk_max_buf_size() argument
802 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - in mtk_max_buf_size()
1029 int frag_size = skb_frag_size(frag); in mtk_tx_map() local
1031 while (frag_size) { in mtk_tx_map()
1049 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); in mtk_tx_map()
1057 (frag_size - frag_map_size) == 0) in mtk_tx_map()
1077 frag_size -= frag_map_size; in mtk_tx_map()
1326 if (ring->frag_size <= PAGE_SIZE) in mtk_poll_rx()
1327 new_data = napi_alloc_frag(ring->frag_size); in mtk_poll_rx()
1349 skb = build_skb(data, ring->frag_size); in mtk_poll_rx()
[all …]
/drivers/net/ethernet/apm/xgene/
xgene_enet_main.c
617 u32 frag_size; in xgene_enet_free_pagepool() local
629 frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1])); in xgene_enet_free_pagepool()
630 if (!frag_size) in xgene_enet_free_pagepool()
673 u32 datalen, frag_size, skb_index; in xgene_enet_rx_frame() local
734 frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1])); in xgene_enet_rx_frame()
735 if (!frag_size) in xgene_enet_rx_frame()
743 frag_size, PAGE_SIZE); in xgene_enet_rx_frame()
745 datalen += frag_size; in xgene_enet_rx_frame()
/drivers/scsi/qedi/
qedi_fw.c
1756 int frag_size, sg_frags; in qedi_split_bd() local
1762 frag_size = in qedi_split_bd()
1765 frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 : in qedi_split_bd()
1768 if (frag_size == 0) in qedi_split_bd()
1769 frag_size = QEDI_BD_SPLIT_SZ; in qedi_split_bd()
1773 bd[bd_index + sg_frags].sge_len = (u16)frag_size; in qedi_split_bd()
1776 (bd_index + sg_frags), addr, frag_size); in qedi_split_bd()
1778 addr += (u64)frag_size; in qedi_split_bd()
1780 sg_len -= frag_size; in qedi_split_bd()
/drivers/scsi/bnx2fc/
bnx2fc_io.c
1626 int frag_size, sg_frags; in bnx2fc_split_bd() local
1631 frag_size = BNX2FC_BD_SPLIT_SZ; in bnx2fc_split_bd()
1633 frag_size = sg_len; in bnx2fc_split_bd()
1636 bd[bd_index + sg_frags].buf_len = (u16)frag_size; in bnx2fc_split_bd()
1639 addr += (u64) frag_size; in bnx2fc_split_bd()
1641 sg_len -= frag_size; in bnx2fc_split_bd()
/drivers/net/ethernet/chelsio/cxgb4/
sge.c
911 u32 frag_size, skb_linear_data_len = skb_headlen(skb); in cxgb4_write_partial_sgl() local
920 frag_size = min(len, skb_linear_data_len - start); in cxgb4_write_partial_sgl()
921 sgl->len0 = htonl(frag_size); in cxgb4_write_partial_sgl()
923 len -= frag_size; in cxgb4_write_partial_sgl()
928 frag_size = skb_frag_size(frag); in cxgb4_write_partial_sgl()
930 while (start >= frag_size) { in cxgb4_write_partial_sgl()
931 start -= frag_size; in cxgb4_write_partial_sgl()
934 frag_size = skb_frag_size(frag); in cxgb4_write_partial_sgl()
937 frag_size = min(len, skb_frag_size(frag) - start); in cxgb4_write_partial_sgl()
938 sgl->len0 = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
[all …]
/drivers/nfc/pn533/
pn533.c
2212 int frag_size; in pn533_fill_fragment_skbs() local
2217 frag_size = PN533_CMD_DATAFRAME_MAXLEN; in pn533_fill_fragment_skbs()
2219 frag_size = skb->len; in pn533_fill_fragment_skbs()
2222 frag = pn533_alloc_skb(dev, frag_size); in pn533_fill_fragment_skbs()
2233 if (frag_size == PN533_CMD_DATAFRAME_MAXLEN) in pn533_fill_fragment_skbs()
2240 skb_put_data(frag, skb->data, frag_size); in pn533_fill_fragment_skbs()
2243 skb_pull(skb, frag_size); in pn533_fill_fragment_skbs()
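
pn533_fill_fragment_skbs() peels maximum-size frames off the front of the skb with skb_pull(); a full-size fragment tells the peer more data follows (the frag_size == PN533_CMD_DATAFRAME_MAXLEN test above), while a short one ends the exchange. A buffer-length model of the loop (the 240-byte cap is illustrative, not necessarily the driver's constant):

#include <stdio.h>

#define DATAFRAME_MAXLEN 240 /* illustrative frame cap */

int main(void)
{
    unsigned int skb_len = 600; /* bytes left in the outgoing skb */

    while (skb_len) {
        unsigned int frag_size = skb_len > DATAFRAME_MAXLEN ?
                                 DATAFRAME_MAXLEN : skb_len;
        int mi = (frag_size == DATAFRAME_MAXLEN);

        printf("frame of %u bytes, MI=%d\n", frag_size, mi);
        skb_len -= frag_size; /* models skb_pull(skb, frag_size) */
    }
    return 0;
}
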
/drivers/net/ethernet/nvidia/
forcedeth.c
2227 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in nv_start_xmit() local
2229 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + in nv_start_xmit()
2230 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit()
2285 u32 frag_size = skb_frag_size(frag); in nv_start_xmit() local
2292 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; in nv_start_xmit()
2324 frag_size -= bcnt; in nv_start_xmit()
2329 } while (frag_size); in nv_start_xmit()
2402 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in nv_start_xmit_optimized() local
2404 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + in nv_start_xmit_optimized()
2405 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit_optimized()
[all …]
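
The forcedeth entry count is ceiling division by a power of two done with a shift and a mask test: the shift counts whole NV_TX2_TSO_MAX_SIZE chunks and the mask adds one for any tail. The bnx2x snippet below computes the same ceiling with an explicit add-and-divide. A sketch (the shift value here is illustrative, not quoted from the driver):

#include <stdio.h>

#define TSO_MAX_SHIFT 14
#define TSO_MAX_SIZE  (1u << TSO_MAX_SHIFT)

/* ceil(frag_size / TSO_MAX_SIZE) without a divide: the shift counts
 * whole chunks, the mask test adds one for the tail. */
static unsigned int entries_for(unsigned int frag_size)
{
    return (frag_size >> TSO_MAX_SHIFT) +
           ((frag_size & (TSO_MAX_SIZE - 1)) ? 1 : 0);
}

int main(void)
{
    printf("%u %u %u\n", entries_for(16384), entries_for(16385),
           entries_for(40000)); /* 1 2 3 */
    return 0;
}
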
/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.c
594 u32 i, frag_len, frag_size; in bnx2x_fill_frag_skb() local
599 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; in bnx2x_fill_frag_skb()
607 if (frag_size) in bnx2x_fill_frag_skb()
629 frag_len = min_t(u32, frag_size, (u32)full_page); in bnx2x_fill_frag_skb()
631 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); in bnx2x_fill_frag_skb()
670 frag_size -= frag_len; in bnx2x_fill_frag_skb()
963 u16 frag_size, pages; in bnx2x_rx_int() local
991 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - in bnx2x_rx_int()
995 pages = (frag_size + tpa_info->full_page - 1) / in bnx2x_rx_int()
998 pages = SGE_PAGE_ALIGN(frag_size) >> in bnx2x_rx_int()
/drivers/net/ethernet/ti/
am65-cpsw-nuss.c
1208 u32 frag_size = skb_frag_size(frag); in am65_cpsw_nuss_ndo_slave_xmit() local
1216 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, in am65_cpsw_nuss_ndo_slave_xmit()
1228 buf_dma, frag_size, buf_dma, frag_size); in am65_cpsw_nuss_ndo_slave_xmit()
1235 pkt_len += frag_size; in am65_cpsw_nuss_ndo_slave_xmit()
/drivers/net/ethernet/google/gve/
gve_tx_dqo.c
587 unsigned int frag_size = skb_frag_size(&shinfo->frags[i]); in gve_num_buffer_descs_needed() local
589 num_descs += gve_num_descs_per_buf(frag_size); in gve_num_buffer_descs_needed()
/drivers/net/ethernet/marvell/mvpp2/
mvpp2_main.c
366 if (likely(pool->frag_size <= PAGE_SIZE)) in mvpp2_frag_alloc()
367 return netdev_alloc_frag(pool->frag_size); in mvpp2_frag_alloc()
369 return kmalloc(pool->frag_size, GFP_ATOMIC); in mvpp2_frag_alloc()
377 else if (likely(pool->frag_size <= PAGE_SIZE)) in mvpp2_frag_free()
1135 new_pool->frag_size = in mvpp2_bm_pool_use()
1183 new_pool->frag_size = in mvpp2_bm_pool_use_percpu()
3918 unsigned int frag_size; in mvpp2_rx() local
3967 if (bm_pool->frag_size > PAGE_SIZE) in mvpp2_rx()
3968 frag_size = 0; in mvpp2_rx()
3970 frag_size = bm_pool->frag_size; in mvpp2_rx()
[all …]
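
mvpp2 picks its allocator by whether the buffer fits in a page: the page-fragment allocators (napi_alloc_frag()/netdev_alloc_frag()) when frag_size <= PAGE_SIZE, kmalloc otherwise. build_skb() is then passed the real frag_size or 0, since 0 tells it the head came from kmalloc. mvneta_rx_hwbm() at the bottom of this page applies the same build_skb() convention. A sketch of the decision:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
    unsigned int sizes[] = { 2048, 4096, 9216 };

    for (int i = 0; i < 3; i++) {
        unsigned int frag_size = sizes[i];

        if (frag_size <= PAGE_SIZE)
            printf("%u: page-fragment alloc, build_skb(data, %u)\n",
                   frag_size, frag_size);
        else
            printf("%u: kmalloc, build_skb(data, 0)\n", frag_size);
    }
    return 0;
}
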
/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.c
3535 u32 frag_size = size - pull_len; in hns3_handle_rx_copybreak() local
3536 void *frag = napi_alloc_frag(frag_size); in hns3_handle_rx_copybreak()
3547 memcpy(frag, desc_cb->buf + frag_offset, frag_size); in hns3_handle_rx_copybreak()
3549 offset_in_page(frag), frag_size, frag_size); in hns3_handle_rx_copybreak()
3563 u32 frag_size = size - pull_len; in hns3_nic_reuse_page() local
3569 frag_size, truesize); in hns3_nic_reuse_page()
3598 } else if (frag_size <= ring->rx_copybreak) { in hns3_nic_reuse_page()
3613 frag_size, truesize); in hns3_nic_reuse_page()
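
hns3 adds a copybreak threshold on top: payloads at or below rx_copybreak are memcpy'd into a freshly allocated napi fragment so the large RX page can be reused in place, while bigger payloads are attached to the skb by reference. A sketch of the decision (threshold and sizes are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned int rx_copybreak = 256; /* illustrative threshold */
    unsigned int sizes[] = { 64, 256, 1500 };

    for (int i = 0; i < 3; i++) {
        unsigned int frag_size = sizes[i];

        if (frag_size <= rx_copybreak)
            printf("%u bytes: copy into small frag, recycle RX page\n",
                   frag_size);
        else
            printf("%u bytes: attach page fragment by reference\n",
                   frag_size);
    }
    return 0;
}
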
/drivers/net/ethernet/marvell/
mvneta_bm.c
188 hwbm_pool->frag_size = in mvneta_bm_pool_use()
mvneta.c
1190 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + in mvneta_bm_update_mtu()
2489 u32 rx_status, frag_size; in mvneta_rx_hwbm() local
2557 frag_size = bm_pool->hwbm_pool.frag_size; in mvneta_rx_hwbm()
2559 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); in mvneta_rx_hwbm()
