Searched refs:shinfo (Results 1 – 22 of 22) sorted by relevance

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
601 const struct skb_shared_info *shinfo, in is_inline() argument
609 if (shinfo->nr_frags == 1) { in is_inline()
610 ptr = skb_frag_address_safe(&shinfo->frags[0]); in is_inline()
616 if (shinfo->nr_frags) in is_inline()
633 const struct skb_shared_info *shinfo, in get_real_size() argument
642 if (shinfo->gso_size) { in get_real_size()
648 real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + in get_real_size()
664 shinfo, pfrag); in get_real_size()
670 (shinfo->nr_frags + 1) * DS_SIZE; in get_real_size()
678 const struct skb_shared_info *shinfo, in build_inline_wqe() argument
[all …]
/kernel/linux/linux-5.10/include/linux/
virtio_net.h
145 struct skb_shared_info *shinfo = skb_shinfo(skb); in virtio_net_hdr_to_skb() local
153 shinfo->gso_size = gso_size; in virtio_net_hdr_to_skb()
154 shinfo->gso_type = gso_type; in virtio_net_hdr_to_skb()
157 shinfo->gso_type |= SKB_GSO_DODGY; in virtio_net_hdr_to_skb()
158 shinfo->gso_segs = 0; in virtio_net_hdr_to_skb()
skbuff.h
4489 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, in skb_increase_gso_size() argument
4492 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_increase_gso_size()
4494 shinfo->gso_size += increment; in skb_increase_gso_size()
4497 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo, in skb_decrease_gso_size() argument
4500 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_decrease_gso_size()
4502 shinfo->gso_size -= decrement; in skb_decrease_gso_size()
4511 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_warn_if_lro() local
4513 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && in skb_warn_if_lro()
4514 unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
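
The skbuff.h helpers above (skb_increase_gso_size/skb_decrease_gso_size) refuse to touch a gso_size that holds the GSO_BY_FRAGS sentinel before adding or subtracting. A minimal userspace sketch of that guard, assuming a simplified mock struct rather than the kernel's skb_shared_info (mock_shared_info and adjust_gso_size are illustrative names only):

/* Mock of the gso_size adjustment guard; not kernel code. */
#include <stdio.h>

#define GSO_BY_FRAGS 0xFFFF   /* sentinel: "segment by frag boundaries" */

struct mock_shared_info {
	unsigned short gso_size;
};

static void adjust_gso_size(struct mock_shared_info *shinfo, int delta)
{
	/* GSO_BY_FRAGS is a marker, not a byte count, so it must never
	 * be incremented or decremented. */
	if (shinfo->gso_size == GSO_BY_FRAGS) {
		fprintf(stderr, "refusing to adjust GSO_BY_FRAGS\n");
		return;
	}
	shinfo->gso_size += delta;
}

int main(void)
{
	struct mock_shared_info si = { .gso_size = 1448 };

	adjust_gso_size(&si, -20);   /* e.g. after stripping 20 bytes of encap */
	printf("gso_size = %u\n", (unsigned int)si.gso_size);

	si.gso_size = GSO_BY_FRAGS;
	adjust_gso_size(&si, 8);     /* rejected: sentinel value */
	return 0;
}
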
/kernel/linux/linux-5.10/drivers/net/xen-netback/
netback.c
385 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_get_requests() local
386 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
394 nr_slots = shinfo->nr_frags + 1; in xenvif_get_requests()
449 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
450 shinfo->nr_frags++, gop++) { in xenvif_get_requests()
455 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
465 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
466 frags = shinfo->frags; in xenvif_get_requests()
468 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; in xenvif_get_requests()
469 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
[all …]
/kernel/linux/linux-5.10/net/core/
skbuff.c
186 struct skb_shared_info *shinfo; in __alloc_skb() local
238 shinfo = skb_shinfo(skb); in __alloc_skb()
239 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); in __alloc_skb()
240 atomic_set(&shinfo->dataref, 1); in __alloc_skb()
268 struct skb_shared_info *shinfo; in __build_skb_around() local
284 shinfo = skb_shinfo(skb); in __build_skb_around()
285 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); in __build_skb_around()
286 atomic_set(&shinfo->dataref, 1); in __build_skb_around()
609 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data() local
614 &shinfo->dataref)) in skb_release_data()
[all …]
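
Both skbuff.c hits above use the same initialisation idiom: zero the shared info only up to the dataref member, then set dataref to 1 explicitly. A userspace sketch of that idiom, assuming a simplified mock layout (plain int stands in for atomic_t, and the field set is illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct mock_shared_info {
	unsigned char nr_frags;
	unsigned short gso_size;
	unsigned short gso_segs;
	unsigned int gso_type;
	int dataref;               /* reference count, kept outside the zeroed prefix */
};

static void init_shared_info(struct mock_shared_info *shinfo)
{
	/* Clear only the metadata fields; the reference count is then
	 * set explicitly rather than left at zero. */
	memset(shinfo, 0, offsetof(struct mock_shared_info, dataref));
	shinfo->dataref = 1;
}

int main(void)
{
	struct mock_shared_info si;

	init_shared_info(&si);
	printf("nr_frags=%u gso_size=%u dataref=%d\n",
	       (unsigned int)si.nr_frags, (unsigned int)si.gso_size, si.dataref);
	return 0;
}
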
lwt_bpf.c
527 struct skb_shared_info *shinfo = skb_shinfo(skb); in handle_gso_type() local
530 shinfo->gso_type |= gso_type; in handle_gso_type()
531 skb_decrease_gso_size(shinfo, encap_len); in handle_gso_type()
532 shinfo->gso_segs = 0; in handle_gso_type()
filter.c
3259 struct skb_shared_info *shinfo = skb_shinfo(skb); in bpf_skb_proto_4_to_6() local
3264 if (shinfo->gso_type & SKB_GSO_TCPV4) { in bpf_skb_proto_4_to_6()
3265 shinfo->gso_type &= ~SKB_GSO_TCPV4; in bpf_skb_proto_4_to_6()
3266 shinfo->gso_type |= SKB_GSO_TCPV6; in bpf_skb_proto_4_to_6()
3270 shinfo->gso_type |= SKB_GSO_DODGY; in bpf_skb_proto_4_to_6()
3271 shinfo->gso_segs = 0; in bpf_skb_proto_4_to_6()
3298 struct skb_shared_info *shinfo = skb_shinfo(skb); in bpf_skb_proto_6_to_4() local
3303 if (shinfo->gso_type & SKB_GSO_TCPV6) { in bpf_skb_proto_6_to_4()
3304 shinfo->gso_type &= ~SKB_GSO_TCPV6; in bpf_skb_proto_6_to_4()
3305 shinfo->gso_type |= SKB_GSO_TCPV4; in bpf_skb_proto_6_to_4()
[all …]
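
bpf_skb_proto_4_to_6() above swaps the TCPv4 GSO type for TCPv6, marks the offload metadata as dodgy, and zeroes gso_segs so the stack recomputes it after the header rewrite (the 6-to-4 path mirrors this). A sketch of that bookkeeping with mock flag values (the MOCK_GSO_* constants and the struct are stand-ins, not the kernel's definitions):

#include <stdio.h>

#define MOCK_GSO_TCPV4 0x1
#define MOCK_GSO_TCPV6 0x2
#define MOCK_GSO_DODGY 0x4

struct mock_shared_info {
	unsigned int gso_type;
	unsigned short gso_segs;
};

static void gso_4_to_6(struct mock_shared_info *shinfo)
{
	if (shinfo->gso_type & MOCK_GSO_TCPV4) {
		shinfo->gso_type &= ~MOCK_GSO_TCPV4;
		shinfo->gso_type |= MOCK_GSO_TCPV6;
	}
	/* Header sizes changed underneath the device, so the cached
	 * segmentation state can no longer be trusted. */
	shinfo->gso_type |= MOCK_GSO_DODGY;
	shinfo->gso_segs = 0;
}

int main(void)
{
	struct mock_shared_info si = { .gso_type = MOCK_GSO_TCPV4, .gso_segs = 10 };

	gso_4_to_6(&si);
	printf("gso_type=0x%x gso_segs=%u\n", si.gso_type, (unsigned int)si.gso_segs);
	return 0;
}
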
dev.c
3723 const struct skb_shared_info *shinfo = skb_shinfo(skb); in qdisc_pkt_len_init() local
3730 if (shinfo->gso_size && skb_transport_header_was_set(skb)) { in qdisc_pkt_len_init()
3732 u16 gso_segs = shinfo->gso_segs; in qdisc_pkt_len_init()
3738 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { in qdisc_pkt_len_init()
3754 if (shinfo->gso_type & SKB_GSO_DODGY) in qdisc_pkt_len_init()
3756 shinfo->gso_size); in qdisc_pkt_len_init()
/kernel/linux/linux-5.10/net/ipv4/
tcp_output.c
1494 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_fragment_tstamp() local
1497 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1499 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1501 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1503 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1632 struct skb_shared_info *shinfo; in __pskb_trim_head() local
1644 shinfo = skb_shinfo(skb); in __pskb_trim_head()
1645 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1646 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1652 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
[all …]
tcp_ipv4.c
1772 struct skb_shared_info *shinfo; in tcp_add_backlog() local
1828 shinfo = skb_shinfo(skb); in tcp_add_backlog()
1829 gso_size = shinfo->gso_size ?: skb->len; in tcp_add_backlog()
1830 gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1832 shinfo = skb_shinfo(tail); in tcp_add_backlog()
1833 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); in tcp_add_backlog()
1834 tail_gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1862 shinfo->gso_size = max(gso_size, tail_gso_size); in tcp_add_backlog()
1863 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF); in tcp_add_backlog()
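
tcp_add_backlog() above treats a non-GSO skb as one segment of its full length, keeps the larger gso_size of the two skbs being coalesced, and clamps the summed segment count so it still fits the 16-bit gso_segs field. A small userspace sketch of that merge, assuming mock types and a simplified tail length in place of tail->len - hdrlen:

#include <stdio.h>

struct mock_gso {
	unsigned short gso_size;
	unsigned short gso_segs;
};

static void coalesce_gso(struct mock_gso *tail, unsigned int tail_len,
			 const struct mock_gso *skb, unsigned int skb_len)
{
	unsigned int gso_size  = skb->gso_size ? skb->gso_size : skb_len;
	unsigned int gso_segs  = skb->gso_segs ? skb->gso_segs : 1;
	unsigned int tail_size = tail->gso_size ? tail->gso_size : tail_len;
	unsigned int tail_segs = tail->gso_segs ? tail->gso_segs : 1;
	unsigned int segs = gso_segs + tail_segs;

	tail->gso_size = gso_size > tail_size ? gso_size : tail_size;
	tail->gso_segs = segs > 0xFFFF ? 0xFFFF : segs;   /* clamp to u16 */
}

int main(void)
{
	struct mock_gso tail = { 0, 0 };                           /* plain skb */
	struct mock_gso in   = { .gso_size = 1448, .gso_segs = 3 };

	coalesce_gso(&tail, 1000, &in, 4344);
	printf("merged gso_size=%u gso_segs=%u\n",
	       (unsigned int)tail.gso_size, (unsigned int)tail.gso_segs);
	return 0;
}
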
tcp.c
467 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_tx_timestamp() local
470 sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); in tcp_tx_timestamp()
474 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; in tcp_tx_timestamp()
tcp_input.c
3146 const struct skb_shared_info *shinfo; in tcp_ack_tstamp() local
3152 shinfo = skb_shinfo(skb); in tcp_ack_tstamp()
3153 if (!before(shinfo->tskey, prior_snd_una) && in tcp_ack_tstamp()
3154 before(shinfo->tskey, tcp_sk(sk)->snd_una)) { in tcp_ack_tstamp()
/kernel/linux/linux-5.10/drivers/net/wireless/mediatek/mt76/
dma.c
520 struct skb_shared_info *shinfo = skb_shinfo(skb); in mt76_add_fragment() local
521 int nr_frags = shinfo->nr_frags; in mt76_add_fragment()
523 if (nr_frags < ARRAY_SIZE(shinfo->frags)) { in mt76_add_fragment()
536 if (nr_frags < ARRAY_SIZE(shinfo->frags)) in mt76_add_fragment()
/kernel/linux/linux-5.10/net/sched/
sch_cake.c
1351 const struct skb_shared_info *shinfo = skb_shinfo(skb); in cake_overhead() local
1359 if (!shinfo->gso_size) in cake_overhead()
1366 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | in cake_overhead()
1383 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) in cake_overhead()
1385 shinfo->gso_size); in cake_overhead()
1387 segs = shinfo->gso_segs; in cake_overhead()
1389 len = shinfo->gso_size + hdr_len; in cake_overhead()
1390 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
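
cake_overhead() above derives per-segment wire lengths from gso_size, gso_segs and the header length: every full segment carries gso_size + hdr_len bytes, and the last segment carries whatever payload remains. A worked numeric example of that arithmetic (all values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int gso_size = 1448;              /* MSS */
	unsigned int hdr_len  = 52;                /* L3 + L4 header bytes */
	unsigned int payload  = 3500;
	unsigned int skb_len  = hdr_len + payload; /* GSO superpacket length */

	/* gso_segs roughly as the stack would have precomputed it */
	unsigned int segs = (payload + gso_size - 1) / gso_size;

	unsigned int len      = gso_size + hdr_len;              /* full segment */
	unsigned int last_len = skb_len - gso_size * (segs - 1); /* final segment */

	printf("segs=%u full=%u last=%u\n", segs, len, last_len);
	/* prints: segs=3 full=1500 last=656 */
	return 0;
}
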
/kernel/linux/linux-5.10/drivers/net/ethernet/mscc/
ocelot_net.c
325 struct skb_shared_info *shinfo = skb_shinfo(skb); in ocelot_port_xmit() local
347 if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) { in ocelot_port_xmit()
365 if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { in ocelot_port_xmit()
/kernel/linux/linux-5.10/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.h
613 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
hns3_enet.c
1276 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) in hns3_shinfo_pack() argument
1281 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/
bnx2.c
2954 struct skb_shared_info *shinfo; in bnx2_reuse_rx_skb_pages() local
2956 shinfo = skb_shinfo(skb); in bnx2_reuse_rx_skb_pages()
2957 shinfo->nr_frags--; in bnx2_reuse_rx_skb_pages()
2958 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]); in bnx2_reuse_rx_skb_pages()
2959 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL); in bnx2_reuse_rx_skb_pages()
/kernel/linux/linux-5.10/drivers/net/ethernet/realtek/
r8169_main.c
4187 struct skb_shared_info *shinfo = skb_shinfo(skb); in rtl8169_tso_csum_v2() local
4188 u32 mss = shinfo->gso_size; in rtl8169_tso_csum_v2()
4191 if (shinfo->gso_type & SKB_GSO_TCPV4) { in rtl8169_tso_csum_v2()
4193 } else if (shinfo->gso_type & SKB_GSO_TCPV6) { in rtl8169_tso_csum_v2()
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/e1000e/
netdev.c
1523 struct skb_shared_info *shinfo; in e1000_clean_jumbo_rx_irq() local
1579 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1580 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1591 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1592 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
/kernel/linux/patches/linux-5.10/imx8mm_patch/patches/
0008_linux_net.patch
197 + struct skb_shared_info *shinfo;
202 + shinfo = skb_shinfo(skb);
203 + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
204 + atomic_set(&shinfo->dataref, 1);
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/bnxt/
bnxt.c
1083 struct skb_shared_info *shinfo; in bnxt_rx_pages() local
1086 shinfo = skb_shinfo(skb); in bnxt_rx_pages()
1087 nr_frags = --shinfo->nr_frags; in bnxt_rx_pages()
1088 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); in bnxt_rx_pages()