/net/core/

  skbuff.c
      570  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_coalesce_rx_frag()
      619  __skb_frag_unref(&shinfo->frags[i]);  in skb_release_data()
      794  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_dump()
     1383  skb_frag_t *f = &skb_shinfo(skb)->frags[i];  in skb_copy_ubufs()
     1589  skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];  in __pskb_copy_fclone()
     1651  offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));  in pskb_expand_head()
     1754  offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));  in __skb_unclone_keeptruesize()
     1984  int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);  in ___pskb_trim()
     1991  skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);  in ___pskb_trim()
     2123  int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);  in __pskb_pull_tail()
      [all …]
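These skbuff.c hits show the core pattern: an skb's paged payload lives in the frags[] array of struct skb_shared_info, indexed up to nr_frags and handled through the skb_frag_*() accessors. A minimal sketch of that walk; the helper name is hypothetical:

    #include <linux/skbuff.h>

    /* Illustrative walk over an skb's paged fragments, in the style of
     * skb_dump() and ___pskb_trim(); the helper name is hypothetical. */
    static unsigned int frags_total_size(const struct sk_buff *skb)
    {
            const struct skb_shared_info *shinfo = skb_shinfo(skb);
            unsigned int bytes = 0;
            int i;

            for (i = 0; i < shinfo->nr_frags; i++) {
                    /* each skb_frag_t is a page + offset + length triple */
                    bytes += skb_frag_size(&shinfo->frags[i]);
            }
            return bytes;   /* the paged portion, i.e. skb->data_len */
    }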
  tso.c
       62  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_build_data()
       88  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_start()
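tso.c drives the same array through a cursor, next_frag_idx, so a driver can emit TSO segments fragment by fragment. A hedged sketch of the cursor pattern; struct frag_cursor is a made-up stand-in, not the real struct tso_t:

    #include <linux/skbuff.h>

    /* Hypothetical stand-in for the cursor state struct tso_t keeps. */
    struct frag_cursor {
            int next_frag_idx;      /* next slot in shinfo->frags[] */
            unsigned int left;      /* unconsumed bytes in current fragment */
    };

    /* Step to the next fragment once the current one is drained. */
    static void frag_cursor_advance(struct sk_buff *skb, struct frag_cursor *c)
    {
            if (!c->left && c->next_frag_idx < skb_shinfo(skb)->nr_frags) {
                    skb_frag_t *frag = &skb_shinfo(skb)->frags[c->next_frag_idx++];

                    c->left = skb_frag_size(frag);
            }
    }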
  pktgen.c
     2625  int frags = pkt_dev->nfrags;  in pktgen_finalize_skb() (local)
     2630  if (frags > MAX_SKB_FRAGS)  in pktgen_finalize_skb()
     2631  frags = MAX_SKB_FRAGS;  in pktgen_finalize_skb()
     2632  len = datalen - frags * PAGE_SIZE;  in pktgen_finalize_skb()
     2635  datalen = frags * PAGE_SIZE;  in pktgen_finalize_skb()
     2639  frag_len = (datalen/frags) < PAGE_SIZE ?  in pktgen_finalize_skb()
     2640  (datalen/frags) : PAGE_SIZE;  in pktgen_finalize_skb()
     2653  skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);  in pktgen_finalize_skb()
     2655  if (i == (frags - 1))  in pktgen_finalize_skb()
     2656  skb_frag_size_set(&skb_shinfo(skb)->frags[i],  in pktgen_finalize_skb()
      [all …]
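pktgen_finalize_skb() clamps the requested fragment count to MAX_SKB_FRAGS and spreads the payload across at most page-sized fragments, leaving any excess in the linear area. The arithmetic, sketched as a hypothetical helper that assumes datalen > 0 and nfrags > 0:

    #include <linux/skbuff.h>
    #include <linux/kernel.h>

    /* Sketch of pktgen's size split: returns the clamped fragment
     * count and the common per-fragment length. */
    static int split_datalen(int datalen, int nfrags, int *frag_len)
    {
            int frags = min_t(int, nfrags, MAX_SKB_FRAGS);

            *frag_len = min_t(int, datalen / frags, PAGE_SIZE);
            return frags;
    }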
  datagram.c
      437  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in __skb_datagram_iter()
      569  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_copy_datagram_from_iter()
      661  skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];  in __zerocopy_sg_from_iter()
  dev.c
     3457  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in illegal_highdma()
     5931  const skb_frag_t *frag0 = &pinfo->frags[0];  in skb_gro_reset_offset()
     5958  skb_frag_off_add(&pinfo->frags[0], grow);  in gro_pull_from_frag0()
     5959  skb_frag_size_sub(&pinfo->frags[0], grow);  in gro_pull_from_frag0()
     5961  if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {  in gro_pull_from_frag0()
     5963  memmove(pinfo->frags, pinfo->frags + 1,  in gro_pull_from_frag0()
     5964  --pinfo->nr_frags * sizeof(pinfo->frags[0]));  in gro_pull_from_frag0()
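GRO's fast path keeps the not-yet-pulled headers in frags[0] ("frag0"); gro_pull_from_frag0() copies grow bytes of them into the linear area, shrinks the fragment, and compacts the array when it empties. A sketch close to the referenced lines, with the source's BUG_ON and frag0 pointer caching omitted:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch of the frag0 pull: copy `grow` header bytes to the linear
     * area, trim frags[0], and compact the array if it became empty. */
    static void pull_from_frag0(struct sk_buff *skb, unsigned int grow)
    {
            struct skb_shared_info *pinfo = skb_shinfo(skb);

            memcpy(skb_tail_pointer(skb),
                   skb_frag_address(&pinfo->frags[0]), grow);
            skb->data_len -= grow;
            skb->tail += grow;

            skb_frag_off_add(&pinfo->frags[0], grow);
            skb_frag_size_sub(&pinfo->frags[0], grow);

            if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                    skb_frag_unref(skb, 0);         /* release the empty page */
                    memmove(pinfo->frags, pinfo->frags + 1,
                            --pinfo->nr_frags * sizeof(pinfo->frags[0]));
            }
    }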
/net/sunrpc/auth_gss/

  gss_krb5_crypto.c
      427  struct scatterlist frags[4];  (member)
      444  sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,  in decryptor()
      455  sg_mark_end(&desc->frags[desc->fragno - 1]);  in decryptor()
      457  skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,  in decryptor()
      464  sg_init_table(desc->frags, 4);  in decryptor()
      467  sg_set_page(&desc->frags[0], sg_page(sg), fraglen,  in decryptor()
      497  sg_init_table(desc.frags, 4);  in gss_decrypt_xdr_buf()
      771  sg_init_table(desc.frags, 4);  in gss_krb5_aes_decrypt()
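Here frags is not packet data at all but a four-entry scatterlist member that the krb5 decryptor batches page fragments into before handing them to the skcipher. A hedged sketch of that batching; the helper and its parameters are hypothetical:

    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Sketch of the decryptor's batching: collect up to four page
     * fragments into a scatterlist and terminate it for the cipher. */
    static int build_frag_sg(struct scatterlist *frags, struct page **pages,
                             unsigned int *lens, unsigned int *offs, int n)
    {
            int i;

            sg_init_table(frags, 4);
            for (i = 0; i < n && i < 4; i++)
                    sg_set_page(&frags[i], pages[i], lens[i], offs[i]);
            if (!i)
                    return -EINVAL;
            sg_mark_end(&frags[i - 1]);     /* the list may be shorter than 4 */
            return i;
    }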
/net/sctp/

  outqueue.c
     1205  union sctp_sack_variable *frags;  in sctp_sack_update_unack_data() (local)
     1211  frags = sack->variable;  in sctp_sack_update_unack_data()
     1213  unack_data -= ((ntohs(frags[i].gab.end) -  in sctp_sack_update_unack_data()
     1214  ntohs(frags[i].gab.start) + 1));  in sctp_sack_update_unack_data()
     1232  union sctp_sack_variable *frags = sack->variable;  in sctp_outq_sack() (local)
     1294  highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);  in sctp_outq_sack()
     1750  union sctp_sack_variable *frags;  in sctp_acked() (local)
     1769  frags = sack->variable;  in sctp_acked()
     1773  if (tsn_offset >= ntohs(frags[i].gab.start) &&  in sctp_acked()
     1774  tsn_offset <= ntohs(frags[i].gab.end))  in sctp_acked()
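In SCTP, frags are the SACK's gap ack blocks: each block acks the TSN range [start, end] expressed as offsets past the cumulative TSN ack, so a block accounts for end - start + 1 TSNs. The arithmetic from sctp_sack_update_unack_data(), sketched as a hypothetical helper:

    #include <linux/sctp.h>

    /* Sketch of the gap-ack accounting: each block covers
     * (end - start + 1) TSNs beyond the cumulative ack point. */
    static int gap_acked_tsns(const union sctp_sack_variable *frags,
                              int gap_ack_blocks)
    {
            int i, acked = 0;

            for (i = 0; i < gap_ack_blocks; i++)
                    acked += ntohs(frags[i].gab.end) -
                             ntohs(frags[i].gab.start) + 1;
            return acked;
    }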
/net/tipc/

  msg.c
      312  int pktmax, struct sk_buff_head *frags)  in tipc_msg_fragment() (argument)
      340  __skb_queue_tail(frags, _skb);  in tipc_msg_fragment()
      354  __skb_queue_purge(frags);  in tipc_msg_fragment()
      355  __skb_queue_head_init(frags);  in tipc_msg_fragment()
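tipc_msg_fragment() accumulates its fragments on a caller-supplied sk_buff_head and unwinds completely on failure, so the caller never sees a half-built fragment train. A sketch of that all-or-nothing shape; the helper is hypothetical and skips the actual header and payload copying:

    #include <linux/skbuff.h>

    /* Sketch of the queue-or-purge pattern in tipc_msg_fragment():
     * on allocation failure the partly built list is purged and reset. */
    static int queue_fragments(struct sk_buff_head *frags, int n, int pktmax)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct sk_buff *_skb = alloc_skb(pktmax, GFP_ATOMIC);

                    if (!_skb)
                            goto error;
                    __skb_queue_tail(frags, _skb);
            }
            return 0;
    error:
            __skb_queue_purge(frags);
            __skb_queue_head_init(frags);
            return -ENOMEM;
    }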
  link.c
     1944  struct sk_buff_head tmpxq, tnlq, frags;  in tipc_link_tnl_prepare() (local)
     1980  __skb_queue_head_init(&frags);  in tipc_link_tnl_prepare()
     2023  &frags);  in tipc_link_tnl_prepare()
     2029  pktcnt += skb_queue_len(&frags) - 1;  in tipc_link_tnl_prepare()
     2031  skb_queue_splice_tail_init(&frags, &tnlq);  in tipc_link_tnl_prepare()
  msg.h
     1193  int pktmax, struct sk_buff_head *frags);
/net/tls/

  tls_device.c
      134  __skb_frag_unref(&record->frags[i]);  in destroy_record()
      254  frag = &record->frags[record->num_frags - 1];  in tls_append_frag()
      292  frag = &record->frags[i];  in tls_push_record()
      331  tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),  in tls_device_record_close()
      348  frag = &record->frags[0];  in tls_create_new_record()
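A device-offload TLS record keeps its plaintext in a frags[] array of its own; tls_append_frag() grows the last fragment in place whenever the new bytes continue the same page run. The contiguity test, sketched with a hypothetical helper:

    #include <linux/skbuff.h>

    /* Sketch of the extend-in-place test tls_append_frag() applies:
     * true when (page, off) continues the record's last fragment. */
    static bool frag_extend_last(skb_frag_t *frags, int num_frags,
                                 struct page *page, unsigned int off,
                                 unsigned int sz)
    {
            skb_frag_t *last;

            if (!num_frags)
                    return false;
            last = &frags[num_frags - 1];
            if (skb_frag_page(last) != page ||
                skb_frag_off(last) + skb_frag_size(last) != off)
                    return false;           /* caller opens a new fragment */
            skb_frag_size_add(last, sz);
            return true;
    }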
  tls_device_fallback.c
      272  skb_frag_t *frag = &record->frags[i];  in fill_sg_in()
/net/ipv4/

  tcp.c
     1043  skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);  in do_tcp_sendpages()
     1364  skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);  in tcp_sendmsg_locked()
     1771  frag = skb_shinfo(skb)->frags;  in skb_advance_to_frag()
     1899  const skb_frag_t *frags = NULL;  in tcp_zerocopy_receive() (local)
     1964  frags = skb_advance_to_frag(skb, offset, &offset_frag);  in tcp_zerocopy_receive()
     1965  if (!frags || offset_frag)  in tcp_zerocopy_receive()
     1969  mappable_offset = find_next_mappable_frag(frags,  in tcp_zerocopy_receive()
     1975  pages[pg_idx] = skb_frag_page(frags);  in tcp_zerocopy_receive()
     1979  frags++;  in tcp_zerocopy_receive()
     4134  const skb_frag_t *f = &shi->frags[i];  in tcp_md5_hash_skb_data()
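The frags[i - 1] lines are TCP's coalescing path: when newly copied page data directly follows the last fragment, the sender grows that fragment rather than consuming a new slot; skb_can_coalesce() encapsulates the test. A sketch, assuming the caller has already checked i < MAX_SKB_FRAGS:

    #include <linux/skbuff.h>
    #include <linux/mm.h>

    /* Sketch of the coalesce-or-append step in tcp_sendmsg_locked();
     * the helper name is hypothetical. */
    static void append_page_data(struct sk_buff *skb, struct page *page,
                                 int off, int copy)
    {
            int i = skb_shinfo(skb)->nr_frags;

            if (skb_can_coalesce(skb, i, page, off)) {
                    skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
            } else {
                    get_page(page);
                    skb_fill_page_desc(skb, i, page, off, copy);
            }
            skb->len += copy;
            skb->data_len += copy;
            skb->truesize += copy;
    }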
  tcp_output.c
     1643  int size = skb_frag_size(&shinfo->frags[i]);  in __pskb_trim_head()
     1649  shinfo->frags[k] = shinfo->frags[i];  in __pskb_trim_head()
     1651  skb_frag_off_add(&shinfo->frags[k], eat);  in __pskb_trim_head()
     1652  skb_frag_size_sub(&shinfo->frags[k], eat);  in __pskb_trim_head()
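__pskb_trim_head() eats bytes off the front of the paged area: fully consumed fragments are dropped, the first survivor is trimmed, and survivors are packed down to slot k. A sketch close to the referenced lines, minus the skb->len bookkeeping:

    #include <linux/skbuff.h>

    /* Sketch of the fragment compaction in __pskb_trim_head(). */
    static void trim_frags_front(struct sk_buff *skb, int eat)
    {
            struct skb_shared_info *shinfo = skb_shinfo(skb);
            int i, k = 0;

            for (i = 0; i < shinfo->nr_frags; i++) {
                    int size = skb_frag_size(&shinfo->frags[i]);

                    if (size <= eat) {
                            skb_frag_unref(skb, i); /* fully consumed */
                            eat -= size;
                    } else {
                            shinfo->frags[k] = shinfo->frags[i];
                            if (eat) {              /* partially consumed */
                                    skb_frag_off_add(&shinfo->frags[k], eat);
                                    skb_frag_size_sub(&shinfo->frags[k], eat);
                                    eat = 0;
                            }
                            k++;
                    }
            }
            shinfo->nr_frags = k;
    }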
  inet_fragment.c
      462  plen += skb_frag_size(&skb_shinfo(head)->frags[i]);  in inet_frag_reasm_prepare()
  udp.c
      841  struct sk_buff *frags;  in udp4_hwcsum() (local)
      848  skb_walk_frags(skb, frags) {  in udp4_hwcsum()
      849  csum = csum_add(csum, frags->csum);  in udp4_hwcsum()
      850  hlen -= frags->len;  in udp4_hwcsum()
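This frags is the other kind of skb fragmentation: the skb_shinfo(skb)->frag_list chain of whole skbs, iterated with skb_walk_frags(). udp4_hwcsum() folds each chained skb's precomputed ->csum into the UDP checksum; the IPv6 entry below open-codes the same walk. Sketched:

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    /* Sketch of the frag_list checksum fold in udp4_hwcsum(); frag_list
     * chains whole skbs, unlike the paged frags[] array above. */
    static __wsum fold_frag_list_csum(struct sk_buff *skb, __wsum csum)
    {
            struct sk_buff *frags;

            skb_walk_frags(skb, frags)
                    csum = csum_add(csum, frags->csum);
            return csum;
    }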
/net/ipv6/

  udp.c
     1150  struct sk_buff *frags = skb_shinfo(skb)->frag_list;  in udp6_hwcsum_outgoing() (local)
     1153  if (!frags) {  in udp6_hwcsum_outgoing()
     1171  csum = csum_add(csum, frags->csum);  in udp6_hwcsum_outgoing()
     1172  } while ((frags = frags->next));  in udp6_hwcsum_outgoing()
/net/xfrm/

  xfrm_ipcomp.c
       75  frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;  in ipcomp_decompress()
/net/mac80211/

  ieee80211_i.h
      866  struct sk_buff_head frags;  (member)
      918  struct ieee80211_fragment_cache frags;  (member)
     1595  return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);  in txq_has_queue()
  sta_info.h
      670  struct ieee80211_fragment_cache frags;  (member)
  iface.c
      687  ieee80211_destroy_frag_cache(&sdata->frags);  in ieee80211_teardown_sdata()
     1949  ieee80211_init_frag_cache(&sdata->frags);  in ieee80211_if_add()
  tx.c
     1466  __skb_queue_head_init(&txqi->frags);  in ieee80211_txq_init()
     1508  ieee80211_purge_tx_queue(&local->hw, &txqi->frags);  in ieee80211_txq_purge()
     3649  skb = __skb_dequeue(&txqi->frags);  in ieee80211_tx_dequeue()
     3728  skb_queue_splice_tail(&tx.skbs, &txqi->frags);  in ieee80211_tx_dequeue()
     3866  (force || !skb_queue_empty(&txqi->frags) ||  in __ieee80211_schedule_txq()
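Per txq, mac80211 keeps frags (the sk_buff_head member from ieee80211_i.h above) as a holding queue for 802.11 fragments that did not fit in one dequeue: ieee80211_tx_dequeue() drains it before building anything new, then splices a fresh burst's leftovers back onto it. A sketch of that shape with hypothetical names:

    #include <linux/skbuff.h>

    /* Sketch of the dequeue-or-stash pattern: serve a stashed fragment
     * first, else take one from the newly built burst and stash the
     * rest for later dequeues. */
    static struct sk_buff *txq_dequeue_one(struct sk_buff_head *frags,
                                           struct sk_buff_head *built)
    {
            struct sk_buff *skb = __skb_dequeue(frags);

            if (skb)
                    return skb;             /* leftover from a prior burst */

            skb = __skb_dequeue(built);
            skb_queue_splice_tail_init(built, frags);
            return skb;                     /* NULL if both queues are empty */
    }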
  sta_info.c
      395  ieee80211_init_frag_cache(&sta->frags);  in sta_info_alloc()
     1110  ieee80211_destroy_frag_cache(&sta->frags);  in __sta_info_destroy_part2()
/net/kcm/

  kcmsock.c
      644  frag = &skb_shinfo(skb)->frags[fragidx];  in kcm_write_msgs()
      793  skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);  in kcm_sendpage()
     1003  skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);  in kcm_sendmsg()
/net/iucv/

  af_iucv.c
     1086  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in iucv_sock_sendmsg()
     1186  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in iucv_process_message()