
Lines Matching refs:offset

389 nc->frag.offset = 0; in __netdev_alloc_frag()
392 if (nc->frag.offset + fragsz > nc->frag.size) { in __netdev_alloc_frag()
405 nc->frag.offset = 0; in __netdev_alloc_frag()
408 data = page_address(nc->frag.page) + nc->frag.offset; in __netdev_alloc_frag()
409 nc->frag.offset += fragsz; in __netdev_alloc_frag()
1376 int offset = skb_headlen(skb); in ___pskb_trim() local
1386 if (offset >= len) in ___pskb_trim()
1390 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1393 offset = end; in ___pskb_trim()
1397 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1412 int end = offset + frag->len; in ___pskb_trim()
1428 offset = end; in ___pskb_trim()
1433 unlikely((err = pskb_trim(frag, len - offset)))) in ___pskb_trim()
1612 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
1618 if (offset > (int)skb->len - len) in skb_copy_bits()
1622 if ((copy = start - offset) > 0) { in skb_copy_bits()
1625 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
1628 offset += copy; in skb_copy_bits()
1636 WARN_ON(start > offset + len); in skb_copy_bits()
1639 if ((copy = end - offset) > 0) { in skb_copy_bits()
1647 vaddr + f->page_offset + offset - start, in skb_copy_bits()
1653 offset += copy; in skb_copy_bits()
1662 WARN_ON(start > offset + len); in skb_copy_bits()
1665 if ((copy = end - offset) > 0) { in skb_copy_bits()
1668 if (skb_copy_bits(frag_iter, offset - start, to, copy)) in skb_copy_bits()
1672 offset += copy; in skb_copy_bits()
1696 unsigned int *offset, in linear_to_page() argument
1704 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); in linear_to_page()
1706 memcpy(page_address(pfrag->page) + pfrag->offset, in linear_to_page()
1707 page_address(page) + *offset, *len); in linear_to_page()
1708 *offset = pfrag->offset; in linear_to_page()
1709 pfrag->offset += *len; in linear_to_page()
1716 unsigned int offset) in spd_can_coalesce() argument
1720 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce()
1721 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce()
1729 unsigned int *len, unsigned int offset, in spd_fill_page() argument
1737 page = linear_to_page(page, len, &offset, sk); in spd_fill_page()
1741 if (spd_can_coalesce(spd, page, offset)) { in spd_fill_page()
1748 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page()
1794 unsigned int *offset, unsigned int *len, in __skb_splice_bits() argument
1807 offset, len, spd, in __skb_splice_bits()
1820 offset, len, spd, false, sk, pipe)) in __skb_splice_bits()
1833 int skb_splice_bits(struct sk_buff *skb, unsigned int offset, in skb_splice_bits() argument
1855 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) in skb_splice_bits()
1866 if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) in skb_splice_bits()
1901 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
1907 if (offset > (int)skb->len - len) in skb_store_bits()
1910 if ((copy = start - offset) > 0) { in skb_store_bits()
1913 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
1916 offset += copy; in skb_store_bits()
1924 WARN_ON(start > offset + len); in skb_store_bits()
1927 if ((copy = end - offset) > 0) { in skb_store_bits()
1934 memcpy(vaddr + frag->page_offset + offset - start, in skb_store_bits()
1940 offset += copy; in skb_store_bits()
1949 WARN_ON(start > offset + len); in skb_store_bits()
1952 if ((copy = end - offset) > 0) { in skb_store_bits()
1955 if (skb_store_bits(frag_iter, offset - start, in skb_store_bits()
1960 offset += copy; in skb_store_bits()
1974 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
1978 int i, copy = start - offset; in __skb_checksum()
1986 csum = ops->update(skb->data + offset, copy, csum); in __skb_checksum()
1989 offset += copy; in __skb_checksum()
1997 WARN_ON(start > offset + len); in __skb_checksum()
2000 if ((copy = end - offset) > 0) { in __skb_checksum()
2008 offset - start, copy, 0); in __skb_checksum()
2013 offset += copy; in __skb_checksum()
2022 WARN_ON(start > offset + len); in __skb_checksum()
2025 if ((copy = end - offset) > 0) { in __skb_checksum()
2029 csum2 = __skb_checksum(frag_iter, offset - start, in __skb_checksum()
2034 offset += copy; in __skb_checksum()
2045 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2053 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
2059 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2063 int i, copy = start - offset; in skb_copy_and_csum_bits()
2071 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2075 offset += copy; in skb_copy_and_csum_bits()
2083 WARN_ON(start > offset + len); in skb_copy_and_csum_bits()
2086 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
2096 offset - start, to, in skb_copy_and_csum_bits()
2102 offset += copy; in skb_copy_and_csum_bits()
2113 WARN_ON(start > offset + len); in skb_copy_and_csum_bits()
2116 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
2120 offset - start, in skb_copy_and_csum_bits()
2125 offset += copy; in skb_copy_and_csum_bits()
2185 unsigned int offset; in skb_zerocopy() local
2202 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
2203 __skb_fill_page_desc(to, 0, page, offset, plen); in skb_zerocopy()
2774 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, in skb_ts_get_next_block() argument
2778 return skb_seq_read(offset, text, TS_SKB_CB(state)); in skb_ts_get_next_block()
2827 int (*getfrag)(void *from, char *to, int offset, in skb_append_datato_frags() argument
2833 int offset = 0; in skb_append_datato_frags() local
2846 copy = min_t(int, length, pfrag->size - pfrag->offset); in skb_append_datato_frags()
2848 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, in skb_append_datato_frags()
2849 offset, copy, 0, skb); in skb_append_datato_frags()
2854 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, in skb_append_datato_frags()
2857 pfrag->offset += copy; in skb_append_datato_frags()
2864 offset += copy; in skb_append_datato_frags()
2914 unsigned int offset = doffset; in skb_segment() local
2944 len = head_skb->len - offset; in skb_segment()
2948 hsize = skb_headlen(head_skb) - offset; in skb_segment()
2964 while (pos < offset + len) { in skb_segment()
2968 if (pos + size > offset + len) in skb_segment()
3028 nskb->csum = skb_copy_and_csum_bits(head_skb, offset, in skb_segment()
3038 skb_copy_from_linear_data_offset(head_skb, offset, in skb_segment()
3044 while (pos < offset + len) { in skb_segment()
3073 if (pos < offset) { in skb_segment()
3074 nskb_frag->page_offset += offset - pos; in skb_segment()
3075 skb_frag_size_sub(nskb_frag, offset - pos); in skb_segment()
3080 if (pos + size <= offset + len) { in skb_segment()
3085 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); in skb_segment()
3105 } while ((offset += len) < head_skb->len); in skb_segment()
3123 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive() local
3136 if (headlen <= offset) { in skb_gro_receive()
3145 offset -= headlen; in skb_gro_receive()
3155 frag->page_offset += offset; in skb_gro_receive()
3156 skb_frag_size_sub(frag, offset); in skb_gro_receive()
3172 unsigned int first_size = headlen - offset; in skb_gro_receive()
3180 offset; in skb_gro_receive()
3240 if (offset > headlen) { in skb_gro_receive()
3241 unsigned int eat = offset - headlen; in skb_gro_receive()
3247 offset = headlen; in skb_gro_receive()
3250 __skb_pull(skb, offset); in skb_gro_receive()
3299 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in __skb_to_sgvec() argument
3302 int i, copy = start - offset; in __skb_to_sgvec()
3309 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
3313 offset += copy; in __skb_to_sgvec()
3319 WARN_ON(start > offset + len); in __skb_to_sgvec()
3322 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
3328 frag->page_offset+offset-start); in __skb_to_sgvec()
3332 offset += copy; in __skb_to_sgvec()
3340 WARN_ON(start > offset + len); in __skb_to_sgvec()
3343 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
3346 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, in __skb_to_sgvec()
3350 offset += copy; in __skb_to_sgvec()
3378 int offset, int len) in skb_to_sgvec_nomark() argument
3380 return __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec_nomark()
3384 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
3386 int nsg = __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec()
3995 unsigned int offset; in skb_try_coalesce() local
4007 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
4010 page, offset, skb_headlen(from)); in skb_try_coalesce()
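The matches above are dominated by one recurring idiom: each helper (skb_copy_bits(), skb_store_bits(), __skb_checksum(), skb_copy_and_csum_bits(), __skb_to_sgvec()) walks the three places an skb can keep payload -- the linear head, the page frags, and the skbs chained on frag_list -- while tracking a running "offset" against per-area "start"/"end" bookkeeping. Below is a minimal sketch of that pattern, not code from the file itself; example_walk() is a hypothetical name and the "act on ..." comments stand in for whatever each real helper does with the bytes.

#include <linux/errno.h>
#include <linux/skbuff.h>

/*
 * Hedged sketch of the offset/start/end walk shared by the helpers in the
 * listing: consume the linear head first, then each page fragment, then
 * recurse into the frag_list.  "offset" is where the caller wants to start
 * inside the skb, "len" how many bytes remain to handle.
 */
static int example_walk(const struct sk_buff *skb, int offset, int len)
{
	int start = skb_headlen(skb);	/* bytes held in the linear area */
	struct sk_buff *frag_iter;
	int i, copy;

	/* 1. Linear header area. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		/* ... act on skb->data + offset for "copy" bytes ... */
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* 2. Page fragments. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			/* ... act on the frag page, offset - start into it ... */
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	/* 3. skbs chained on frag_list: recurse with a rebased offset. */
	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (example_walk(frag_iter, offset - start, copy))
				return -EFAULT;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	return len ? -EFAULT : 0;
}

The start/end bookkeeping is what lets these helpers operate on nonlinear skbs without flattening them first: "start" is where the current area begins inside the skb, "end" where it stops, and "copy = end - offset" is how much of the caller's requested range falls into that area.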