Lines Matching +full:mmp +full:- +full:timer
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
7 * Florian La Roche, <rzsfl@rz.uni-sb.de>
31 #include <linux/dma-mapping.h>
56 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
62 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
71 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
81 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
88 * verification is set in skb->ip_summed. Possible values are:
94 * not in skb->csum. Thus, skb->csum is undefined in this case.
101 * if their checksums are okay. skb->csum is still undefined in this case
114 * skb->csum_level indicates the number of consecutive checksums found in
116 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
118 * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
121 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
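
To make the CHECKSUM_UNNECESSARY/csum_level contract concrete, here is a minimal editorial sketch (not part of this header) of a receive path whose hardware validated two stacked checksums; the function name is illustrative:

static void rx_mark_csums_ok(struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = 1;	/* two consecutive checksums verified */
}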
127 * packet as seen by netif_rx() and fills in skb->csum. This means the
131 * - Even if device supports only some protocols, but is able to produce
132 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
133 * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
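
A minimal sketch of the CHECKSUM_COMPLETE contract described above, assuming the device hands the driver a one's-complement sum over the whole packet (hw_sum and the function name are illustrative):

static void rx_report_complete(struct sk_buff *skb, __wsum hw_sum)
{
	skb->csum = hw_sum;	/* sum over the packet as seen by netif_rx() */
	skb->ip_summed = CHECKSUM_COMPLETE;
}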
142 * referred to by skb->csum_start + skb->csum_offset and any preceding
147 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
148 * in the skb->ip_summed for a packet. Values are:
153 * from skb->csum_start up to the end, and to record/write the checksum at
154 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
157 * checksum refers to a legitimate transport layer checksum -- it is the
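
When a driver cannot honour a CHECKSUM_PARTIAL request it must resolve the checksum itself before transmission; a hedged sketch using the stock software fallback, where device_can_csum() stands in for a hypothetical capability test:

static int tx_fixup_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !device_can_csum(skb))
		return skb_checksum_help(skb);	/* writes the checksum at
						 * csum_start + csum_offset */
	return 0;
}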
188 * D. Non-IP checksum (CRC) offloads
190 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
197 * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
200 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
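
A hedged sketch of the software fallback for the CRC offloads mentioned above, dispatching on skb->csum_not_inet (the function name is illustrative):

static int tx_fixup_crc_or_csum(struct sk_buff *skb)
{
	if (skb->csum_not_inet)		/* CRC32c (e.g. SCTP), not the Internet checksum */
		return skb_crc32c_csum_help(skb);
	return skb_checksum_help(skb);
}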
225 /* Maximum value in skb->csum_level */
230 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
232 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
265 /* always valid & non-NULL from FORWARD on, for physdev match */
317 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
325 * skb_frag_size() - Returns the size of a skb fragment
330 return frag->bv_len; in skb_frag_size()
334 * skb_frag_size_set() - Sets the size of a skb fragment
340 frag->bv_len = size; in skb_frag_size_set()
344 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
350 frag->bv_len += delta; in skb_frag_size_add()
354 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
360 frag->bv_len -= delta; in skb_frag_size_sub()
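
A small editorial usage sketch for the accessors above: summing the paged portion of an skb, which for a well-formed skb equals skb->data_len:

static unsigned int frags_total(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}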
364 * skb_frag_must_loop - Test if %p is a high memory page
377 * skb_frag_foreach_page - loop over pages in a fragment
380 * @f_off: offset from start of f->bv_page
384 * non-zero only on first page.
389 * A fragment can hold a compound page, in which case per-page
395 p_off = (f_off) & (PAGE_SIZE - 1), \
397 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
401 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
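
An editorial usage sketch for skb_frag_foreach_page(): walking the per-page chunks of one (possibly compound-page) fragment; the loop body is illustrative:

static void frag_touch_pages(const skb_frag_t *frag)
{
	struct page *p;
	u32 p_off, p_len, copied;

	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
			      p, p_off, p_len, copied) {
		void *va = kmap_atomic(p);
		/* ... touch p_len bytes at va + p_off ... */
		kunmap_atomic(va);
	}
}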
406 * struct skb_shared_hwtstamps - hardware time stamps
411 * skb->tstamp.
434 /* device driver supports TX zero-copy buffers */
483 } mmp; member
486 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
488 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
489 void mm_unaccount_pinned_pages(struct mmpin *mmp);
497 refcount_inc(&uarg->refcnt); in sock_zerocopy_get()
511 * the end of the header data, i.e. at skb->end.
540 * to the payload part of skb->data. The lower 16 bits hold references to
541 * the entire skb->data. A clone of a headerless skb holds the length of
542 * the header in skb->hdr_len.
544 * All users must obey the rule that the skb->data reference count must be
548 * care about modifications to the header part of skb->data.
551 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
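
A worked example of the encoding above: with SKB_DATAREF_SHIFT == 16, a dataref of (1 << 16) | 2 means two references to skb->data in total, one of which is payload-only. skb_header_cloned() then computes (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT) = 2 - 1 = 1, so exactly one user cares about the header and it may still be rewritten without a copy.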
613 * struct sk_buff - socket buffer
618 * for retransmit timer
634 * @csum_start: Offset from skb->head where checksumming should start
647 * @offload_fwd_mark: Packet was L2-forwarded in hardware
648 * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
658 * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
660 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
671 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
699 * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
712 * @users: User count - see {datagram,tcp}.c
928 /* only usable after checking ->active_extensions != 0 */
943 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
948 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
959 * skb_dst - returns skb dst_entry
969 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
972 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
976 * skb_dst_set - sets skb dst
985 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
989 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
1001 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; in skb_dst_set_noref()
1005 * skb_dst_is_noref - Test if skb dst isn't refcounted
1010 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); in skb_dst_is_noref()
1014 * skb_rtable - Returns the skb &rtable
1022 /* For mangling skb->pkt_type from user space side from applications
1032 * skb_napi_id - Returns the skb's NAPI id
1038 return skb->napi_id; in skb_napi_id()
1045 * skb_unref - decrement the skb's reference count
1054 if (likely(refcount_read(&skb->users) == 1)) in skb_unref()
1056 else if (likely(!refcount_dec_and_test(&skb->users))) in skb_unref()
1093 * alloc_skb - allocate a network buffer
1122 * skb_fclone_busy - check if fclone is busy
1137 return skb->fclone == SKB_FCLONE_ORIG && in skb_fclone_busy()
1138 refcount_read(&fclones->fclone_ref) > 1 && in skb_fclone_busy()
1139 fclones->skb2.sk == sk; in skb_fclone_busy()
1143 * alloc_skb_fclone - allocate a network buffer from fclone cache
1182 * skb_pad - zero pad the tail of an skb
1226 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1255 skb->hash = 0; in skb_clear_hash()
1256 skb->sw_hash = 0; in skb_clear_hash()
1257 skb->l4_hash = 0; in skb_clear_hash()
1262 if (!skb->l4_hash) in skb_clear_hash_if_not_l4()
1269 skb->l4_hash = is_l4; in __skb_set_hash()
1270 skb->sw_hash = is_sw; in __skb_set_hash()
1271 skb->hash = hash; in __skb_set_hash()
1370 if (!skb->l4_hash && !skb->sw_hash) in skb_get_hash()
1373 return skb->hash; in skb_get_hash()
1378 if (!skb->l4_hash && !skb->sw_hash) { in skb_get_hash_flowi6()
1385 return skb->hash; in skb_get_hash_flowi6()
1393 return skb->hash; in skb_get_hash_raw()
1398 to->hash = from->hash; in skb_copy_hash()
1399 to->sw_hash = from->sw_hash; in skb_copy_hash()
1400 to->l4_hash = from->l4_hash; in skb_copy_hash()
1407 to->decrypted = from->decrypted; in skb_copy_decrypted()
1414 return skb->head + skb->end; in skb_end_pointer()
1419 return skb->end; in skb_end_offset()
1424 return skb->end; in skb_end_pointer()
1429 return skb->end - skb->head; in skb_end_offset()
1438 return &skb_shinfo(skb)->hwtstamps; in skb_hwtstamps()
1443 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY; in skb_zcopy()
1456 skb_shinfo(skb)->destructor_arg = uarg; in skb_zcopy_set()
1457 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; in skb_zcopy_set()
1463 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); in skb_zcopy_set_nouarg()
1464 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; in skb_zcopy_set_nouarg()
1469 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; in skb_zcopy_is_nouarg()
1474 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); in skb_zcopy_get_nouarg()
1485 } else if (uarg->callback == sock_zerocopy_callback) { in skb_zcopy_clear()
1486 uarg->zerocopy = uarg->zerocopy && zerocopy; in skb_zcopy_clear()
1489 uarg->callback(uarg, zerocopy); in skb_zcopy_clear()
1492 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; in skb_zcopy_clear()
1503 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; in skb_zcopy_abort()
1509 skb->next = NULL; in skb_mark_not_on_list()
1512 /* Iterate through singly-linked GSO fragments of an skb. */
1514 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1515 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
1519 __list_del_entry(&skb->list); in skb_list_del_init()
1524 * skb_queue_empty - check if a queue is empty
1531 return list->next == (const struct sk_buff *) list; in skb_queue_empty()
1535 * skb_queue_empty_lockless - check if a queue is empty
1543 return READ_ONCE(list->next) == (const struct sk_buff *) list; in skb_queue_empty_lockless()
1548 * skb_queue_is_last - check if skb is the last entry in the queue
1557 return skb->next == (const struct sk_buff *) list; in skb_queue_is_last()
1561 * skb_queue_is_first - check if skb is the first entry in the queue
1570 return skb->prev == (const struct sk_buff *) list; in skb_queue_is_first()
1574 * skb_queue_next - return the next packet in the queue
1588 return skb->next; in skb_queue_next()
1592 * skb_queue_prev - return the prev packet in the queue
1606 return skb->prev; in skb_queue_prev()
1610 * skb_get - reference buffer
1618 refcount_inc(&skb->users); in skb_get()
1627 * skb_cloned - is the buffer a clone
1636 return skb->cloned && in skb_cloned()
1637 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; in skb_cloned()
1651 * skb_header_cloned - is the header a clone
1661 if (!skb->cloned) in skb_header_cloned()
1664 dataref = atomic_read(&skb_shinfo(skb)->dataref); in skb_header_cloned()
1665 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); in skb_header_cloned()
1680 * __skb_header_release - release reference to header
1685 skb->nohdr = 1; in __skb_header_release()
1686 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); in __skb_header_release()
1691 * skb_shared - is the buffer shared
1699 return refcount_read(&skb->users) != 1; in skb_shared()
1703 * skb_share_check - check if buffer is shared and if so clone it
1738 * skb_unshare - make a copy of a shared buffer
1768 * skb_peek - peek at the head of an &sk_buff_head
1782 struct sk_buff *skb = list_->next; in skb_peek()
1790 * __skb_peek - peek at the head of a non-empty &sk_buff_head
1797 return list_->next; in __skb_peek()
1801 * skb_peek_next - peek skb following the given one from a queue
1812 struct sk_buff *next = skb->next; in skb_peek_next()
1820 * skb_peek_tail - peek at the tail of an &sk_buff_head
1834 struct sk_buff *skb = READ_ONCE(list_->prev); in skb_peek_tail()
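
An editorial sketch of the locking contract for skb_peek(): peek under the queue lock and unlink explicitly before the reference is used outside it (the length test is illustrative):

static struct sk_buff *take_first_small(struct sk_buff_head *q,
					unsigned int max_len)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);
	if (skb && skb->len <= max_len)
		__skb_unlink(skb, q);
	else
		skb = NULL;
	spin_unlock_irqrestore(&q->lock, flags);
	return skb;
}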
1843 * skb_queue_len - get queue length
1850 return list_->qlen; in skb_queue_len()
1854 * skb_queue_len_lockless - get queue length
1862 return READ_ONCE(list_->qlen); in skb_queue_len_lockless()
1866 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1872 * the spinlock. It can also be used for on-stack sk_buff_head
1877 list->prev = list->next = (struct sk_buff *)list; in __skb_queue_head_init()
1878 list->qlen = 0; in __skb_queue_head_init()
1883 * this is needed for now since a whole lot of users of the skb-queue
1891 spin_lock_init(&list->lock); in skb_queue_head_init()
1899 lockdep_set_class(&list->lock, class); in skb_queue_head_init_class()
1905 * The "__skb_xxxx()" functions are the non-atomic ones that
1915 WRITE_ONCE(newsk->next, next); in __skb_insert()
1916 WRITE_ONCE(newsk->prev, prev); in __skb_insert()
1917 WRITE_ONCE(next->prev, newsk); in __skb_insert()
1918 WRITE_ONCE(prev->next, newsk); in __skb_insert()
1919 WRITE_ONCE(list->qlen, list->qlen + 1); in __skb_insert()
1926 struct sk_buff *first = list->next; in __skb_queue_splice()
1927 struct sk_buff *last = list->prev; in __skb_queue_splice()
1929 WRITE_ONCE(first->prev, prev); in __skb_queue_splice()
1930 WRITE_ONCE(prev->next, first); in __skb_queue_splice()
1932 WRITE_ONCE(last->next, next); in __skb_queue_splice()
1933 WRITE_ONCE(next->prev, last); in __skb_queue_splice()
1937 * skb_queue_splice - join two skb lists, this is designed for stacks
1945 __skb_queue_splice(list, (struct sk_buff *) head, head->next); in skb_queue_splice()
1946 head->qlen += list->qlen; in skb_queue_splice()
1951 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1961 __skb_queue_splice(list, (struct sk_buff *) head, head->next); in skb_queue_splice_init()
1962 head->qlen += list->qlen; in skb_queue_splice_init()
1968 * skb_queue_splice_tail - join two skb lists, each list being a queue
1976 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); in skb_queue_splice_tail()
1977 head->qlen += list->qlen; in skb_queue_splice_tail()
1982 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1993 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); in skb_queue_splice_tail_init()
1994 head->qlen += list->qlen; in skb_queue_splice_tail_init()
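
A hedged sketch of the batching pattern these splice helpers enable: move a locked queue onto an on-stack list in O(1), then work lock-free (consume_skb() stands in for real per-packet work):

static void process_queue(struct sk_buff_head *q)
{
	struct sk_buff_head batch;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&batch);		/* on-stack, unlocked */

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &batch);	/* q is left empty, still usable */
	spin_unlock_irqrestore(&q->lock, flags);

	while ((skb = __skb_dequeue(&batch)) != NULL)
		consume_skb(skb);
}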
2000 * __skb_queue_after - queue a buffer at the list head
2014 __skb_insert(newsk, prev, prev->next, list); in __skb_queue_after()
2024 __skb_insert(newsk, next->prev, next, list); in __skb_queue_before()
2028 * __skb_queue_head - queue a buffer at the list head
2045 * __skb_queue_tail - queue a buffer at the list tail
2070 WRITE_ONCE(list->qlen, list->qlen - 1); in __skb_unlink()
2071 next = skb->next; in __skb_unlink()
2072 prev = skb->prev; in __skb_unlink()
2073 skb->next = skb->prev = NULL; in __skb_unlink()
2074 WRITE_ONCE(next->prev, prev); in __skb_unlink()
2075 WRITE_ONCE(prev->next, next); in __skb_unlink()
2079 * __skb_dequeue - remove from the head of the queue
2096 * __skb_dequeue_tail - remove from the tail of the queue
2115 return skb->data_len; in skb_is_nonlinear()
2120 return skb->len - skb->data_len; in skb_headlen()
2127 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) in __skb_pagelen()
2128 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_pagelen()
2138 * __skb_fill_page_desc - initialise a paged fragment in an skb
2153 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_fill_page_desc()
2160 frag->bv_page = page; in __skb_fill_page_desc()
2161 frag->bv_offset = off; in __skb_fill_page_desc()
2166 skb->pfmemalloc = true; in __skb_fill_page_desc()
2170 * skb_fill_page_desc - initialise a paged fragment in an skb
2177 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
2187 skb_shinfo(skb)->nr_frags = i + 1; in skb_fill_page_desc()
2201 return skb->head + skb->tail; in skb_tail_pointer()
2206 skb->tail = skb->data - skb->head; in skb_reset_tail_pointer()
2212 skb->tail += offset; in skb_set_tail_pointer()
2218 return skb->tail; in skb_tail_pointer()
2223 skb->tail = skb->data; in skb_reset_tail_pointer()
2228 skb->tail = skb->data + offset; in skb_set_tail_pointer()
2236 if (WARN_ONCE(!skb->len, "%s\n", __func__)) in skb_assert_len()
2250 skb->tail += len; in __skb_put()
2251 skb->len += len; in __skb_put()
2304 skb->data -= len; in __skb_push()
2305 skb->len += len; in __skb_push()
2306 return skb->data; in __skb_push()
2312 skb->len -= len; in __skb_pull()
2313 BUG_ON(skb->len < skb->data_len); in __skb_pull()
2314 return skb->data += len; in __skb_pull()
2319 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); in skb_pull_inline()
2327 !__pskb_pull_tail(skb, len - skb_headlen(skb))) in __pskb_pull()
2329 skb->len -= len; in __pskb_pull()
2330 return skb->data += len; in __pskb_pull()
2335 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); in pskb_pull()
2342 if (unlikely(len > skb->len)) in pskb_may_pull()
2344 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; in pskb_may_pull()
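
An editorial parsing sketch: guarantee the bytes are linear before dereferencing them, the canonical use of pskb_may_pull() (thoff is an assumed transport-header offset):

static int parse_tcp_flags(struct sk_buff *skb, unsigned int thoff)
{
	const struct tcphdr *th;

	if (!pskb_may_pull(skb, thoff + sizeof(*th)))
		return -EINVAL;
	th = (const struct tcphdr *)(skb->data + thoff);
	return th->syn;
}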
2350 * skb_headroom - bytes at buffer head
2357 return skb->data - skb->head; in skb_headroom()
2361 * skb_tailroom - bytes at buffer end
2368 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; in skb_tailroom()
2372 * skb_availroom - bytes at buffer end
2383 return skb->end - skb->tail - skb->reserved_tailroom; in skb_availroom()
2387 * skb_reserve - adjust headroom
2396 skb->data += len; in skb_reserve()
2397 skb->tail += len; in skb_reserve()
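
The classic allocation pattern implied by skb_reserve(), as an editorial sketch: reserve headroom while the buffer is still empty, then fill the payload with skb_put():

static struct sk_buff *build_frame(unsigned int payload_len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + payload_len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, ETH_HLEN);	/* headroom for the link-layer header */
	memset(skb_put(skb, payload_len), 0, payload_len);
	return skb;
}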
2401 * skb_tailroom_reserve - adjust reserved_tailroom
2416 if (mtu < skb_tailroom(skb) - needed_tailroom) in skb_tailroom_reserve()
2418 skb->reserved_tailroom = skb_tailroom(skb) - mtu; in skb_tailroom_reserve()
2421 skb->reserved_tailroom = needed_tailroom; in skb_tailroom_reserve()
2430 skb->inner_protocol = protocol; in skb_set_inner_protocol()
2431 skb->inner_protocol_type = ENCAP_TYPE_ETHER; in skb_set_inner_protocol()
2437 skb->inner_ipproto = ipproto; in skb_set_inner_ipproto()
2438 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; in skb_set_inner_ipproto()
2443 skb->inner_mac_header = skb->mac_header; in skb_reset_inner_headers()
2444 skb->inner_network_header = skb->network_header; in skb_reset_inner_headers()
2445 skb->inner_transport_header = skb->transport_header; in skb_reset_inner_headers()
2450 skb->mac_len = skb->network_header - skb->mac_header; in skb_reset_mac_len()
2456 return skb->head + skb->inner_transport_header; in skb_inner_transport_header()
2461 return skb_inner_transport_header(skb) - skb->data; in skb_inner_transport_offset()
2466 skb->inner_transport_header = skb->data - skb->head; in skb_reset_inner_transport_header()
2473 skb->inner_transport_header += offset; in skb_set_inner_transport_header()
2478 return skb->head + skb->inner_network_header; in skb_inner_network_header()
2483 skb->inner_network_header = skb->data - skb->head; in skb_reset_inner_network_header()
2490 skb->inner_network_header += offset; in skb_set_inner_network_header()
2495 return skb->head + skb->inner_mac_header; in skb_inner_mac_header()
2500 skb->inner_mac_header = skb->data - skb->head; in skb_reset_inner_mac_header()
2507 skb->inner_mac_header += offset; in skb_set_inner_mac_header()
2511 return skb->transport_header != (typeof(skb->transport_header))~0U; in skb_transport_header_was_set()
2516 return skb->head + skb->transport_header; in skb_transport_header()
2521 skb->transport_header = skb->data - skb->head; in skb_reset_transport_header()
2528 skb->transport_header += offset; in skb_set_transport_header()
2533 return skb->head + skb->network_header; in skb_network_header()
2538 skb->network_header = skb->data - skb->head; in skb_reset_network_header()
2544 skb->network_header += offset; in skb_set_network_header()
2549 return skb->head + skb->mac_header; in skb_mac_header()
2554 return skb_mac_header(skb) - skb->data; in skb_mac_offset()
2559 return skb->network_header - skb->mac_header; in skb_mac_header_len()
2564 return skb->mac_header != (typeof(skb->mac_header))~0U; in skb_mac_header_was_set()
2569 skb->mac_header = (typeof(skb->mac_header))~0U; in skb_unset_mac_header()
2574 skb->mac_header = skb->data - skb->head; in skb_reset_mac_header()
2580 skb->mac_header += offset; in skb_set_mac_header()
2585 skb->mac_header = skb->network_header; in skb_pop_mac_header()
2605 skb_set_mac_header(skb, -skb->mac_len); in skb_mac_header_rebuild()
2606 memmove(skb_mac_header(skb), old_mac, skb->mac_len); in skb_mac_header_rebuild()
2612 return skb->csum_start - skb_headroom(skb); in skb_checksum_start_offset()
2617 return skb->head + skb->csum_start; in skb_checksum_start()
2622 return skb_transport_header(skb) - skb->data; in skb_transport_offset()
2627 return skb->transport_header - skb->network_header; in skb_network_header_len()
2632 return skb->inner_transport_header - skb->inner_network_header; in skb_inner_network_header_len()
2637 return skb_network_header(skb) - skb->data; in skb_network_offset()
2642 return skb_inner_network_header(skb) - skb->data; in skb_inner_network_offset()
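
An editorial sketch of how an output path records these offsets while headers are pushed front-to-back (assumes sufficient headroom was reserved beforehand):

static void push_headers(struct sk_buff *skb)
{
	/* skb->data currently points at the transport header */
	skb_reset_transport_header(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
}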
2704 skb->len = len; in __skb_set_length()
2717 if (skb->data_len) in __pskb_trim()
2725 return (len < skb->len) ? __pskb_trim(skb, len) : 0; in pskb_trim()
2729 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2734 * the skb is not cloned so we should never get an error due to out-
2735 * of-memory.
2745 unsigned int diff = len - skb->len; in __skb_grow()
2748 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), in __skb_grow()
2758 * skb_orphan - orphan a buffer
2767 if (skb->destructor) { in skb_orphan()
2768 skb->destructor(skb); in skb_orphan()
2769 skb->destructor = NULL; in skb_orphan()
2770 skb->sk = NULL; in skb_orphan()
2772 BUG_ON(skb->sk); in skb_orphan()
2777 * skb_orphan_frags - orphan the frags contained in a buffer
2790 skb_uarg(skb)->callback == sock_zerocopy_callback) in skb_orphan_frags()
2804 * __skb_queue_purge - empty a list
2827 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
2894 * __dev_alloc_pages - allocate page for network Rx
2924 * __dev_alloc_page - allocate a page for network Rx
2942 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2950 skb->pfmemalloc = true; in skb_propagate_pfmemalloc()
2954 * skb_frag_off() - Returns the offset of a skb fragment
2959 return frag->bv_offset; in skb_frag_off()
2963 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
2969 frag->bv_offset += delta; in skb_frag_off_add()
2973 * skb_frag_off_set() - Sets the offset of a skb fragment
2979 frag->bv_offset = offset; in skb_frag_off_set()
2983 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
2990 fragto->bv_offset = fragfrom->bv_offset; in skb_frag_off_copy()
2994 * skb_frag_page - retrieve the page referred to by a paged fragment
3001 return frag->bv_page; in skb_frag_page()
3005 * __skb_frag_ref - take an additional reference on a paged fragment.
3016 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
3024 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); in skb_frag_ref()
3028 * __skb_frag_unref - release a reference on a paged fragment.
3039 * skb_frag_unref - release a reference on a paged fragment of an skb.
3047 __skb_frag_unref(&skb_shinfo(skb)->frags[f]); in skb_frag_unref()
3051 * skb_frag_address - gets the address of the data contained in a paged fragment
3063 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
3079 * skb_frag_page_copy() - sets the page in a fragment from another fragment
3086 fragto->bv_page = fragfrom->bv_page; in skb_frag_page_copy()
3090 * __skb_frag_set_page - sets the page contained in a paged fragment
3098 frag->bv_page = page; in __skb_frag_set_page()
3102 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
3112 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); in skb_frag_set_page()
3118 * skb_frag_dma_map - maps a paged fragment via the DMA API
3152 * skb_clone_writable - is the header of a clone writable
3162 skb_headroom(skb) + len <= skb->hdr_len; in skb_clone_writable()
3178 delta = headroom - skb_headroom(skb); in __skb_cow()
3187 * skb_cow - copy header of skb when it is required
3195 * The result is skb with writable area skb->head...skb->tail
3204 * skb_cow_head - skb_cow but only making the head writable
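
A hedged sketch of the usual skb_cow_head() pattern before editing the link-layer header in place (assumes the mac header offset is valid):

static int rewrite_dest_mac(struct sk_buff *skb, const u8 *mac)
{
	int err = skb_cow_head(skb, 0);

	if (err)
		return err;
	ether_addr_copy(eth_hdr(skb)->h_dest, mac);
	return 0;
}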
3219 * skb_padto - pad an skbuff up to a minimal size
3230 unsigned int size = skb->len; in skb_padto()
3233 return skb_pad(skb, len - size); in skb_padto()
3237 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
3251 unsigned int size = skb->len; in __skb_put_padto()
3254 len -= size; in __skb_put_padto()
3256 return -ENOMEM; in __skb_put_padto()
3263 * skb_put_padto - increase size and pad an skbuff up to a minimal size
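
An editorial one-liner showing the intended use of skb_put_padto() on transmit; on failure the skb has already been freed, so the caller must not touch it again:

static int tx_pad_min_frame(struct sk_buff *skb)
{
	return skb_put_padto(skb, ETH_ZLEN);	/* zero-pads up to 60 bytes */
}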
3280 const int off = skb->len; in skb_add_data()
3282 if (skb->ip_summed == CHECKSUM_NONE) { in skb_add_data()
3286 skb->csum = csum_block_add(skb->csum, csum, off); in skb_add_data()
3293 return -EFAULT; in skb_add_data()
3302 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in skb_can_coalesce()
3312 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; in __skb_linearize()
3316 * skb_linearize - convert paged skb to linear one
3319 * If there is no free memory -ENOMEM is returned, otherwise zero
3328 * skb_has_shared_frag - can any frag be overwritten
3337 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; in skb_has_shared_frag()
3341 * skb_linearize_cow - make sure skb is linear and writable
3344 * If there is no free memory -ENOMEM is returned, otherwise zero
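
A hedged usage sketch for the linearize helpers: flatten only when needed, and propagate -ENOMEM:

static int inspect_whole_packet(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb)) {
		int err = skb_linearize(skb);

		if (err)
			return err;
	}
	/* skb->data now covers all skb->len bytes */
	return 0;
}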
3357 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpull_rcsum()
3358 skb->csum = csum_block_sub(skb->csum, in __skb_postpull_rcsum()
3360 else if (skb->ip_summed == CHECKSUM_PARTIAL && in __skb_postpull_rcsum()
3362 skb->ip_summed = CHECKSUM_NONE; in __skb_postpull_rcsum()
3366 * skb_postpull_rcsum - update checksum for received skb after pull
3385 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_postpush_rcsum()
3386 skb->csum = csum_block_add(skb->csum, in __skb_postpush_rcsum()
3391 * skb_postpush_rcsum - update checksum for received skb after push
3408 * skb_push_rcsum - push skb and update receive checksum
3421 skb_postpush_rcsum(skb, skb->data, len); in skb_push_rcsum()
3422 return skb->data; in skb_push_rcsum()
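
An editorial sketch pairing a pull with the checksum fixup described above, so CHECKSUM_COMPLETE stays coherent (assumes len <= skb_headlen(skb)):

static void strip_front(struct sk_buff *skb, unsigned int len)
{
	const void *start = skb->data;

	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, start, len);
}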
3427 * pskb_trim_rcsum - trim received skb and update checksum
3438 if (likely(len >= skb->len)) in pskb_trim_rcsum()
3445 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_trim_rcsum()
3446 skb->ip_summed = CHECKSUM_NONE; in __skb_trim_rcsum()
3453 if (skb->ip_summed == CHECKSUM_COMPLETE) in __skb_grow_rcsum()
3454 skb->ip_summed = CHECKSUM_NONE; in __skb_grow_rcsum()
3461 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3462 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3465 for (skb = (queue)->next; \
3467 skb = skb->next)
3470 for (skb = (queue)->next, tmp = skb->next; \
3472 skb = tmp, tmp = skb->next)
3476 skb = skb->next)
3491 for (tmp = skb->next; \
3493 skb = tmp, tmp = skb->next)
3496 for (skb = (queue)->prev; \
3498 skb = skb->prev)
3501 for (skb = (queue)->prev, tmp = skb->prev; \
3503 skb = tmp, tmp = skb->prev)
3506 for (tmp = skb->prev; \
3508 skb = tmp, tmp = skb->prev)
3512 return skb_shinfo(skb)->frag_list != NULL; in skb_has_frag_list()
3517 skb_shinfo(skb)->frag_list = NULL; in skb_frag_list_init()
3521 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3548 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); in skb_copy_datagram_msg()
3606 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT; in memcpy_from_msg()
3611 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; in memcpy_to_msg()
3630 if (hlen - offset >= len) in __skb_header_pointer()
3643 return __skb_header_pointer(skb, offset, len, skb->data, in skb_header_pointer()
3648 * skb_needs_linearize - check if we need to linearize a given skb
3662 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); in skb_needs_linearize()
3669 memcpy(to, skb->data, len); in skb_copy_from_linear_data()
3676 memcpy(to, skb->data + offset, len); in skb_copy_from_linear_data_offset()
3683 memcpy(skb->data, from, len); in skb_copy_to_linear_data()
3691 memcpy(skb->data + offset, from, len); in skb_copy_to_linear_data_offset()
3698 return skb->tstamp; in skb_get_ktime()
3702 * skb_get_timestamp - get timestamp from a skb
3713 *stamp = ns_to_kernel_old_timeval(skb->tstamp); in skb_get_timestamp()
3719 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestamp()
3721 stamp->tv_sec = ts.tv_sec; in skb_get_new_timestamp()
3722 stamp->tv_usec = ts.tv_nsec / 1000; in skb_get_new_timestamp()
3728 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_timestampns()
3730 stamp->tv_sec = ts.tv_sec; in skb_get_timestampns()
3731 stamp->tv_nsec = ts.tv_nsec; in skb_get_timestampns()
3737 struct timespec64 ts = ktime_to_timespec64(skb->tstamp); in skb_get_new_timestampns()
3739 stamp->tv_sec = ts.tv_sec; in skb_get_new_timestampns()
3740 stamp->tv_nsec = ts.tv_nsec; in skb_get_new_timestampns()
3745 skb->tstamp = ktime_get_real(); in __net_timestamp()
3760 return skb_shinfo(skb)->meta_len; in skb_metadata_len()
3779 #define __it(x, op) (x -= sizeof(u##op)) in __skb_metadata_differs()
3800 return memcmp(a - meta_len, b - meta_len, meta_len); in __skb_metadata_differs()
3819 skb_shinfo(skb)->meta_len = meta_len; in skb_metadata_set()
3848 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3867 * skb_tstamp_tx - queue clone of skb with send time stamps
3881 * skb_tx_timestamp() - Driver hook for transmit timestamping
3895 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) in skb_tx_timestamp()
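
A hedged driver sketch for the hook above: call it just before the buffer is handed to hardware (everything else is elided):

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	/* ... map buffers and fill the tx descriptor ... */
	skb_tx_timestamp(skb);
	/* ... notify hardware ... */
	return NETDEV_TX_OK;
}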
3900 * skb_complete_wifi_ack - deliver skb with wifi status
3913 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || in skb_csum_unnecessary()
3914 skb->csum_valid || in skb_csum_unnecessary()
3915 (skb->ip_summed == CHECKSUM_PARTIAL && in skb_csum_unnecessary()
3920 * skb_checksum_complete - Calculate checksum of an entire packet
3924 * the value of skb->csum. The latter can be used to supply the
3932 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
3943 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_decr_checksum_unnecessary()
3944 if (skb->csum_level == 0) in __skb_decr_checksum_unnecessary()
3945 skb->ip_summed = CHECKSUM_NONE; in __skb_decr_checksum_unnecessary()
3947 skb->csum_level--; in __skb_decr_checksum_unnecessary()
3953 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_incr_checksum_unnecessary()
3954 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) in __skb_incr_checksum_unnecessary()
3955 skb->csum_level++; in __skb_incr_checksum_unnecessary()
3956 } else if (skb->ip_summed == CHECKSUM_NONE) { in __skb_incr_checksum_unnecessary()
3957 skb->ip_summed = CHECKSUM_UNNECESSARY; in __skb_incr_checksum_unnecessary()
3958 skb->csum_level = 0; in __skb_incr_checksum_unnecessary()
3964 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in __skb_reset_checksum_unnecessary()
3965 skb->ip_summed = CHECKSUM_NONE; in __skb_reset_checksum_unnecessary()
3966 skb->csum_level = 0; in __skb_reset_checksum_unnecessary()
3980 skb->csum_valid = 1; in __skb_checksum_validate_needed()
3993 /* Unset checksum-complete
3996 * (uncompressed for instance) and checksum-complete value is
4001 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_checksum_complete_unset()
4002 skb->ip_summed = CHECKSUM_NONE; in skb_checksum_complete_unset()
4010 * checksum is stored in skb->csum for use in __skb_checksum_complete
4011 * non-zero: value of invalid checksum
4018 if (skb->ip_summed == CHECKSUM_COMPLETE) { in __skb_checksum_validate_complete()
4019 if (!csum_fold(csum_add(psum, skb->csum))) { in __skb_checksum_validate_complete()
4020 skb->csum_valid = 1; in __skb_checksum_validate_complete()
4025 skb->csum = psum; in __skb_checksum_validate_complete()
4027 if (complete || skb->len <= CHECKSUM_BREAK) { in __skb_checksum_validate_complete()
4031 skb->csum_valid = !csum; in __skb_checksum_validate_complete()
4051 * non-zero: value of invalid checksum
4057 skb->csum_valid = 0; \
4082 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); in __skb_checksum_convert_check()
4087 skb->csum = ~pseudo; in __skb_checksum_convert()
4088 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_convert()
4100 skb->ip_summed = CHECKSUM_PARTIAL; in skb_remcsum_adjust_partial()
4101 skb->csum_start = ((unsigned char *)ptr + start) - skb->head; in skb_remcsum_adjust_partial()
4102 skb->csum_offset = offset - start; in skb_remcsum_adjust_partial()
4106 * When called, ptr indicates the starting point for skb->csum when
4108 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
4120 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { in skb_remcsum_process()
4122 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); in skb_remcsum_process()
4125 delta = remcsum_adjust(ptr, skb->csum, start, offset); in skb_remcsum_process()
4127 /* Adjust skb->csum since we changed the packet */ in skb_remcsum_process()
4128 skb->csum = csum_add(skb->csum, delta); in skb_remcsum_process()
4134 return (void *)(skb->_nfct & NFCT_PTRMASK); in skb_nfct()
4143 return skb->_nfct; in skb_get_nfct()
4152 skb->_nfct = nfct; in skb_set_nfct()
4174 * struct skb_ext - sk_buff extensions
4199 if (skb->active_extensions) in skb_ext_put()
4200 __skb_ext_put(skb->extensions); in skb_ext_put()
4206 dst->active_extensions = src->active_extensions; in __skb_ext_copy()
4208 if (src->active_extensions) { in __skb_ext_copy()
4209 struct skb_ext *ext = src->extensions; in __skb_ext_copy()
4211 refcount_inc(&ext->refcnt); in __skb_ext_copy()
4212 dst->extensions = ext; in __skb_ext_copy()
4224 return !!ext->offset[i]; in __skb_ext_exist()
4229 return skb->active_extensions & (1 << id); in skb_ext_exist()
4241 struct skb_ext *ext = skb->extensions; in skb_ext_find()
4243 return (void *)ext + (ext->offset[id] << 3); in skb_ext_find()
4251 if (unlikely(skb->active_extensions)) { in skb_ext_reset()
4252 __skb_ext_put(skb->extensions); in skb_ext_reset()
4253 skb->active_extensions = 0; in skb_ext_reset()
4259 return unlikely(skb->active_extensions); in skb_has_extensions()
4274 skb->_nfct = 0; in nf_reset_ct()
4281 skb->nf_trace = 0; in nf_reset_trace()
4288 skb->ipvs_property = 0; in ipvs_reset()
4297 dst->_nfct = src->_nfct; in __nf_copy()
4302 dst->nf_trace = src->nf_trace; in __nf_copy()
4317 to->secmark = from->secmark; in skb_copy_secmark()
4322 skb->secmark = 0; in skb_init_secmark()
4343 return !skb->destructor && in skb_irq_freeable()
4346 !skb->_skb_refdst && in skb_irq_freeable()
4352 skb->queue_mapping = queue_mapping; in skb_set_queue_mapping()
4357 return skb->queue_mapping; in skb_get_queue_mapping()
4362 to->queue_mapping = from->queue_mapping; in skb_copy_queue_mapping()
4367 skb->queue_mapping = rx_queue + 1; in skb_record_rx_queue()
4372 return skb->queue_mapping - 1; in skb_get_rx_queue()
4377 return skb->queue_mapping != 0; in skb_rx_queue_recorded()
4382 skb->dst_pending_confirm = val; in skb_set_dst_pending_confirm()
4387 return skb->dst_pending_confirm != 0; in skb_get_dst_pending_confirm()
4399 /* Keeps track of mac header offset relative to skb->head.
4401 * For non-tunnel skb it points to skb_mac_header() and for
4415 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4419 return (skb_mac_header(inner_skb) - inner_skb->head) - in skb_tnl_header_len()
4420 SKB_GSO_CB(inner_skb)->mac_offset; in skb_tnl_header_len()
4434 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); in gso_pskb_expand_head()
4441 if (skb->remcsum_offload) in gso_reset_checksum()
4444 SKB_GSO_CB(skb)->csum = res; in gso_reset_checksum()
4445 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; in gso_reset_checksum()
4449 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
4450 * then add in skb->csum (checksum from csum_start to end of packet).
4451 * skb->csum and csum_start are then updated to reflect the checksum of the
4452 * resultant packet starting from the transport header-- the resultant checksum
4459 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; in gso_make_checksum()
4460 __wsum partial = SKB_GSO_CB(skb)->csum; in gso_make_checksum()
4462 SKB_GSO_CB(skb)->csum = res; in gso_make_checksum()
4463 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; in gso_make_checksum()
4470 return skb_shinfo(skb)->gso_size; in skb_is_gso()
4476 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; in skb_is_gso_v6()
4482 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; in skb_is_gso_sctp()
4488 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); in skb_is_gso_tcp()
4493 skb_shinfo(skb)->gso_size = 0; in skb_gso_reset()
4494 skb_shinfo(skb)->gso_segs = 0; in skb_gso_reset()
4495 skb_shinfo(skb)->gso_type = 0; in skb_gso_reset()
4501 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_increase_gso_size()
4503 shinfo->gso_size += increment; in skb_increase_gso_size()
4509 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_decrease_gso_size()
4511 shinfo->gso_size -= decrement; in skb_decrease_gso_size()
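
An editorial sketch of how a transmit path typically branches on the predicates above, e.g. to pick a per-descriptor payload size for TSO:

static unsigned int tx_payload_unit(const struct sk_buff *skb)
{
	if (skb_is_gso(skb))
		return skb_shinfo(skb)->gso_size;	/* per-segment payload */
	return skb->len;				/* single frame */
}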
4522 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && in skb_warn_if_lro()
4523 unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
4533 if (skb->ip_summed == CHECKSUM_COMPLETE) in skb_forward_csum()
4534 skb->ip_summed = CHECKSUM_NONE; in skb_forward_csum()
4538 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
4548 BUG_ON(skb->ip_summed != CHECKSUM_NONE); in skb_checksum_none_assert()
4560 * skb_head_is_locked - Determine if the skb->head is locked down
4570 return !skb->head_frag || skb_cloned(skb); in skb_head_is_locked()
4576 * See Documentation/networking/checksum-offloads.rst for
4579 * pseudo-header) before calling.
4590 skb->csum_offset)); in lco_csum()
4595 return csum_partial(l4_hdr, csum_start - l4_hdr, partial); in lco_csum()
4601 return skb->redirected; in skb_is_redirected()
4610 skb->redirected = 1; in skb_set_redirected()
4611 skb->from_ingress = from_ingress; in skb_set_redirected()
4612 if (skb->from_ingress) in skb_set_redirected()
4613 skb->tstamp = 0; in skb_set_redirected()
4620 skb->redirected = 0; in skb_reset_redirect()
4626 return skb->csum_not_inet; in skb_csum_is_sctp()
4633 skb->kcov_handle = kcov_handle; in skb_set_kcov_handle()
4640 return skb->kcov_handle; in skb_get_kcov_handle()