1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Routines having to do with the 'struct sk_buff' memory handlers.
4 *
5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
6 * Florian La Roche <rzsfl@rz.uni-sb.de>
7 *
8 * Fixes:
9 * Alan Cox : Fixed the worst of the load
10 * balancer bugs.
11 * Dave Platt : Interrupt stacking fix.
12 * Richard Kooijman : Timestamp fixes.
13 * Alan Cox : Changed buffer format.
14 * Alan Cox : destructor hook for AF_UNIX etc.
15 * Linus Torvalds : Better skb_clone.
16 * Alan Cox : Added skb_copy.
17 * Alan Cox : Added all the changed routines Linus
18 * only put in the headers
19 * Ray VanTassle : Fixed --skb->lock in free
20 * Alan Cox : skb_copy copy arp field
21 * Andi Kleen : slabified it.
22 * Robert Olsson : Removed skb_head_pool
23 *
24 * NOTE:
25 * The __skb_ routines should be called with interrupts
26 * disabled, or you better be *real* sure that the operation is atomic
27 * with respect to whatever list is being frobbed (e.g. via lock_sock()
28 * or via disabling bottom half handlers, etc).
29 */
30
31 /*
32 * The functions in this file will not compile correctly with gcc 2.4.x
33 */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/interrupt.h>
42 #include <linux/in.h>
43 #include <linux/inet.h>
44 #include <linux/slab.h>
45 #include <linux/tcp.h>
46 #include <linux/udp.h>
47 #include <linux/sctp.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 #include <linux/prefetch.h>
61 #include <linux/bitfield.h>
62 #include <linux/if_vlan.h>
63 #include <linux/mpls.h>
64 #include <linux/kcov.h>
65
66 #include <net/protocol.h>
67 #include <net/dst.h>
68 #include <net/sock.h>
69 #include <net/checksum.h>
70 #include <net/gso.h>
71 #include <net/ip6_checksum.h>
72 #include <net/xfrm.h>
73 #include <net/mpls.h>
74 #include <net/mptcp.h>
75 #include <net/mctp.h>
76 #include <net/page_pool/helpers.h>
77 #include <net/dropreason.h>
78
79 #include <linux/uaccess.h>
80 #include <trace/events/skb.h>
81 #include <linux/highmem.h>
82 #include <linux/capability.h>
83 #include <linux/user_namespace.h>
84 #include <linux/indirect_call_wrapper.h>
85 #include <linux/textsearch.h>
86
87 #include <trace/hooks/net.h>
88
89 #include "dev.h"
90 #include "sock_destructor.h"
91
92 struct kmem_cache *skbuff_cache __ro_after_init;
93 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
94 #ifdef CONFIG_SKB_EXTENSIONS
95 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
96 #endif
97
98
99 static struct kmem_cache *skb_small_head_cache __ro_after_init;
100
101 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
102
103 /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
104 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
105 * size, and we can differentiate heads from skb_small_head_cache
106 * vs system slabs by looking at their size (skb_end_offset()).
107 */
108 #define SKB_SMALL_HEAD_CACHE_SIZE \
109 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
110 (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
111 SKB_SMALL_HEAD_SIZE)
112
113 #define SKB_SMALL_HEAD_HEADROOM \
114 SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
115
116 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
117 EXPORT_SYMBOL(sysctl_max_skb_frags);
118
119 #undef FN
120 #define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
121 static const char * const drop_reasons[] = {
122 [SKB_CONSUMED] = "CONSUMED",
123 DEFINE_DROP_REASON(FN, FN)
124 };
125
126 static const struct drop_reason_list drop_reasons_core = {
127 .reasons = drop_reasons,
128 .n_reasons = ARRAY_SIZE(drop_reasons),
129 };
130
131 const struct drop_reason_list __rcu *
132 drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
133 [SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
134 };
135 EXPORT_SYMBOL(drop_reasons_by_subsys);
136
137 /**
138 * drop_reasons_register_subsys - register another drop reason subsystem
139 * @subsys: the subsystem to register, must not be the core
140 * @list: the list of drop reasons within the subsystem, must point to
141 * a statically initialized list
142 */
143 void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
144 const struct drop_reason_list *list)
145 {
146 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
147 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
148 "invalid subsystem %d\n", subsys))
149 return;
150
151 /* must point to statically allocated memory, so INIT is OK */
152 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
153 }
154 EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
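/* Example (illustrative sketch only, not part of this file): a subsystem
 * with its own drop reason space would typically register a statically
 * initialized list at init time and unregister it on exit. The "foo" names
 * and FOO_SUBSYS below are assumptions for illustration; the subsystem
 * value must be one of the non-core entries of enum skb_drop_reason_subsys.
 *
 *	static const char * const foo_drop_reasons[] = {
 *		"FOO_REASON_A",
 *		"FOO_REASON_B",
 *	};
 *	static const struct drop_reason_list foo_drop_reason_list = {
 *		.reasons   = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(FOO_SUBSYS, &foo_drop_reason_list);
 *	...
 *	drop_reasons_unregister_subsys(FOO_SUBSYS);
 */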
155
156 /**
157 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
158 * @subsys: the subsystem to remove, must not be the core
159 *
160 * Note: This will synchronize_rcu() to ensure no users when it returns.
161 */
162 void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
163 {
164 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
165 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
166 "invalid subsystem %d\n", subsys))
167 return;
168
169 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);
170
171 synchronize_rcu();
172 }
173 EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
174
175 /**
176 * skb_panic - private function for out-of-line support
177 * @skb: buffer
178 * @sz: size
179 * @addr: address
180 * @msg: skb_over_panic or skb_under_panic
181 *
182 * Out-of-line support for skb_put() and skb_push().
183 * Called via the wrapper skb_over_panic() or skb_under_panic().
184 * Keep out of line to prevent kernel bloat.
185 * __builtin_return_address is not used because it is not always reliable.
186 */
187 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
188 const char msg[])
189 {
190 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
191 msg, addr, skb->len, sz, skb->head, skb->data,
192 (unsigned long)skb->tail, (unsigned long)skb->end,
193 skb->dev ? skb->dev->name : "<NULL>");
194 BUG();
195 }
196
197 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
198 {
199 skb_panic(skb, sz, addr, __func__);
200 }
201
202 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
203 {
204 skb_panic(skb, sz, addr, __func__);
205 }
206
207 #define NAPI_SKB_CACHE_SIZE 64
208 #define NAPI_SKB_CACHE_BULK 16
209 #define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
210
211 #if PAGE_SIZE == SZ_4K
212
213 #define NAPI_HAS_SMALL_PAGE_FRAG 1
214 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc)
215
216 /* specialized page frag allocator using a single order 0 page
217 * and slicing it into 1K sized fragments. Constrained to systems
218 * with a very limited amount of 1K fragments fitting a single
219 * page - to avoid excessive truesize underestimation
220 */
221
222 struct page_frag_1k {
223 void *va;
224 u16 offset;
225 bool pfmemalloc;
226 };
227
228 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
229 {
230 struct page *page;
231 int offset;
232
233 offset = nc->offset - SZ_1K;
234 if (likely(offset >= 0))
235 goto use_frag;
236
237 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
238 if (!page)
239 return NULL;
240
241 nc->va = page_address(page);
242 nc->pfmemalloc = page_is_pfmemalloc(page);
243 offset = PAGE_SIZE - SZ_1K;
244 page_ref_add(page, offset / SZ_1K);
245
246 use_frag:
247 nc->offset = offset;
248 return nc->va + offset;
249 }
250 #else
251
252 /* the small page is actually unused in this build; add dummy helpers
253 * to please the compiler and avoid later preprocessor's conditionals
254 */
255 #define NAPI_HAS_SMALL_PAGE_FRAG 0
256 #define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false
257
258 struct page_frag_1k {
259 };
260
261 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
262 {
263 return NULL;
264 }
265
266 #endif
267
268 struct napi_alloc_cache {
269 struct page_frag_cache page;
270 struct page_frag_1k page_small;
271 unsigned int skb_count;
272 void *skb_cache[NAPI_SKB_CACHE_SIZE];
273 };
274
275 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
276 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
277
278 /* Double check that napi_get_frags() allocates skbs with
279 * skb->head being backed by slab, not a page fragment.
280 * This is to make sure bug fixed in 3226b158e67c
281 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
282 * does not accidentally come back.
283 */
284 void napi_get_frags_check(struct napi_struct *napi)
285 {
286 struct sk_buff *skb;
287
288 local_bh_disable();
289 skb = napi_get_frags(napi);
290 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
291 napi_free_frags(napi);
292 local_bh_enable();
293 }
294
295 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
296 {
297 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
298
299 fragsz = SKB_DATA_ALIGN(fragsz);
300
301 return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
302 }
303 EXPORT_SYMBOL(__napi_alloc_frag_align);
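/* Example (illustrative sketch): a driver refilling its RX ring from NAPI
 * context with napi_alloc_frag(), the BH-only wrapper around this helper.
 * RX_BUF_SIZE is a hypothetical, driver-chosen buffer size.
 *
 *	void *frag = napi_alloc_frag(RX_BUF_SIZE);
 *
 *	if (unlikely(!frag))
 *		return -ENOMEM;
 *	...map the fragment and post it to the RX ring...
 *	// an unused fragment is released with skb_free_frag(frag)
 */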
304
305 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
306 {
307 void *data;
308
309 fragsz = SKB_DATA_ALIGN(fragsz);
310 if (in_hardirq() || irqs_disabled()) {
311 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
312
313 data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
314 } else {
315 struct napi_alloc_cache *nc;
316
317 local_bh_disable();
318 nc = this_cpu_ptr(&napi_alloc_cache);
319 data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
320 local_bh_enable();
321 }
322 return data;
323 }
324 EXPORT_SYMBOL(__netdev_alloc_frag_align);
325
326 static struct sk_buff *napi_skb_cache_get(void)
327 {
328 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
329 struct sk_buff *skb;
330
331 if (unlikely(!nc->skb_count)) {
332 nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
333 GFP_ATOMIC,
334 NAPI_SKB_CACHE_BULK,
335 nc->skb_cache);
336 if (unlikely(!nc->skb_count))
337 return NULL;
338 }
339
340 skb = nc->skb_cache[--nc->skb_count];
341 kasan_unpoison_object_data(skbuff_cache, skb);
342
343 return skb;
344 }
345
346 static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
347 unsigned int size)
348 {
349 struct skb_shared_info *shinfo;
350
351 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
352
353 /* Assumes caller memset cleared SKB */
354 skb->truesize = SKB_TRUESIZE(size);
355 refcount_set(&skb->users, 1);
356 skb->head = data;
357 skb->data = data;
358 skb_reset_tail_pointer(skb);
359 skb_set_end_offset(skb, size);
360 skb->mac_header = (typeof(skb->mac_header))~0U;
361 skb->transport_header = (typeof(skb->transport_header))~0U;
362 skb->alloc_cpu = raw_smp_processor_id();
363 /* make sure we initialize shinfo sequentially */
364 shinfo = skb_shinfo(skb);
365 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
366 atomic_set(&shinfo->dataref, 1);
367
368 skb_set_kcov_handle(skb, kcov_common_handle());
369 }
370
371 static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
372 unsigned int *size)
373 {
374 void *resized;
375
376 /* Must find the allocation size (and grow it to match). */
377 *size = ksize(data);
378 /* krealloc() will immediately return "data" when
379 * "ksize(data)" is requested: it is the existing upper
380 * bounds. As a result, GFP_ATOMIC will be ignored. Note
381 * that this "new" pointer needs to be passed back to the
382 * caller for use so the __alloc_size hinting will be
383 * tracked correctly.
384 */
385 resized = krealloc(data, *size, GFP_ATOMIC);
386 WARN_ON_ONCE(resized != data);
387 return resized;
388 }
389
390 /* build_skb() variant which can operate on slab buffers.
391 * Note that this should be used sparingly as slab buffers
392 * cannot be combined efficiently by GRO!
393 */
394 struct sk_buff *slab_build_skb(void *data)
395 {
396 struct sk_buff *skb;
397 unsigned int size;
398
399 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
400 if (unlikely(!skb))
401 return NULL;
402
403 memset(skb, 0, offsetof(struct sk_buff, tail));
404 data = __slab_build_skb(skb, data, &size);
405 __finalize_skb_around(skb, data, size);
406
407 trace_android_vh_build_skb_around(skb);
408
409 return skb;
410 }
411 EXPORT_SYMBOL(slab_build_skb);
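/* Example (illustrative sketch): wrapping a kmalloc()ed buffer with
 * slab_build_skb(). The buffer must be large enough for the payload plus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); buf_len and frame_len
 * are assumptions for illustration.
 *
 *	void *buf = kmalloc(buf_len, GFP_ATOMIC);
 *	struct sk_buff *skb;
 *
 *	if (!buf)
 *		return NULL;
 *	...copy the received frame into buf...
 *	skb = slab_build_skb(buf);
 *	if (unlikely(!skb)) {
 *		kfree(buf);		// on failure the buffer is not freed
 *		return NULL;
 *	}
 *	skb_put(skb, frame_len);
 */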
412
413 /* Caller must provide SKB that is memset cleared */
414 static void __build_skb_around(struct sk_buff *skb, void *data,
415 unsigned int frag_size)
416 {
417 unsigned int size = frag_size;
418
419 /* frag_size == 0 is considered deprecated now. Callers
420 * using slab buffer should use slab_build_skb() instead.
421 */
422 if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
423 data = __slab_build_skb(skb, data, &size);
424
425 __finalize_skb_around(skb, data, size);
426
427 trace_android_vh_build_skb_around(skb);
428 }
429
430 /**
431 * __build_skb - build a network buffer
432 * @data: data buffer provided by caller
433 * @frag_size: size of data (must not be 0)
434 *
435 * Allocate a new &sk_buff. Caller provides space holding head and
436 * skb_shared_info. @data must have been allocated from the page
437 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
438 * allocation is deprecated, and callers should use slab_build_skb()
439 * instead.)
440 * The return is the new skb buffer.
441 * On a failure the return is %NULL, and @data is not freed.
442 * Notes :
443 * Before IO, driver allocates only data buffer where NIC put incoming frame
444 * Driver should add room at head (NET_SKB_PAD) and
445 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
446 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
447 * before giving packet to stack.
448 * RX rings only contain data buffers, not full skbs.
449 */
450 struct sk_buff *__build_skb(void *data, unsigned int frag_size)
451 {
452 struct sk_buff *skb;
453
454 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
455 if (unlikely(!skb))
456 return NULL;
457
458 memset(skb, 0, offsetof(struct sk_buff, tail));
459 __build_skb_around(skb, data, frag_size);
460
461 return skb;
462 }
463
464 /* build_skb() is wrapper over __build_skb(), that specifically
465 * takes care of skb->head and skb->pfmemalloc
466 */
467 struct sk_buff *build_skb(void *data, unsigned int frag_size)
468 {
469 struct sk_buff *skb = __build_skb(data, frag_size);
470
471 if (likely(skb && frag_size)) {
472 skb->head_frag = 1;
473 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
474 }
475 return skb;
476 }
477 EXPORT_SYMBOL(build_skb);
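/* Example (illustrative sketch): a hypothetical driver RX completion path
 * using build_skb() on a page fragment it posted earlier, following the
 * layout rules in the __build_skb() notes above. rx_buf, rx_buf_size,
 * frame_len, netdev and napi are assumptions for illustration.
 *
 *	// rx_buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + frame budget) +
 *	//		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *	skb = build_skb(rx_buf, rx_buf_size);
 *	if (unlikely(!skb))
 *		return;
 *	skb_reserve(skb, NET_SKB_PAD);		// headroom reserved at alloc time
 *	skb_put(skb, frame_len);		// bytes written by the NIC
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(napi, skb);
 */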
478
479 /**
480 * build_skb_around - build a network buffer around provided skb
481 * @skb: sk_buff provide by caller, must be memset cleared
482 * @data: data buffer provided by caller
483 * @frag_size: size of data
484 */
485 struct sk_buff *build_skb_around(struct sk_buff *skb,
486 void *data, unsigned int frag_size)
487 {
488 if (unlikely(!skb))
489 return NULL;
490
491 __build_skb_around(skb, data, frag_size);
492
493 if (frag_size) {
494 skb->head_frag = 1;
495 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
496 }
497 return skb;
498 }
499 EXPORT_SYMBOL(build_skb_around);
500
501 /**
502 * __napi_build_skb - build a network buffer
503 * @data: data buffer provided by caller
504 * @frag_size: size of data
505 *
506 * Version of __build_skb() that uses NAPI percpu caches to obtain
507 * skbuff_head instead of inplace allocation.
508 *
509 * Returns a new &sk_buff on success, %NULL on allocation failure.
510 */
511 static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
512 {
513 struct sk_buff *skb;
514
515 skb = napi_skb_cache_get();
516 if (unlikely(!skb))
517 return NULL;
518
519 memset(skb, 0, offsetof(struct sk_buff, tail));
520 __build_skb_around(skb, data, frag_size);
521
522 return skb;
523 }
524
525 /**
526 * napi_build_skb - build a network buffer
527 * @data: data buffer provided by caller
528 * @frag_size: size of data
529 *
530 * Version of __napi_build_skb() that takes care of skb->head_frag
531 * and skb->pfmemalloc when the data is a page or page fragment.
532 *
533 * Returns a new &sk_buff on success, %NULL on allocation failure.
534 */
535 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
536 {
537 struct sk_buff *skb = __napi_build_skb(data, frag_size);
538
539 if (likely(skb) && frag_size) {
540 skb->head_frag = 1;
541 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
542 }
543
544 return skb;
545 }
546 EXPORT_SYMBOL(napi_build_skb);
547
548 /*
549 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
550 * the caller if emergency pfmemalloc reserves are being used. If it is and
551 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
552 * may be used. Otherwise, the packet data may be discarded until enough
553 * memory is free
554 */
555 static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
556 bool *pfmemalloc)
557 {
558 bool ret_pfmemalloc = false;
559 size_t obj_size;
560 void *obj;
561
562 obj_size = SKB_HEAD_ALIGN(*size);
563 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
564 !(flags & KMALLOC_NOT_NORMAL_BITS)) {
565 obj = kmem_cache_alloc_node(skb_small_head_cache,
566 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
567 node);
568 *size = SKB_SMALL_HEAD_CACHE_SIZE;
569 if (obj || !(gfp_pfmemalloc_allowed(flags)))
570 goto out;
571 /* Try again but now we are using pfmemalloc reserves */
572 ret_pfmemalloc = true;
573 obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
574 goto out;
575 }
576
577 obj_size = kmalloc_size_roundup(obj_size);
578 /* The following cast might truncate high-order bits of obj_size, this
579 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
580 */
581 *size = (unsigned int)obj_size;
582
583 /*
584 * Try a regular allocation, when that fails and we're not entitled
585 * to the reserves, fail.
586 */
587 obj = kmalloc_node_track_caller(obj_size,
588 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
589 node);
590 if (obj || !(gfp_pfmemalloc_allowed(flags)))
591 goto out;
592
593 /* Try again but now we are using pfmemalloc reserves */
594 ret_pfmemalloc = true;
595 obj = kmalloc_node_track_caller(obj_size, flags, node);
596
597 out:
598 if (pfmemalloc)
599 *pfmemalloc = ret_pfmemalloc;
600
601 return obj;
602 }
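/* Illustrative sketch of the consumer-side contract described above: data
 * allocated from pfmemalloc reserves marks the skb, and such packets are
 * only delivered to sockets allowed to use the reserves (SOCK_MEMALLOC,
 * e.g. swap over network). This mirrors the check in sk_filter_trim_cap()
 * and is shown here only to document the contract.
 *
 *	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
 *		return -ENOMEM;		// drop rather than consume reserves
 */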
603
604 /* Allocate a new skbuff. We do this ourselves so we can fill in a few
605 * 'private' fields and also do memory statistics to find all the
606 * [BEEP] leaks.
607 *
608 */
609
610 /**
611 * __alloc_skb - allocate a network buffer
612 * @size: size to allocate
613 * @gfp_mask: allocation mask
614 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
615 * instead of head cache and allocate a cloned (child) skb.
616 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
617 * allocations in case the data is required for writeback
618 * @node: numa node to allocate memory on
619 *
620 * Allocate a new &sk_buff. The returned buffer has no headroom and a
621 * tail room of at least size bytes. The object has a reference count
622 * of one. The return is the buffer. On a failure the return is %NULL.
623 *
624 * Buffers may only be allocated from interrupts using a @gfp_mask of
625 * %GFP_ATOMIC.
626 */
627 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
628 int flags, int node)
629 {
630 struct kmem_cache *cache;
631 struct sk_buff *skb;
632 bool pfmemalloc;
633 u8 *data;
634
635 cache = (flags & SKB_ALLOC_FCLONE)
636 ? skbuff_fclone_cache : skbuff_cache;
637
638 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
639 gfp_mask |= __GFP_MEMALLOC;
640
641 /* Get the HEAD */
642 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
643 likely(node == NUMA_NO_NODE || node == numa_mem_id()))
644 skb = napi_skb_cache_get();
645 else
646 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
647 if (unlikely(!skb))
648 return NULL;
649 prefetchw(skb);
650
651 /* We do our best to align skb_shared_info on a separate cache
652 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
653 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
654 * Both skb->head and skb_shared_info are cache line aligned.
655 */
656 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
657 if (unlikely(!data))
658 goto nodata;
659 /* kmalloc_size_roundup() might give us more room than requested.
660 * Put skb_shared_info exactly at the end of allocated zone,
661 * to allow max possible filling before reallocation.
662 */
663 prefetchw(data + SKB_WITH_OVERHEAD(size));
664
665 /*
666 * Only clear those fields we need to clear, not those that we will
667 * actually initialise below. Hence, don't put any more fields after
668 * the tail pointer in struct sk_buff!
669 */
670 memset(skb, 0, offsetof(struct sk_buff, tail));
671 __build_skb_around(skb, data, size);
672 skb->pfmemalloc = pfmemalloc;
673
674 if (flags & SKB_ALLOC_FCLONE) {
675 struct sk_buff_fclones *fclones;
676
677 fclones = container_of(skb, struct sk_buff_fclones, skb1);
678
679 skb->fclone = SKB_FCLONE_ORIG;
680 refcount_set(&fclones->fclone_ref, 1);
681 }
682
683 return skb;
684
685 nodata:
686 kmem_cache_free(cache, skb);
687 return NULL;
688 }
689 EXPORT_SYMBOL(__alloc_skb);
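/* Example (illustrative sketch): a typical transmit-side allocation with
 * alloc_skb(), the flags == 0 wrapper around this function. payload and
 * payload_len are assumptions for illustration.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));  // headroom for lower layers
 *	skb_put_data(skb, payload, payload_len);   // append the payload
 */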
690
691 /**
692 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
693 * @dev: network device to receive on
694 * @len: length to allocate
695 * @gfp_mask: get_free_pages mask, passed to alloc_skb
696 *
697 * Allocate a new &sk_buff and assign it a usage count of one. The
698 * buffer has NET_SKB_PAD headroom built in. Users should allocate
699 * the headroom they think they need without accounting for the
700 * built in space. The built in space is used for optimisations.
701 *
702 * %NULL is returned if there is no free memory.
703 */
704 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
705 gfp_t gfp_mask)
706 {
707 struct page_frag_cache *nc;
708 struct sk_buff *skb;
709 bool pfmemalloc;
710 void *data;
711
712 len += NET_SKB_PAD;
713
714 /* If requested length is either too small or too big,
715 * we use kmalloc() for skb->head allocation.
716 */
717 if (len <= SKB_WITH_OVERHEAD(1024) ||
718 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
719 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
720 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
721 if (!skb)
722 goto skb_fail;
723 goto skb_success;
724 }
725
726 len = SKB_HEAD_ALIGN(len);
727
728 if (sk_memalloc_socks())
729 gfp_mask |= __GFP_MEMALLOC;
730
731 if (in_hardirq() || irqs_disabled()) {
732 nc = this_cpu_ptr(&netdev_alloc_cache);
733 data = page_frag_alloc(nc, len, gfp_mask);
734 pfmemalloc = nc->pfmemalloc;
735 } else {
736 local_bh_disable();
737 nc = this_cpu_ptr(&napi_alloc_cache.page);
738 data = page_frag_alloc(nc, len, gfp_mask);
739 pfmemalloc = nc->pfmemalloc;
740 local_bh_enable();
741 }
742
743 if (unlikely(!data))
744 return NULL;
745
746 skb = __build_skb(data, len);
747 if (unlikely(!skb)) {
748 skb_free_frag(data);
749 return NULL;
750 }
751
752 if (pfmemalloc)
753 skb->pfmemalloc = 1;
754 skb->head_frag = 1;
755
756 skb_success:
757 skb_reserve(skb, NET_SKB_PAD);
758 skb->dev = dev;
759
760 skb_fail:
761 return skb;
762 }
763 EXPORT_SYMBOL(__netdev_alloc_skb);
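/* Example (illustrative sketch): a simple copy-based RX path using
 * netdev_alloc_skb_ip_align(), which adds NET_IP_ALIGN on top of the
 * NET_SKB_PAD headroom reserved here. netdev, rx_data and pkt_len are
 * assumptions for illustration.
 *
 *	skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
 *	if (unlikely(!skb)) {
 *		netdev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_put_data(skb, rx_data, pkt_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_rx(skb);
 */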
764
765 /**
766 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
767 * @napi: napi instance this buffer was allocated for
768 * @len: length to allocate
769 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
770 *
771 * Allocate a new sk_buff for use in NAPI receive. This buffer will
772 * attempt to allocate the head from a special reserved region used
773 * only for NAPI Rx allocation. By doing this we can save several
774 * CPU cycles by avoiding having to disable and re-enable IRQs.
775 *
776 * %NULL is returned if there is no free memory.
777 */
778 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
779 gfp_t gfp_mask)
780 {
781 struct napi_alloc_cache *nc;
782 struct sk_buff *skb;
783 bool pfmemalloc;
784 void *data;
785
786 DEBUG_NET_WARN_ON_ONCE(!in_softirq());
787 len += NET_SKB_PAD + NET_IP_ALIGN;
788
789 /* If requested length is either too small or too big,
790 * we use kmalloc() for skb->head allocation.
791 * When the small frag allocator is available, prefer it over kmalloc
792 * for small fragments
793 */
794 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
795 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
796 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
797 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
798 NUMA_NO_NODE);
799 if (!skb)
800 goto skb_fail;
801 goto skb_success;
802 }
803
804 nc = this_cpu_ptr(&napi_alloc_cache);
805
806 if (sk_memalloc_socks())
807 gfp_mask |= __GFP_MEMALLOC;
808
809 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
810 /* we are artificially inflating the allocation size, but
811 * that is not as bad as it may look, as:
812 * - 'len' less than GRO_MAX_HEAD makes little sense
813 * - On most systems, larger 'len' values lead to fragment
814 * size above 512 bytes
815 * - kmalloc would use the kmalloc-1k slab for such values
816 * - Builds with smaller GRO_MAX_HEAD will very likely do
817 * little networking, as that implies no WiFi and no
818 * tunnels support, and 32 bits arches.
819 */
820 len = SZ_1K;
821
822 data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
823 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
824 } else {
825 len = SKB_HEAD_ALIGN(len);
826
827 data = page_frag_alloc(&nc->page, len, gfp_mask);
828 pfmemalloc = nc->page.pfmemalloc;
829 }
830
831 if (unlikely(!data))
832 return NULL;
833
834 skb = __napi_build_skb(data, len);
835 if (unlikely(!skb)) {
836 skb_free_frag(data);
837 return NULL;
838 }
839
840 if (pfmemalloc)
841 skb->pfmemalloc = 1;
842 skb->head_frag = 1;
843
844 skb_success:
845 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
846 skb->dev = napi->dev;
847
848 skb_fail:
849 return skb;
850 }
851 EXPORT_SYMBOL(__napi_alloc_skb);
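/* Example (illustrative sketch): allocating the skb from a driver's NAPI
 * poll callback, where the per-CPU NAPI caches above can be used. napi,
 * rx_desc and hdr_len are assumptions for illustration.
 *
 *	skb = napi_alloc_skb(napi, hdr_len);
 *	if (unlikely(!skb))
 *		break;
 *	skb_put_data(skb, rx_desc->hdr, hdr_len);  // copy the packet headers
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */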
852
853 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
854 int size, unsigned int truesize)
855 {
856 skb_fill_page_desc(skb, i, page, off, size);
857 skb->len += size;
858 skb->data_len += size;
859 skb->truesize += truesize;
860 }
861 EXPORT_SYMBOL(skb_add_rx_frag);
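/* Example (illustrative sketch): attaching the remaining payload that was
 * left in an RX page to an skb whose headers were copied to the linear
 * area (a common header-split pattern). page, frag_off, data_len and
 * buf_truesize are assumptions for illustration.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			frag_off, data_len, buf_truesize);
 */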
862
863 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
864 unsigned int truesize)
865 {
866 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
867
868 skb_frag_size_add(frag, size);
869 skb->len += size;
870 skb->data_len += size;
871 skb->truesize += truesize;
872 }
873 EXPORT_SYMBOL(skb_coalesce_rx_frag);
874
875 static void skb_drop_list(struct sk_buff **listp)
876 {
877 kfree_skb_list(*listp);
878 *listp = NULL;
879 }
880
881 static inline void skb_drop_fraglist(struct sk_buff *skb)
882 {
883 skb_drop_list(&skb_shinfo(skb)->frag_list);
884 }
885
886 static void skb_clone_fraglist(struct sk_buff *skb)
887 {
888 struct sk_buff *list;
889
890 skb_walk_frags(skb, list)
891 skb_get(list);
892 }
893
894 #if IS_ENABLED(CONFIG_PAGE_POOL)
895 bool napi_pp_put_page(struct page *page, bool napi_safe)
896 {
897 bool allow_direct = false;
898 struct page_pool *pp;
899
900 page = compound_head(page);
901
902 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
903 * in order to preserve any existing bits, such as bit 0 for the
904 * head page of compound page and bit 1 for pfmemalloc page, so
905 * mask those bits for freeing side when doing below checking,
906 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
907 * to avoid recycling the pfmemalloc page.
908 */
909 if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
910 return false;
911
912 pp = page->pp;
913
914 /* Allow direct recycle if we have reasons to believe that we are
915 * in the same context as the consumer would run, so there's
916 * no possible race.
917 * __page_pool_put_page() makes sure we're not in hardirq context
918 * and interrupts are enabled prior to accessing the cache.
919 */
920 if (napi_safe || in_softirq()) {
921 const struct napi_struct *napi = READ_ONCE(pp->p.napi);
922
923 allow_direct = napi &&
924 READ_ONCE(napi->list_owner) == smp_processor_id();
925 }
926
927 /* Driver set this to memory recycling info. Reset it on recycle.
928 * This will *not* work for NIC using a split-page memory model.
929 * The page will be returned to the pool here regardless of the
930 * 'flipped' fragment being in use or not.
931 */
932 page_pool_put_full_page(pp, page, allow_direct);
933
934 return true;
935 }
936 EXPORT_SYMBOL(napi_pp_put_page);
937 #endif
938
939 static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
940 {
941 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
942 return false;
943 return napi_pp_put_page(virt_to_page(data), napi_safe);
944 }
945
946 static void skb_kfree_head(void *head, unsigned int end_offset)
947 {
948 if (end_offset == SKB_SMALL_HEAD_HEADROOM)
949 kmem_cache_free(skb_small_head_cache, head);
950 else
951 kfree(head);
952 }
953
954 static void skb_free_head(struct sk_buff *skb, bool napi_safe)
955 {
956 unsigned char *head = skb->head;
957
958 if (skb->head_frag) {
959 if (skb_pp_recycle(skb, head, napi_safe))
960 return;
961 skb_free_frag(head);
962 } else {
963 skb_kfree_head(head, skb_end_offset(skb));
964 }
965 }
966
967 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
968 bool napi_safe)
969 {
970 struct skb_shared_info *shinfo = skb_shinfo(skb);
971 int i;
972
973 if (skb->cloned &&
974 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
975 &shinfo->dataref))
976 goto exit;
977
978 if (skb_zcopy(skb)) {
979 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
980
981 skb_zcopy_clear(skb, true);
982 if (skip_unref)
983 goto free_head;
984 }
985
986 for (i = 0; i < shinfo->nr_frags; i++)
987 napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
988
989 free_head:
990 if (shinfo->frag_list)
991 kfree_skb_list_reason(shinfo->frag_list, reason);
992
993 skb_free_head(skb, napi_safe);
994 exit:
995 /* When we clone an SKB we copy the recycling bit. The pp_recycle
996 * bit is only set on the head though, so in order to avoid races
997 * while trying to recycle fragments on __skb_frag_unref() we need
998 * to make one SKB responsible for triggering the recycle path.
999 * So disable the recycling bit if an SKB is cloned and we have
1000 * additional references to the fragmented part of the SKB.
1001 * Eventually the last SKB will have the recycling bit set and it's
1002 * dataref set to 0, which will trigger the recycling
1003 */
1004 skb->pp_recycle = 0;
1005 }
1006
1007 /*
1008 * Free an skbuff by memory without cleaning the state.
1009 */
1010 static void kfree_skbmem(struct sk_buff *skb)
1011 {
1012 struct sk_buff_fclones *fclones;
1013
1014 switch (skb->fclone) {
1015 case SKB_FCLONE_UNAVAILABLE:
1016 kmem_cache_free(skbuff_cache, skb);
1017 return;
1018
1019 case SKB_FCLONE_ORIG:
1020 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1021
1022 /* We usually free the clone (TX completion) before original skb
1023 * This test would have no chance to be true for the clone,
1024 * while here, branch prediction will be good.
1025 */
1026 if (refcount_read(&fclones->fclone_ref) == 1)
1027 goto fastpath;
1028 break;
1029
1030 default: /* SKB_FCLONE_CLONE */
1031 fclones = container_of(skb, struct sk_buff_fclones, skb2);
1032 break;
1033 }
1034 if (!refcount_dec_and_test(&fclones->fclone_ref))
1035 return;
1036 fastpath:
1037 kmem_cache_free(skbuff_fclone_cache, fclones);
1038 }
1039
1040 void skb_release_head_state(struct sk_buff *skb)
1041 {
1042 skb_dst_drop(skb);
1043 if (skb->destructor) {
1044 DEBUG_NET_WARN_ON_ONCE(in_hardirq());
1045 skb->destructor(skb);
1046 }
1047 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1048 nf_conntrack_put(skb_nfct(skb));
1049 #endif
1050 skb_ext_put(skb);
1051 }
1052
1053 /* Free everything but the sk_buff shell. */
1054 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
1055 bool napi_safe)
1056 {
1057 skb_release_head_state(skb);
1058 if (likely(skb->head))
1059 skb_release_data(skb, reason, napi_safe);
1060 }
1061
1062 /**
1063 * __kfree_skb - private function
1064 * @skb: buffer
1065 *
1066 * Free an sk_buff. Release anything attached to the buffer.
1067 * Clean the state. This is an internal helper function. Users should
1068 * always call kfree_skb
1069 */
1070
1071 void __kfree_skb(struct sk_buff *skb)
1072 {
1073 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
1074 kfree_skbmem(skb);
1075 }
1076 EXPORT_SYMBOL(__kfree_skb);
1077
1078 static __always_inline
1079 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1080 {
1081 if (unlikely(!skb_unref(skb)))
1082 return false;
1083
1084 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
1085 u32_get_bits(reason,
1086 SKB_DROP_REASON_SUBSYS_MASK) >=
1087 SKB_DROP_REASON_SUBSYS_NUM);
1088
1089 if (reason == SKB_CONSUMED)
1090 trace_consume_skb(skb, __builtin_return_address(0));
1091 else
1092 trace_kfree_skb(skb, __builtin_return_address(0), reason);
1093 return true;
1094 }
1095
1096 /**
1097 * kfree_skb_reason - free an sk_buff with special reason
1098 * @skb: buffer to free
1099 * @reason: reason why this skb is dropped
1100 *
1101 * Drop a reference to the buffer and free it if the usage count has
1102 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb'
1103 * tracepoint.
1104 */
1105 void __fix_address
1106 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1107 {
1108 if (__kfree_skb_reason(skb, reason))
1109 __kfree_skb(skb);
1110 }
1111 EXPORT_SYMBOL(kfree_skb_reason);
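/* Example (illustrative sketch): dropping with an explicit reason from a
 * hypothetical receive handler, so the reason shows up in the kfree_skb
 * tracepoint and in drop monitors. struct my_proto_hdr is an assumption
 * for illustration.
 *
 *	if (unlikely(!pskb_may_pull(skb, sizeof(struct my_proto_hdr)))) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
 *		return NET_RX_DROP;
 *	}
 */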
1112
1113 #define KFREE_SKB_BULK_SIZE 16
1114
1115 struct skb_free_array {
1116 unsigned int skb_count;
1117 void *skb_array[KFREE_SKB_BULK_SIZE];
1118 };
1119
1120 static void kfree_skb_add_bulk(struct sk_buff *skb,
1121 struct skb_free_array *sa,
1122 enum skb_drop_reason reason)
1123 {
1124 /* if SKB is a clone, don't handle this case */
1125 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
1126 __kfree_skb(skb);
1127 return;
1128 }
1129
1130 skb_release_all(skb, reason, false);
1131 sa->skb_array[sa->skb_count++] = skb;
1132
1133 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
1134 kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
1135 sa->skb_array);
1136 sa->skb_count = 0;
1137 }
1138 }
1139
1140 void __fix_address
1141 kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
1142 {
1143 struct skb_free_array sa;
1144
1145 sa.skb_count = 0;
1146
1147 while (segs) {
1148 struct sk_buff *next = segs->next;
1149
1150 if (__kfree_skb_reason(segs, reason)) {
1151 skb_poison_list(segs);
1152 kfree_skb_add_bulk(segs, &sa, reason);
1153 }
1154
1155 segs = next;
1156 }
1157
1158 if (sa.skb_count)
1159 kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
1160 }
1161 EXPORT_SYMBOL(kfree_skb_list_reason);
1162
1163 /* Dump skb information and contents.
1164 *
1165 * Must only be called from net_ratelimit()-ed paths.
1166 *
1167 * Dumps whole packets if full_pkt, only headers otherwise.
1168 */
1169 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
1170 {
1171 struct skb_shared_info *sh = skb_shinfo(skb);
1172 struct net_device *dev = skb->dev;
1173 struct sock *sk = skb->sk;
1174 struct sk_buff *list_skb;
1175 bool has_mac, has_trans;
1176 int headroom, tailroom;
1177 int i, len, seg_len;
1178
1179 if (full_pkt)
1180 len = skb->len;
1181 else
1182 len = min_t(int, skb->len, MAX_HEADER + 128);
1183
1184 headroom = skb_headroom(skb);
1185 tailroom = skb_tailroom(skb);
1186
1187 has_mac = skb_mac_header_was_set(skb);
1188 has_trans = skb_transport_header_was_set(skb);
1189
1190 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
1191 "mac=(%d,%d) net=(%d,%d) trans=%d\n"
1192 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
1193 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
1194 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
1195 level, skb->len, headroom, skb_headlen(skb), tailroom,
1196 has_mac ? skb->mac_header : -1,
1197 has_mac ? skb_mac_header_len(skb) : -1,
1198 skb->network_header,
1199 has_trans ? skb_network_header_len(skb) : -1,
1200 has_trans ? skb->transport_header : -1,
1201 sh->tx_flags, sh->nr_frags,
1202 sh->gso_size, sh->gso_type, sh->gso_segs,
1203 skb->csum, skb->ip_summed, skb->csum_complete_sw,
1204 skb->csum_valid, skb->csum_level,
1205 skb->hash, skb->sw_hash, skb->l4_hash,
1206 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
1207
1208 if (dev)
1209 printk("%sdev name=%s feat=%pNF\n",
1210 level, dev->name, &dev->features);
1211 if (sk)
1212 printk("%ssk family=%hu type=%u proto=%u\n",
1213 level, sk->sk_family, sk->sk_type, sk->sk_protocol);
1214
1215 if (full_pkt && headroom)
1216 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
1217 16, 1, skb->head, headroom, false);
1218
1219 seg_len = min_t(int, skb_headlen(skb), len);
1220 if (seg_len)
1221 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
1222 16, 1, skb->data, seg_len, false);
1223 len -= seg_len;
1224
1225 if (full_pkt && tailroom)
1226 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
1227 16, 1, skb_tail_pointer(skb), tailroom, false);
1228
1229 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
1230 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1231 u32 p_off, p_len, copied;
1232 struct page *p;
1233 u8 *vaddr;
1234
1235 skb_frag_foreach_page(frag, skb_frag_off(frag),
1236 skb_frag_size(frag), p, p_off, p_len,
1237 copied) {
1238 seg_len = min_t(int, p_len, len);
1239 vaddr = kmap_atomic(p);
1240 print_hex_dump(level, "skb frag: ",
1241 DUMP_PREFIX_OFFSET,
1242 16, 1, vaddr + p_off, seg_len, false);
1243 kunmap_atomic(vaddr);
1244 len -= seg_len;
1245 if (!len)
1246 break;
1247 }
1248 }
1249
1250 if (full_pkt && skb_has_frag_list(skb)) {
1251 printk("skb fraglist:\n");
1252 skb_walk_frags(skb, list_skb)
1253 skb_dump(level, list_skb, true);
1254 }
1255 }
1256 EXPORT_SYMBOL(skb_dump);
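/* Example (illustrative sketch): dumping a suspicious packet from a
 * rate-limited path, as required by the comment above.
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */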
1257
1258 /**
1259 * skb_tx_error - report an sk_buff xmit error
1260 * @skb: buffer that triggered an error
1261 *
1262 * Report xmit error if a device callback is tracking this skb.
1263 * skb must be freed afterwards.
1264 */
1265 void skb_tx_error(struct sk_buff *skb)
1266 {
1267 if (skb) {
1268 skb_zcopy_downgrade_managed(skb);
1269 skb_zcopy_clear(skb, true);
1270 }
1271 }
1272 EXPORT_SYMBOL(skb_tx_error);
1273
1274 #ifdef CONFIG_TRACEPOINTS
1275 /**
1276 * consume_skb - free an skbuff
1277 * @skb: buffer to free
1278 *
1279 * Drop a ref to the buffer and free it if the usage count has hit zero
1280 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
1281 * is being dropped after a failure and notes that
1282 */
1283 void consume_skb(struct sk_buff *skb)
1284 {
1285 if (!skb_unref(skb))
1286 return;
1287
1288 trace_consume_skb(skb, __builtin_return_address(0));
1289 __kfree_skb(skb);
1290 }
1291 EXPORT_SYMBOL(consume_skb);
1292 #endif
1293
1294 /**
1295 * __consume_stateless_skb - free an skbuff, assuming it is stateless
1296 * @skb: buffer to free
1297 *
1298 * Alike consume_skb(), but this variant assumes that this is the last
1299 * skb reference and all the head states have been already dropped
1300 */
1301 void __consume_stateless_skb(struct sk_buff *skb)
1302 {
1303 trace_consume_skb(skb, __builtin_return_address(0));
1304 skb_release_data(skb, SKB_CONSUMED, false);
1305 kfree_skbmem(skb);
1306 }
1307
1308 static void napi_skb_cache_put(struct sk_buff *skb)
1309 {
1310 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
1311 u32 i;
1312
1313 kasan_poison_object_data(skbuff_cache, skb);
1314 nc->skb_cache[nc->skb_count++] = skb;
1315
1316 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
1317 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
1318 kasan_unpoison_object_data(skbuff_cache,
1319 nc->skb_cache[i]);
1320
1321 kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
1322 nc->skb_cache + NAPI_SKB_CACHE_HALF);
1323 nc->skb_count = NAPI_SKB_CACHE_HALF;
1324 }
1325 }
1326
1327 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
1328 {
1329 skb_release_all(skb, reason, true);
1330 napi_skb_cache_put(skb);
1331 }
1332
1333 void napi_skb_free_stolen_head(struct sk_buff *skb)
1334 {
1335 if (unlikely(skb->slow_gro)) {
1336 nf_reset_ct(skb);
1337 skb_dst_drop(skb);
1338 skb_ext_put(skb);
1339 skb_orphan(skb);
1340 skb->slow_gro = 0;
1341 }
1342 napi_skb_cache_put(skb);
1343 }
1344
1345 void napi_consume_skb(struct sk_buff *skb, int budget)
1346 {
1347 /* Zero budget indicates non-NAPI context called us, like netpoll */
1348 if (unlikely(!budget)) {
1349 dev_consume_skb_any(skb);
1350 return;
1351 }
1352
1353 DEBUG_NET_WARN_ON_ONCE(!in_softirq());
1354
1355 if (!skb_unref(skb))
1356 return;
1357
1358 /* if reaching here SKB is ready to free */
1359 trace_consume_skb(skb, __builtin_return_address(0));
1360
1361 /* if SKB is a clone, don't handle this case */
1362 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
1363 __kfree_skb(skb);
1364 return;
1365 }
1366
1367 skb_release_all(skb, SKB_CONSUMED, !!budget);
1368 napi_skb_cache_put(skb);
1369 }
1370 EXPORT_SYMBOL(napi_consume_skb);
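/* Example (illustrative sketch): freeing transmitted skbs from a driver's
 * TX completion handler, which runs from its NAPI poll callback. tx_buf is
 * an assumption for illustration; a zero budget (netpoll) is handled above
 * by falling back to dev_consume_skb_any().
 *
 *	napi_consume_skb(tx_buf->skb, budget);
 *	tx_buf->skb = NULL;
 */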
1371
1372 /* Make sure a field is contained by headers group */
1373 #define CHECK_SKB_FIELD(field) \
1374 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
1375 offsetof(struct sk_buff, headers.field)); \
1376
1377 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1378 {
1379 new->tstamp = old->tstamp;
1380 /* We do not copy old->sk */
1381 new->dev = old->dev;
1382 memcpy(new->cb, old->cb, sizeof(old->cb));
1383 skb_dst_copy(new, old);
1384 __skb_ext_copy(new, old);
1385 __nf_copy(new, old, false);
1386
1387 /* Note : this field could be in the headers group.
1388 * It is not yet because we do not want to have a 16 bit hole
1389 */
1390 new->queue_mapping = old->queue_mapping;
1391
1392 memcpy(&new->headers, &old->headers, sizeof(new->headers));
1393 CHECK_SKB_FIELD(protocol);
1394 CHECK_SKB_FIELD(csum);
1395 CHECK_SKB_FIELD(hash);
1396 CHECK_SKB_FIELD(priority);
1397 CHECK_SKB_FIELD(skb_iif);
1398 CHECK_SKB_FIELD(vlan_proto);
1399 CHECK_SKB_FIELD(vlan_tci);
1400 CHECK_SKB_FIELD(transport_header);
1401 CHECK_SKB_FIELD(network_header);
1402 CHECK_SKB_FIELD(mac_header);
1403 CHECK_SKB_FIELD(inner_protocol);
1404 CHECK_SKB_FIELD(inner_transport_header);
1405 CHECK_SKB_FIELD(inner_network_header);
1406 CHECK_SKB_FIELD(inner_mac_header);
1407 CHECK_SKB_FIELD(mark);
1408 #ifdef CONFIG_NETWORK_SECMARK
1409 CHECK_SKB_FIELD(secmark);
1410 #endif
1411 #ifdef CONFIG_NET_RX_BUSY_POLL
1412 CHECK_SKB_FIELD(napi_id);
1413 #endif
1414 CHECK_SKB_FIELD(alloc_cpu);
1415 #ifdef CONFIG_XPS
1416 CHECK_SKB_FIELD(sender_cpu);
1417 #endif
1418 #ifdef CONFIG_NET_SCHED
1419 CHECK_SKB_FIELD(tc_index);
1420 #endif
1421
1422 }
1423
1424 /*
1425 * You should not add any new code to this function. Add it to
1426 * __copy_skb_header above instead.
1427 */
1428 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1429 {
1430 #define C(x) n->x = skb->x
1431
1432 n->next = n->prev = NULL;
1433 n->sk = NULL;
1434 __copy_skb_header(n, skb);
1435
1436 C(len);
1437 C(data_len);
1438 C(mac_len);
1439 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1440 n->cloned = 1;
1441 n->nohdr = 0;
1442 n->peeked = 0;
1443 C(pfmemalloc);
1444 C(pp_recycle);
1445 n->destructor = NULL;
1446 C(tail);
1447 C(end);
1448 C(head);
1449 C(head_frag);
1450 C(data);
1451 C(truesize);
1452 refcount_set(&n->users, 1);
1453
1454 atomic_inc(&(skb_shinfo(skb)->dataref));
1455 skb->cloned = 1;
1456
1457 return n;
1458 #undef C
1459 }
1460
1461 /**
1462 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1463 * @first: first sk_buff of the msg
1464 */
1465 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1466 {
1467 struct sk_buff *n;
1468
1469 n = alloc_skb(0, GFP_ATOMIC);
1470 if (!n)
1471 return NULL;
1472
1473 n->len = first->len;
1474 n->data_len = first->len;
1475 n->truesize = first->truesize;
1476
1477 skb_shinfo(n)->frag_list = first;
1478
1479 __copy_skb_header(n, first);
1480 n->destructor = NULL;
1481
1482 return n;
1483 }
1484 EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1485
1486 /**
1487 * skb_morph - morph one skb into another
1488 * @dst: the skb to receive the contents
1489 * @src: the skb to supply the contents
1490 *
1491 * This is identical to skb_clone except that the target skb is
1492 * supplied by the user.
1493 *
1494 * The target skb is returned upon exit.
1495 */
1496 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1497 {
1498 skb_release_all(dst, SKB_CONSUMED, false);
1499 return __skb_clone(dst, src);
1500 }
1501 EXPORT_SYMBOL_GPL(skb_morph);
1502
1503 int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1504 {
1505 unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
1506 struct user_struct *user;
1507
1508 if (capable(CAP_IPC_LOCK) || !size)
1509 return 0;
1510
1511 rlim = rlimit(RLIMIT_MEMLOCK);
1512 if (rlim == RLIM_INFINITY)
1513 return 0;
1514
1515 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
1516 max_pg = rlim >> PAGE_SHIFT;
1517 user = mmp->user ? : current_user();
1518
1519 old_pg = atomic_long_read(&user->locked_vm);
1520 do {
1521 new_pg = old_pg + num_pg;
1522 if (new_pg > max_pg)
1523 return -ENOBUFS;
1524 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
1525
1526 if (!mmp->user) {
1527 mmp->user = get_uid(user);
1528 mmp->num_pg = num_pg;
1529 } else {
1530 mmp->num_pg += num_pg;
1531 }
1532
1533 return 0;
1534 }
1535 EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1536
1537 void mm_unaccount_pinned_pages(struct mmpin *mmp)
1538 {
1539 if (mmp->user) {
1540 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1541 free_uid(mmp->user);
1542 }
1543 }
1544 EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1545
1546 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
1547 {
1548 struct ubuf_info_msgzc *uarg;
1549 struct sk_buff *skb;
1550
1551 WARN_ON_ONCE(!in_task());
1552
1553 skb = sock_omalloc(sk, 0, GFP_KERNEL);
1554 if (!skb)
1555 return NULL;
1556
1557 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1558 uarg = (void *)skb->cb;
1559 uarg->mmp.user = NULL;
1560
1561 if (mm_account_pinned_pages(&uarg->mmp, size)) {
1562 kfree_skb(skb);
1563 return NULL;
1564 }
1565
1566 uarg->ubuf.callback = msg_zerocopy_callback;
1567 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1568 uarg->len = 1;
1569 uarg->bytelen = size;
1570 uarg->zerocopy = 1;
1571 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
1572 refcount_set(&uarg->ubuf.refcnt, 1);
1573 sock_hold(sk);
1574
1575 return &uarg->ubuf;
1576 }
1577
1578 static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
1579 {
1580 return container_of((void *)uarg, struct sk_buff, cb);
1581 }
1582
1583 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1584 struct ubuf_info *uarg)
1585 {
1586 if (uarg) {
1587 struct ubuf_info_msgzc *uarg_zc;
1588 const u32 byte_limit = 1 << 19; /* limit to a few TSO */
1589 u32 bytelen, next;
1590
1591 /* there might be non MSG_ZEROCOPY users */
1592 if (uarg->callback != msg_zerocopy_callback)
1593 return NULL;
1594
1595 /* realloc only when socket is locked (TCP, UDP cork),
1596 * so uarg->len and sk_zckey access is serialized
1597 */
1598 if (!sock_owned_by_user(sk)) {
1599 WARN_ON_ONCE(1);
1600 return NULL;
1601 }
1602
1603 uarg_zc = uarg_to_msgzc(uarg);
1604 bytelen = uarg_zc->bytelen + size;
1605 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1606 /* TCP can create new skb to attach new uarg */
1607 if (sk->sk_type == SOCK_STREAM)
1608 goto new_alloc;
1609 return NULL;
1610 }
1611
1612 next = (u32)atomic_read(&sk->sk_zckey);
1613 if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
1614 if (mm_account_pinned_pages(&uarg_zc->mmp, size))
1615 return NULL;
1616 uarg_zc->len++;
1617 uarg_zc->bytelen = bytelen;
1618 atomic_set(&sk->sk_zckey, ++next);
1619
1620 /* no extra ref when appending to datagram (MSG_MORE) */
1621 if (sk->sk_type == SOCK_STREAM)
1622 net_zcopy_get(uarg);
1623
1624 return uarg;
1625 }
1626 }
1627
1628 new_alloc:
1629 return msg_zerocopy_alloc(sk, size);
1630 }
1631 EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
1632
1633 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1634 {
1635 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1636 u32 old_lo, old_hi;
1637 u64 sum_len;
1638
1639 old_lo = serr->ee.ee_info;
1640 old_hi = serr->ee.ee_data;
1641 sum_len = old_hi - old_lo + 1ULL + len;
1642
1643 if (sum_len >= (1ULL << 32))
1644 return false;
1645
1646 if (lo != old_hi + 1)
1647 return false;
1648
1649 serr->ee.ee_data += len;
1650 return true;
1651 }
1652
1653 static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
1654 {
1655 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1656 struct sock_exterr_skb *serr;
1657 struct sock *sk = skb->sk;
1658 struct sk_buff_head *q;
1659 unsigned long flags;
1660 bool is_zerocopy;
1661 u32 lo, hi;
1662 u16 len;
1663
1664 mm_unaccount_pinned_pages(&uarg->mmp);
1665
1666 /* if !len, there was only 1 call, and it was aborted
1667 * so do not queue a completion notification
1668 */
1669 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1670 goto release;
1671
1672 len = uarg->len;
1673 lo = uarg->id;
1674 hi = uarg->id + len - 1;
1675 is_zerocopy = uarg->zerocopy;
1676
1677 serr = SKB_EXT_ERR(skb);
1678 memset(serr, 0, sizeof(*serr));
1679 serr->ee.ee_errno = 0;
1680 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1681 serr->ee.ee_data = hi;
1682 serr->ee.ee_info = lo;
1683 if (!is_zerocopy)
1684 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1685
1686 q = &sk->sk_error_queue;
1687 spin_lock_irqsave(&q->lock, flags);
1688 tail = skb_peek_tail(q);
1689 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1690 !skb_zerocopy_notify_extend(tail, lo, len)) {
1691 __skb_queue_tail(q, skb);
1692 skb = NULL;
1693 }
1694 spin_unlock_irqrestore(&q->lock, flags);
1695
1696 sk_error_report(sk);
1697
1698 release:
1699 consume_skb(skb);
1700 sock_put(sk);
1701 }
1702
1703 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1704 bool success)
1705 {
1706 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
1707
1708 uarg_zc->zerocopy = uarg_zc->zerocopy & success;
1709
1710 if (refcount_dec_and_test(&uarg->refcnt))
1711 __msg_zerocopy_callback(uarg_zc);
1712 }
1713 EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
1714
1715 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1716 {
1717 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;
1718
1719 atomic_dec(&sk->sk_zckey);
1720 uarg_to_msgzc(uarg)->len--;
1721
1722 if (have_uref)
1723 msg_zerocopy_callback(NULL, uarg, true);
1724 }
1725 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
1726
1727 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1728 struct msghdr *msg, int len,
1729 struct ubuf_info *uarg)
1730 {
1731 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1732 int err, orig_len = skb->len;
1733
1734 /* An skb can only point to one uarg. This edge case happens when
1735 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1736 */
1737 if (orig_uarg && uarg != orig_uarg)
1738 return -EEXIST;
1739
1740 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
1741 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1742 struct sock *save_sk = skb->sk;
1743
1744 /* Streams do not free skb on error. Reset to prev state. */
1745 iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
1746 skb->sk = sk;
1747 ___pskb_trim(skb, orig_len);
1748 skb->sk = save_sk;
1749 return err;
1750 }
1751
1752 skb_zcopy_set(skb, uarg, NULL);
1753 return skb->len - orig_len;
1754 }
1755 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
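/* Example (illustrative sketch): how a MSG_ZEROCOPY stream sendmsg() path
 * ties the pieces above together, loosely following tcp_sendmsg_locked().
 * Error unwinding is elided and the flow is shown only as an illustration.
 *
 *	uarg = msg_zerocopy_realloc(sk, len, skb_zcopy(skb));
 *	if (!uarg)
 *		...fall back to copying...
 *	copied = skb_zerocopy_iter_stream(sk, skb, msg, len, uarg);
 *	if (copied < 0)
 *		...handle the error, net_zcopy_put_abort(uarg, true) if needed...
 *	// completion later runs msg_zerocopy_callback(), which queues the
 *	// notification on sk->sk_error_queue
 */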
1756
1757 void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
1758 {
1759 int i;
1760
1761 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
1762 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1763 skb_frag_ref(skb, i);
1764 }
1765 EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
1766
1767 static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1768 gfp_t gfp_mask)
1769 {
1770 if (skb_zcopy(orig)) {
1771 if (skb_zcopy(nskb)) {
1772 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1773 if (!gfp_mask) {
1774 WARN_ON_ONCE(1);
1775 return -ENOMEM;
1776 }
1777 if (skb_uarg(nskb) == skb_uarg(orig))
1778 return 0;
1779 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1780 return -EIO;
1781 }
1782 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1783 }
1784 return 0;
1785 }
1786
1787 /**
1788 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1789 * @skb: the skb to modify
1790 * @gfp_mask: allocation priority
1791 *
1792 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1793 * It will copy all frags into kernel and drop the reference
1794 * to userspace pages.
1795 *
1796 * If this function is called from an interrupt, @gfp_mask must be
1797 * %GFP_ATOMIC.
1798 *
1799 * Returns 0 on success or a negative error code on failure
1800 * to allocate kernel memory to copy to.
1801 */
1802 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1803 {
1804 int num_frags = skb_shinfo(skb)->nr_frags;
1805 struct page *page, *head = NULL;
1806 int i, order, psize, new_frags;
1807 u32 d_off;
1808
1809 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1810 return -EINVAL;
1811
1812 if (!num_frags)
1813 goto release;
1814
1815 /* We might have to allocate high order pages, so compute what minimum
1816 * page order is needed.
1817 */
1818 order = 0;
1819 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
1820 order++;
1821 psize = (PAGE_SIZE << order);
1822
1823 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
1824 for (i = 0; i < new_frags; i++) {
1825 page = alloc_pages(gfp_mask | __GFP_COMP, order);
1826 if (!page) {
1827 while (head) {
1828 struct page *next = (struct page *)page_private(head);
1829 put_page(head);
1830 head = next;
1831 }
1832 return -ENOMEM;
1833 }
1834 set_page_private(page, (unsigned long)head);
1835 head = page;
1836 }
1837
1838 page = head;
1839 d_off = 0;
1840 for (i = 0; i < num_frags; i++) {
1841 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1842 u32 p_off, p_len, copied;
1843 struct page *p;
1844 u8 *vaddr;
1845
1846 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1847 p, p_off, p_len, copied) {
1848 u32 copy, done = 0;
1849 vaddr = kmap_atomic(p);
1850
1851 while (done < p_len) {
1852 if (d_off == psize) {
1853 d_off = 0;
1854 page = (struct page *)page_private(page);
1855 }
1856 copy = min_t(u32, psize - d_off, p_len - done);
1857 memcpy(page_address(page) + d_off,
1858 vaddr + p_off + done, copy);
1859 done += copy;
1860 d_off += copy;
1861 }
1862 kunmap_atomic(vaddr);
1863 }
1864 }
1865
1866 /* skb frags release userspace buffers */
1867 for (i = 0; i < num_frags; i++)
1868 skb_frag_unref(skb, i);
1869
1870 /* skb frags point to kernel buffers */
1871 for (i = 0; i < new_frags - 1; i++) {
1872 __skb_fill_page_desc(skb, i, head, 0, psize);
1873 head = (struct page *)page_private(head);
1874 }
1875 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1876 skb_shinfo(skb)->nr_frags = new_frags;
1877
1878 release:
1879 skb_zcopy_clear(skb, false);
1880 return 0;
1881 }
1882 EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1883
1884 /**
1885 * skb_clone - duplicate an sk_buff
1886 * @skb: buffer to clone
1887 * @gfp_mask: allocation priority
1888 *
1889 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1890 * copies share the same packet data but not structure. The new
1891 * buffer has a reference count of 1. If the allocation fails the
1892 * function returns %NULL otherwise the new buffer is returned.
1893 *
1894 * If this function is called from an interrupt, @gfp_mask must be
1895 * %GFP_ATOMIC.
1896 */
1897
1898 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1899 {
1900 struct sk_buff_fclones *fclones = container_of(skb,
1901 struct sk_buff_fclones,
1902 skb1);
1903 struct sk_buff *n;
1904
1905 if (skb_orphan_frags(skb, gfp_mask))
1906 return NULL;
1907
1908 if (skb->fclone == SKB_FCLONE_ORIG &&
1909 refcount_read(&fclones->fclone_ref) == 1) {
1910 n = &fclones->skb2;
1911 refcount_set(&fclones->fclone_ref, 2);
1912 n->fclone = SKB_FCLONE_CLONE;
1913 } else {
1914 if (skb_pfmemalloc(skb))
1915 gfp_mask |= __GFP_MEMALLOC;
1916
1917 n = kmem_cache_alloc(skbuff_cache, gfp_mask);
1918 if (!n)
1919 return NULL;
1920
1921 n->fclone = SKB_FCLONE_UNAVAILABLE;
1922 }
1923
1924 return __skb_clone(n, skb);
1925 }
1926 EXPORT_SYMBOL(skb_clone);
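
/* Illustrative sketch, not part of the original file: a common pattern where
 * the caller keeps its own reference and hands a clone to another layer.
 * example_clone_and_xmit() and its use of @dev are hypothetical.
 */
static int __maybe_unused example_clone_and_xmit(struct sk_buff *skb,
						 struct net_device *dev)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	/* Both skbs share the same packet data; only metadata is duplicated. */
	clone->dev = dev;
	return dev_queue_xmit(clone);
}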
1927
1928 void skb_headers_offset_update(struct sk_buff *skb, int off)
1929 {
1930 /* Only adjust this if it actually is csum_start rather than csum */
1931 if (skb->ip_summed == CHECKSUM_PARTIAL)
1932 skb->csum_start += off;
1933 /* {transport,network,mac}_header and tail are relative to skb->head */
1934 skb->transport_header += off;
1935 skb->network_header += off;
1936 if (skb_mac_header_was_set(skb))
1937 skb->mac_header += off;
1938 skb->inner_transport_header += off;
1939 skb->inner_network_header += off;
1940 skb->inner_mac_header += off;
1941 }
1942 EXPORT_SYMBOL(skb_headers_offset_update);
1943
1944 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1945 {
1946 __copy_skb_header(new, old);
1947
1948 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1949 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1950 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1951 }
1952 EXPORT_SYMBOL(skb_copy_header);
1953
1954 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1955 {
1956 if (skb_pfmemalloc(skb))
1957 return SKB_ALLOC_RX;
1958 return 0;
1959 }
1960
1961 /**
1962 * skb_copy - create private copy of an sk_buff
1963 * @skb: buffer to copy
1964 * @gfp_mask: allocation priority
1965 *
1966 * Make a copy of both an &sk_buff and its data. This is used when the
1967 * caller wishes to modify the data and needs a private copy of the
1968 * data to alter. Returns %NULL on failure or the pointer to the buffer
1969 * on success. The returned buffer has a reference count of 1.
1970 *
1971 * As a by-product this function converts a non-linear &sk_buff to a linear
1972 * one, so that the &sk_buff becomes completely private and the caller is
1973 * allowed to modify all the data of the returned buffer. This means that
1974 * this function is not recommended for use in circumstances when only the
1975 * header is going to be modified. Use pskb_copy() instead.
1976 */
1977
1978 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1979 {
1980 struct sk_buff *n;
1981 unsigned int size;
1982 int headerlen;
1983
1984 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
1985 return NULL;
1986
1987 headerlen = skb_headroom(skb);
1988 size = skb_end_offset(skb) + skb->data_len;
1989 n = __alloc_skb(size, gfp_mask,
1990 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1991 if (!n)
1992 return NULL;
1993
1994 /* Set the data pointer */
1995 skb_reserve(n, headerlen);
1996 /* Set the tail pointer and length */
1997 skb_put(n, skb->len);
1998
1999 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
2000
2001 skb_copy_header(n, skb);
2002 return n;
2003 }
2004 EXPORT_SYMBOL(skb_copy);
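
/* Illustrative sketch, not part of the original file: take a fully private,
 * linear copy before rewriting payload bytes in place.  The zeroing of the
 * first few bytes is only a stand-in for real header rewriting.
 */
static struct sk_buff *__maybe_unused example_private_copy(struct sk_buff *skb)
{
	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

	if (!copy)
		return NULL;
	/* copy is linear and unshared, so direct writes are safe. */
	memset(copy->data, 0, min_t(unsigned int, copy->len, 4));
	return copy;
}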
2005
2006 /**
2007 * __pskb_copy_fclone - create copy of an sk_buff with private head.
2008 * @skb: buffer to copy
2009 * @headroom: headroom of new skb
2010 * @gfp_mask: allocation priority
2011 * @fclone: if true allocate the copy of the skb from the fclone
2012 * cache instead of the head cache; it is recommended to set this
2013 * to true for the cases where the copy will likely be cloned
2014 *
2015 * Make a copy of both an &sk_buff and part of its data, located
2016 * in the header. Fragmented data remain shared. This is used when
2017 * the caller wishes to modify only the header of the &sk_buff and needs
2018 * a private copy of the header to alter. Returns %NULL on failure
2019 * or the pointer to the buffer on success.
2020 * The returned buffer has a reference count of 1.
2021 */
2022
2023 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
2024 gfp_t gfp_mask, bool fclone)
2025 {
2026 unsigned int size = skb_headlen(skb) + headroom;
2027 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
2028 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
2029
2030 if (!n)
2031 goto out;
2032
2033 /* Set the data pointer */
2034 skb_reserve(n, headroom);
2035 /* Set the tail pointer and length */
2036 skb_put(n, skb_headlen(skb));
2037 /* Copy the bytes */
2038 skb_copy_from_linear_data(skb, n->data, n->len);
2039
2040 n->truesize += skb->data_len;
2041 n->data_len = skb->data_len;
2042 n->len = skb->len;
2043
2044 if (skb_shinfo(skb)->nr_frags) {
2045 int i;
2046
2047 if (skb_orphan_frags(skb, gfp_mask) ||
2048 skb_zerocopy_clone(n, skb, gfp_mask)) {
2049 kfree_skb(n);
2050 n = NULL;
2051 goto out;
2052 }
2053 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2054 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
2055 skb_frag_ref(skb, i);
2056 }
2057 skb_shinfo(n)->nr_frags = i;
2058 }
2059
2060 if (skb_has_frag_list(skb)) {
2061 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
2062 skb_clone_fraglist(n);
2063 }
2064
2065 skb_copy_header(n, skb);
2066 out:
2067 return n;
2068 }
2069 EXPORT_SYMBOL(__pskb_copy_fclone);
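
/* Illustrative sketch, not part of the original file: when only the headers
 * will be edited, pskb_copy()/__pskb_copy() (which end up here) are the
 * cheaper choice - fragment pages stay shared and only gain a reference.
 */
static struct sk_buff *__maybe_unused example_header_only_copy(struct sk_buff *skb)
{
	/* Keep the existing headroom; fragments are not copied. */
	return __pskb_copy(skb, skb_headroom(skb), GFP_ATOMIC);
}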
2070
2071 /**
2072 * pskb_expand_head - reallocate header of &sk_buff
2073 * @skb: buffer to reallocate
2074 * @nhead: room to add at head
2075 * @ntail: room to add at tail
2076 * @gfp_mask: allocation priority
2077 *
2078 * Expands (or creates identical copy, if @nhead and @ntail are zero)
2079 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2080 * reference count of 1. Returns zero on success, or a negative error code
2081 * if expansion failed. In the latter case, the &sk_buff is not changed.
2082 *
2083 * All the pointers pointing into skb header may change and must be
2084 * reloaded after call to this function.
2085 */
2086
2087 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
2088 gfp_t gfp_mask)
2089 {
2090 unsigned int osize = skb_end_offset(skb);
2091 unsigned int size = osize + nhead + ntail;
2092 long off;
2093 u8 *data;
2094 int i;
2095
2096 BUG_ON(nhead < 0);
2097
2098 BUG_ON(skb_shared(skb));
2099
2100 skb_zcopy_downgrade_managed(skb);
2101
2102 if (skb_pfmemalloc(skb))
2103 gfp_mask |= __GFP_MEMALLOC;
2104
2105 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
2106 if (!data)
2107 goto nodata;
2108 size = SKB_WITH_OVERHEAD(size);
2109
2110 /* Copy only real data... and, alas, header. This should be
2111 * optimized for the cases when header is void.
2112 */
2113 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
2114
2115 memcpy((struct skb_shared_info *)(data + size),
2116 skb_shinfo(skb),
2117 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
2118
2119 /*
2120 * if shinfo is shared we must drop the old head gracefully, but if it
2121 * is not we can just drop the old head and let the existing refcount
2122 * be since all we did is relocate the values
2123 */
2124 if (skb_cloned(skb)) {
2125 if (skb_orphan_frags(skb, gfp_mask))
2126 goto nofrags;
2127 if (skb_zcopy(skb))
2128 refcount_inc(&skb_uarg(skb)->refcnt);
2129 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2130 skb_frag_ref(skb, i);
2131
2132 if (skb_has_frag_list(skb))
2133 skb_clone_fraglist(skb);
2134
2135 skb_release_data(skb, SKB_CONSUMED, false);
2136 } else {
2137 skb_free_head(skb, false);
2138 }
2139 off = (data + nhead) - skb->head;
2140
2141 skb->head = data;
2142 skb->head_frag = 0;
2143 skb->data += off;
2144
2145 skb_set_end_offset(skb, size);
2146 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2147 off = nhead;
2148 #endif
2149 skb->tail += off;
2150 skb_headers_offset_update(skb, nhead);
2151 skb->cloned = 0;
2152 skb->hdr_len = 0;
2153 skb->nohdr = 0;
2154 atomic_set(&skb_shinfo(skb)->dataref, 1);
2155
2156 skb_metadata_clear(skb);
2157
2158 /* It is not generally safe to change skb->truesize.
2159 * For the moment, we only really care about the rx path, or
2160 * the case when the skb is orphaned (not attached to a socket).
2161 */
2162 if (!skb->sk || skb->destructor == sock_edemux)
2163 skb->truesize += size - osize;
2164
2165 return 0;
2166
2167 nofrags:
2168 skb_kfree_head(data, size);
2169 nodata:
2170 return -ENOMEM;
2171 }
2172 EXPORT_SYMBOL(pskb_expand_head);
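
/* Illustrative sketch, not part of the original file: grow the headroom of a
 * possibly-cloned skb before pushing an extra header.  In real code the
 * skb_cow_head() helper wraps this pattern; @hdr_len is a caller-chosen size.
 */
static int __maybe_unused example_make_header_room(struct sk_buff *skb,
						   unsigned int hdr_len)
{
	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0,
					   GFP_ATOMIC);

		if (err)
			return err;
	}
	/* All pointers into the old header are now stale; re-derive them. */
	skb_push(skb, hdr_len);
	return 0;
}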
2173
2174 /* Make private copy of skb with writable head and some headroom */
2175
2176 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
2177 {
2178 struct sk_buff *skb2;
2179 int delta = headroom - skb_headroom(skb);
2180
2181 if (delta <= 0)
2182 skb2 = pskb_copy(skb, GFP_ATOMIC);
2183 else {
2184 skb2 = skb_clone(skb, GFP_ATOMIC);
2185 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
2186 GFP_ATOMIC)) {
2187 kfree_skb(skb2);
2188 skb2 = NULL;
2189 }
2190 }
2191 return skb2;
2192 }
2193 EXPORT_SYMBOL(skb_realloc_headroom);
2194
2195 /* Note: We plan to rework this in linux-6.4 */
2196 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
2197 {
2198 unsigned int saved_end_offset, saved_truesize;
2199 struct skb_shared_info *shinfo;
2200 int res;
2201
2202 saved_end_offset = skb_end_offset(skb);
2203 saved_truesize = skb->truesize;
2204
2205 res = pskb_expand_head(skb, 0, 0, pri);
2206 if (res)
2207 return res;
2208
2209 skb->truesize = saved_truesize;
2210
2211 if (likely(skb_end_offset(skb) == saved_end_offset))
2212 return 0;
2213
2214 /* We can not change skb->end if the original or new value
2215 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
2216 */
2217 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
2218 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
2219 /* We think this path should not be taken.
2220 * Add a temporary trace to warn us just in case.
2221 */
2222 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
2223 saved_end_offset, skb_end_offset(skb));
2224 WARN_ON_ONCE(1);
2225 return 0;
2226 }
2227
2228 shinfo = skb_shinfo(skb);
2229
2230 /* We are about to change back skb->end,
2231 * we need to move skb_shinfo() to its new location.
2232 */
2233 memmove(skb->head + saved_end_offset,
2234 shinfo,
2235 offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
2236
2237 skb_set_end_offset(skb, saved_end_offset);
2238
2239 return 0;
2240 }
2241
2242 /**
2243 * skb_expand_head - reallocate header of &sk_buff
2244 * @skb: buffer to reallocate
2245 * @headroom: needed headroom
2246 *
2247 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2248 * if possible; copies skb->sk to the new skb as needed
2249 * and frees the original skb in case of failure.
2250 *
2251 * It expects an increase in headroom and generates a warning otherwise.
2252 */
2253
2254 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
2255 {
2256 int delta = headroom - skb_headroom(skb);
2257 int osize = skb_end_offset(skb);
2258 struct sock *sk = skb->sk;
2259
2260 if (WARN_ONCE(delta <= 0,
2261 "%s is expecting an increase in the headroom", __func__))
2262 return skb;
2263
2264 delta = SKB_DATA_ALIGN(delta);
2265 /* pskb_expand_head() might crash, if skb is shared. */
2266 if (skb_shared(skb) || !is_skb_wmem(skb)) {
2267 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2268
2269 if (unlikely(!nskb))
2270 goto fail;
2271
2272 if (sk)
2273 skb_set_owner_w(nskb, sk);
2274 consume_skb(skb);
2275 skb = nskb;
2276 }
2277 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
2278 goto fail;
2279
2280 if (sk && is_skb_wmem(skb)) {
2281 delta = skb_end_offset(skb) - osize;
2282 refcount_add(delta, &sk->sk_wmem_alloc);
2283 skb->truesize += delta;
2284 }
2285 return skb;
2286
2287 fail:
2288 kfree_skb(skb);
2289 return NULL;
2290 }
2291 EXPORT_SYMBOL(skb_expand_head);
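
/* Illustrative sketch, not part of the original file: the tunnel-style use
 * skb_expand_head() is meant for - the caller simply replaces its skb
 * pointer and bails out on NULL, since the original skb is already freed.
 */
static struct sk_buff *__maybe_unused example_tunnel_headroom(struct sk_buff *skb,
							      unsigned int needed)
{
	if (skb_headroom(skb) < needed) {
		skb = skb_expand_head(skb, needed);
		if (!skb)
			return NULL;	/* original was freed on failure */
	}
	return skb;
}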
2292
2293 /**
2294 * skb_copy_expand - copy and expand sk_buff
2295 * @skb: buffer to copy
2296 * @newheadroom: new free bytes at head
2297 * @newtailroom: new free bytes at tail
2298 * @gfp_mask: allocation priority
2299 *
2300 * Make a copy of both an &sk_buff and its data and while doing so
2301 * allocate additional space.
2302 *
2303 * This is used when the caller wishes to modify the data and needs a
2304 * private copy of the data to alter as well as more space for new fields.
2305 * Returns %NULL on failure or the pointer to the buffer
2306 * on success. The returned buffer has a reference count of 1.
2307 *
2308 * You must pass %GFP_ATOMIC as the allocation priority if this function
2309 * is called from an interrupt.
2310 */
2311 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
2312 int newheadroom, int newtailroom,
2313 gfp_t gfp_mask)
2314 {
2315 /*
2316 * Allocate the copy buffer
2317 */
2318 int head_copy_len, head_copy_off;
2319 struct sk_buff *n;
2320 int oldheadroom;
2321
2322 if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
2323 return NULL;
2324
2325 oldheadroom = skb_headroom(skb);
2326 n = __alloc_skb(newheadroom + skb->len + newtailroom,
2327 gfp_mask, skb_alloc_rx_flag(skb),
2328 NUMA_NO_NODE);
2329 if (!n)
2330 return NULL;
2331
2332 skb_reserve(n, newheadroom);
2333
2334 /* Set the tail pointer and length */
2335 skb_put(n, skb->len);
2336
2337 head_copy_len = oldheadroom;
2338 head_copy_off = 0;
2339 if (newheadroom <= head_copy_len)
2340 head_copy_len = newheadroom;
2341 else
2342 head_copy_off = newheadroom - head_copy_len;
2343
2344 /* Copy the linear header and data. */
2345 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
2346 skb->len + head_copy_len));
2347
2348 skb_copy_header(n, skb);
2349
2350 skb_headers_offset_update(n, newheadroom - oldheadroom);
2351
2352 return n;
2353 }
2354 EXPORT_SYMBOL(skb_copy_expand);
2355
2356 /**
2357 * __skb_pad - zero pad the tail of an skb
2358 * @skb: buffer to pad
2359 * @pad: space to pad
2360 * @free_on_error: free buffer on error
2361 *
2362 * Ensure that a buffer is followed by a padding area that is zero
2363 * filled. Used by network drivers which may DMA or transfer data
2364 * beyond the buffer end onto the wire.
2365 *
2366 * May return error in out of memory cases. The skb is freed on error
2367 * if @free_on_error is true.
2368 */
2369
2370 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
2371 {
2372 int err;
2373 int ntail;
2374
2375 /* If the skbuff is non-linear, tailroom is always zero. */
2376 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
2377 memset(skb->data+skb->len, 0, pad);
2378 return 0;
2379 }
2380
2381 ntail = skb->data_len + pad - (skb->end - skb->tail);
2382 if (likely(skb_cloned(skb) || ntail > 0)) {
2383 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
2384 if (unlikely(err))
2385 goto free_skb;
2386 }
2387
2388 /* FIXME: The use of this function with non-linear skb's really needs
2389 * to be audited.
2390 */
2391 err = skb_linearize(skb);
2392 if (unlikely(err))
2393 goto free_skb;
2394
2395 memset(skb->data + skb->len, 0, pad);
2396 return 0;
2397
2398 free_skb:
2399 if (free_on_error)
2400 kfree_skb(skb);
2401 return err;
2402 }
2403 EXPORT_SYMBOL(__skb_pad);
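
/* Illustrative sketch, not part of the original file: a driver padding short
 * frames to the 60-byte Ethernet minimum before DMA.  skb_put_padto() is the
 * usual wrapper around __skb_pad() and frees the skb on allocation failure.
 */
static int __maybe_unused example_pad_min_frame(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;	/* skb has already been freed */
	return 0;
}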
2404
2405 /**
2406 * pskb_put - add data to the tail of a potentially fragmented buffer
2407 * @skb: start of the buffer to use
2408 * @tail: tail fragment of the buffer to use
2409 * @len: amount of data to add
2410 *
2411 * This function extends the used data area of the potentially
2412 * fragmented buffer. @tail must be the last fragment of @skb -- or
2413 * @skb itself. If this would exceed the total buffer size the kernel
2414 * will panic. A pointer to the first byte of the extra data is
2415 * returned.
2416 */
2417
2418 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
2419 {
2420 if (tail != skb) {
2421 skb->data_len += len;
2422 skb->len += len;
2423 }
2424 return skb_put(tail, len);
2425 }
2426 EXPORT_SYMBOL_GPL(pskb_put);
2427
2428 /**
2429 * skb_put - add data to a buffer
2430 * @skb: buffer to use
2431 * @len: amount of data to add
2432 *
2433 * This function extends the used data area of the buffer. If this would
2434 * exceed the total buffer size the kernel will panic. A pointer to the
2435 * first byte of the extra data is returned.
2436 */
2437 void *skb_put(struct sk_buff *skb, unsigned int len)
2438 {
2439 void *tmp = skb_tail_pointer(skb);
2440 SKB_LINEAR_ASSERT(skb);
2441 skb->tail += len;
2442 skb->len += len;
2443 if (unlikely(skb->tail > skb->end))
2444 skb_over_panic(skb, len, __builtin_return_address(0));
2445 return tmp;
2446 }
2447 EXPORT_SYMBOL(skb_put);
2448
2449 /**
2450 * skb_push - add data to the start of a buffer
2451 * @skb: buffer to use
2452 * @len: amount of data to add
2453 *
2454 * This function extends the used data area of the buffer at the buffer
2455 * start. If this would exceed the total buffer headroom the kernel will
2456 * panic. A pointer to the first byte of the extra data is returned.
2457 */
2458 void *skb_push(struct sk_buff *skb, unsigned int len)
2459 {
2460 skb->data -= len;
2461 skb->len += len;
2462 if (unlikely(skb->data < skb->head))
2463 skb_under_panic(skb, len, __builtin_return_address(0));
2464 return skb->data;
2465 }
2466 EXPORT_SYMBOL(skb_push);
2467
2468 /**
2469 * skb_pull - remove data from the start of a buffer
2470 * @skb: buffer to use
2471 * @len: amount of data to remove
2472 *
2473 * This function removes data from the start of a buffer, returning
2474 * the memory to the headroom. A pointer to the next data in the buffer
2475 * is returned. Once the data has been pulled future pushes will overwrite
2476 * the old data.
2477 */
2478 void *skb_pull(struct sk_buff *skb, unsigned int len)
2479 {
2480 return skb_pull_inline(skb, len);
2481 }
2482 EXPORT_SYMBOL(skb_pull);
2483
2484 /**
2485 * skb_pull_data - remove data from the start of a buffer returning its
2486 * original position.
2487 * @skb: buffer to use
2488 * @len: amount of data to remove
2489 *
2490 * This function removes data from the start of a buffer, returning
2491 * the memory to the headroom. A pointer to the original data in the buffer
2492 * is returned after checking if there is enough data to pull. Once the
2493 * data has been pulled future pushes will overwrite the old data.
2494 */
2495 void *skb_pull_data(struct sk_buff *skb, size_t len)
2496 {
2497 void *data = skb->data;
2498
2499 if (skb->len < len)
2500 return NULL;
2501
2502 skb_pull(skb, len);
2503
2504 return data;
2505 }
2506 EXPORT_SYMBOL(skb_pull_data);
2507
2508 /**
2509 * skb_trim - remove end from a buffer
2510 * @skb: buffer to alter
2511 * @len: new length
2512 *
2513 * Cut the length of a buffer down by removing data from the tail. If
2514 * the buffer is already under the length specified it is not modified.
2515 * The skb must be linear.
2516 */
2517 void skb_trim(struct sk_buff *skb, unsigned int len)
2518 {
2519 if (skb->len > len)
2520 __skb_trim(skb, len);
2521 }
2522 EXPORT_SYMBOL(skb_trim);
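
/* Illustrative sketch, not part of the original file: the basic data pointer
 * helpers above used together on a freshly allocated linear skb.  @hdr_len
 * and @payload_len are hypothetical sizes chosen by the caller.
 */
static struct sk_buff *__maybe_unused example_build_frame(unsigned int hdr_len,
							  unsigned int payload_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
	void *payload;

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);		/* leave room for headers */
	payload = skb_put(skb, payload_len);	/* append the payload area */
	memset(payload, 0, payload_len);
	skb_push(skb, hdr_len);			/* prepend the header */
	skb_pull(skb, hdr_len);			/* ...and strip it again */
	skb_trim(skb, payload_len / 2);		/* drop the tail half */
	return skb;
}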
2523
2524 /* Trims skb to length len. It can change skb pointers.
2525 */
2526
2527 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
2528 {
2529 struct sk_buff **fragp;
2530 struct sk_buff *frag;
2531 int offset = skb_headlen(skb);
2532 int nfrags = skb_shinfo(skb)->nr_frags;
2533 int i;
2534 int err;
2535
2536 if (skb_cloned(skb) &&
2537 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2538 return err;
2539
2540 i = 0;
2541 if (offset >= len)
2542 goto drop_pages;
2543
2544 for (; i < nfrags; i++) {
2545 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2546
2547 if (end < len) {
2548 offset = end;
2549 continue;
2550 }
2551
2552 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2553
2554 drop_pages:
2555 skb_shinfo(skb)->nr_frags = i;
2556
2557 for (; i < nfrags; i++)
2558 skb_frag_unref(skb, i);
2559
2560 if (skb_has_frag_list(skb))
2561 skb_drop_fraglist(skb);
2562 goto done;
2563 }
2564
2565 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2566 fragp = &frag->next) {
2567 int end = offset + frag->len;
2568
2569 if (skb_shared(frag)) {
2570 struct sk_buff *nfrag;
2571
2572 nfrag = skb_clone(frag, GFP_ATOMIC);
2573 if (unlikely(!nfrag))
2574 return -ENOMEM;
2575
2576 nfrag->next = frag->next;
2577 consume_skb(frag);
2578 frag = nfrag;
2579 *fragp = frag;
2580 }
2581
2582 if (end < len) {
2583 offset = end;
2584 continue;
2585 }
2586
2587 if (end > len &&
2588 unlikely((err = pskb_trim(frag, len - offset))))
2589 return err;
2590
2591 if (frag->next)
2592 skb_drop_list(&frag->next);
2593 break;
2594 }
2595
2596 done:
2597 if (len > skb_headlen(skb)) {
2598 skb->data_len -= skb->len - len;
2599 skb->len = len;
2600 } else {
2601 skb->len = len;
2602 skb->data_len = 0;
2603 skb_set_tail_pointer(skb, len);
2604 }
2605
2606 if (!skb->sk || skb->destructor == sock_edemux)
2607 skb_condense(skb);
2608 return 0;
2609 }
2610 EXPORT_SYMBOL(___pskb_trim);
2611
2612 /* Note : use pskb_trim_rcsum() instead of calling this directly
2613 */
2614 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2615 {
2616 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2617 int delta = skb->len - len;
2618
2619 skb->csum = csum_block_sub(skb->csum,
2620 skb_checksum(skb, len, delta, 0),
2621 len);
2622 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2623 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2624 int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2625
2626 if (offset + sizeof(__sum16) > hdlen)
2627 return -EINVAL;
2628 }
2629 return __pskb_trim(skb, len);
2630 }
2631 EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2632
2633 /**
2634 * __pskb_pull_tail - advance tail of skb header
2635 * @skb: buffer to reallocate
2636 * @delta: number of bytes to advance tail
2637 *
2638 * This function only makes sense on a fragmented &sk_buff:
2639 * it expands the header, moving its tail forward and copying the
2640 * necessary data from the fragmented part.
2641 *
2642 * &sk_buff MUST have reference count of 1.
2643 *
2644 * Returns %NULL (and &sk_buff does not change) if pull failed
2645 * or value of new tail of skb in the case of success.
2646 *
2647 * All the pointers pointing into skb header may change and must be
2648 * reloaded after call to this function.
2649 */
2650
2651 /* Moves tail of skb head forward, copying data from fragmented part,
2652 * when it is necessary.
2653 * 1. It may fail due to malloc failure.
2654 * 2. It may change skb pointers.
2655 *
2656 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2657 */
2658 void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2659 {
2660 /* If skb does not have enough free space at the tail, get a new one
2661 * plus 128 bytes for future expansions. If we have enough
2662 * room at the tail, reallocate without expansion only if skb is cloned.
2663 */
2664 int i, k, eat = (skb->tail + delta) - skb->end;
2665
2666 if (eat > 0 || skb_cloned(skb)) {
2667 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2668 GFP_ATOMIC))
2669 return NULL;
2670 }
2671
2672 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2673 skb_tail_pointer(skb), delta));
2674
2675 /* Optimization: no fragments, no reasons to preestimate
2676 * size of pulled pages. Superb.
2677 */
2678 if (!skb_has_frag_list(skb))
2679 goto pull_pages;
2680
2681 /* Estimate size of pulled pages. */
2682 eat = delta;
2683 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2684 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2685
2686 if (size >= eat)
2687 goto pull_pages;
2688 eat -= size;
2689 }
2690
2691 /* If we need to update the frag list, we are in trouble.
2692 * Certainly, it is possible to add an offset to the skb data,
2693 * but taking into account that pulling is expected to
2694 * be a very rare operation, it is worth fighting against
2695 * further bloating of the skb head and crucifying ourselves here instead.
2696 * Pure masochism, indeed. 8)8)
2697 */
2698 if (eat) {
2699 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2700 struct sk_buff *clone = NULL;
2701 struct sk_buff *insp = NULL;
2702
2703 do {
2704 if (list->len <= eat) {
2705 /* Eaten as whole. */
2706 eat -= list->len;
2707 list = list->next;
2708 insp = list;
2709 } else {
2710 /* Eaten partially. */
2711 if (skb_is_gso(skb) && !list->head_frag &&
2712 skb_headlen(list))
2713 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2714
2715 if (skb_shared(list)) {
2716 /* Sucks! We need to fork list. :-( */
2717 clone = skb_clone(list, GFP_ATOMIC);
2718 if (!clone)
2719 return NULL;
2720 insp = list->next;
2721 list = clone;
2722 } else {
2723 /* This may be pulled without
2724 * problems. */
2725 insp = list;
2726 }
2727 if (!pskb_pull(list, eat)) {
2728 kfree_skb(clone);
2729 return NULL;
2730 }
2731 break;
2732 }
2733 } while (eat);
2734
2735 /* Free pulled out fragments. */
2736 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2737 skb_shinfo(skb)->frag_list = list->next;
2738 consume_skb(list);
2739 }
2740 /* And insert new clone at head. */
2741 if (clone) {
2742 clone->next = list;
2743 skb_shinfo(skb)->frag_list = clone;
2744 }
2745 }
2746 /* Success! Now we may commit changes to skb data. */
2747
2748 pull_pages:
2749 eat = delta;
2750 k = 0;
2751 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2752 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2753
2754 if (size <= eat) {
2755 skb_frag_unref(skb, i);
2756 eat -= size;
2757 } else {
2758 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2759
2760 *frag = skb_shinfo(skb)->frags[i];
2761 if (eat) {
2762 skb_frag_off_add(frag, eat);
2763 skb_frag_size_sub(frag, eat);
2764 if (!i)
2765 goto end;
2766 eat = 0;
2767 }
2768 k++;
2769 }
2770 }
2771 skb_shinfo(skb)->nr_frags = k;
2772
2773 end:
2774 skb->tail += delta;
2775 skb->data_len -= delta;
2776
2777 if (!skb->data_len)
2778 skb_zcopy_clear(skb, false);
2779
2780 return skb_tail_pointer(skb);
2781 }
2782 EXPORT_SYMBOL(__pskb_pull_tail);
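
/* Illustrative sketch, not part of the original file: __pskb_pull_tail() is
 * rarely called directly; most callers go through pskb_may_pull() to make
 * sure a header is in the linear area before dereferencing it.
 */
static int __maybe_unused example_read_udp_len(struct sk_buff *skb)
{
	const struct udphdr *uh;

	if (!pskb_may_pull(skb, sizeof(*uh)))
		return -EINVAL;
	/* skb->data may have been reallocated; re-read it after the pull. */
	uh = (const struct udphdr *)skb->data;
	return ntohs(uh->len);
}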
2783
2784 /**
2785 * skb_copy_bits - copy bits from skb to kernel buffer
2786 * @skb: source skb
2787 * @offset: offset in source
2788 * @to: destination buffer
2789 * @len: number of bytes to copy
2790 *
2791 * Copy the specified number of bytes from the source skb to the
2792 * destination buffer.
2793 *
2794 * CAUTION ! :
2795 * If its prototype is ever changed,
2796 * check arch/{*}/net/{*}.S files,
2797 * since it is called from BPF assembly code.
2798 */
2799 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2800 {
2801 int start = skb_headlen(skb);
2802 struct sk_buff *frag_iter;
2803 int i, copy;
2804
2805 if (offset > (int)skb->len - len)
2806 goto fault;
2807
2808 /* Copy header. */
2809 if ((copy = start - offset) > 0) {
2810 if (copy > len)
2811 copy = len;
2812 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2813 if ((len -= copy) == 0)
2814 return 0;
2815 offset += copy;
2816 to += copy;
2817 }
2818
2819 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2820 int end;
2821 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2822
2823 WARN_ON(start > offset + len);
2824
2825 end = start + skb_frag_size(f);
2826 if ((copy = end - offset) > 0) {
2827 u32 p_off, p_len, copied;
2828 struct page *p;
2829 u8 *vaddr;
2830
2831 if (copy > len)
2832 copy = len;
2833
2834 skb_frag_foreach_page(f,
2835 skb_frag_off(f) + offset - start,
2836 copy, p, p_off, p_len, copied) {
2837 vaddr = kmap_atomic(p);
2838 memcpy(to + copied, vaddr + p_off, p_len);
2839 kunmap_atomic(vaddr);
2840 }
2841
2842 if ((len -= copy) == 0)
2843 return 0;
2844 offset += copy;
2845 to += copy;
2846 }
2847 start = end;
2848 }
2849
2850 skb_walk_frags(skb, frag_iter) {
2851 int end;
2852
2853 WARN_ON(start > offset + len);
2854
2855 end = start + frag_iter->len;
2856 if ((copy = end - offset) > 0) {
2857 if (copy > len)
2858 copy = len;
2859 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2860 goto fault;
2861 if ((len -= copy) == 0)
2862 return 0;
2863 offset += copy;
2864 to += copy;
2865 }
2866 start = end;
2867 }
2868
2869 if (!len)
2870 return 0;
2871
2872 fault:
2873 return -EFAULT;
2874 }
2875 EXPORT_SYMBOL(skb_copy_bits);
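
/* Illustrative sketch, not part of the original file: gather the first bytes
 * of a possibly fragmented skb into a caller-provided buffer for inspection,
 * without modifying the skb itself.
 */
static int __maybe_unused example_peek_bytes(const struct sk_buff *skb,
					     void *buf, unsigned int len)
{
	if (skb->len < len)
		return -EINVAL;
	return skb_copy_bits(skb, 0, buf, len);
}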
2876
2877 /*
2878 * Callback from splice_to_pipe(), if we need to release some pages
2879 * at the end of the spd in case we error'ed out in filling the pipe.
2880 */
2881 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2882 {
2883 put_page(spd->pages[i]);
2884 }
2885
2886 static struct page *linear_to_page(struct page *page, unsigned int *len,
2887 unsigned int *offset,
2888 struct sock *sk)
2889 {
2890 struct page_frag *pfrag = sk_page_frag(sk);
2891
2892 if (!sk_page_frag_refill(sk, pfrag))
2893 return NULL;
2894
2895 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2896
2897 memcpy(page_address(pfrag->page) + pfrag->offset,
2898 page_address(page) + *offset, *len);
2899 *offset = pfrag->offset;
2900 pfrag->offset += *len;
2901
2902 return pfrag->page;
2903 }
2904
2905 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2906 struct page *page,
2907 unsigned int offset)
2908 {
2909 return spd->nr_pages &&
2910 spd->pages[spd->nr_pages - 1] == page &&
2911 (spd->partial[spd->nr_pages - 1].offset +
2912 spd->partial[spd->nr_pages - 1].len == offset);
2913 }
2914
2915 /*
2916 * Fill page/offset/length into spd, if it can hold more pages.
2917 */
2918 static bool spd_fill_page(struct splice_pipe_desc *spd,
2919 struct pipe_inode_info *pipe, struct page *page,
2920 unsigned int *len, unsigned int offset,
2921 bool linear,
2922 struct sock *sk)
2923 {
2924 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2925 return true;
2926
2927 if (linear) {
2928 page = linear_to_page(page, len, &offset, sk);
2929 if (!page)
2930 return true;
2931 }
2932 if (spd_can_coalesce(spd, page, offset)) {
2933 spd->partial[spd->nr_pages - 1].len += *len;
2934 return false;
2935 }
2936 get_page(page);
2937 spd->pages[spd->nr_pages] = page;
2938 spd->partial[spd->nr_pages].len = *len;
2939 spd->partial[spd->nr_pages].offset = offset;
2940 spd->nr_pages++;
2941
2942 return false;
2943 }
2944
2945 static bool __splice_segment(struct page *page, unsigned int poff,
2946 unsigned int plen, unsigned int *off,
2947 unsigned int *len,
2948 struct splice_pipe_desc *spd, bool linear,
2949 struct sock *sk,
2950 struct pipe_inode_info *pipe)
2951 {
2952 if (!*len)
2953 return true;
2954
2955 /* skip this segment if already processed */
2956 if (*off >= plen) {
2957 *off -= plen;
2958 return false;
2959 }
2960
2961 /* ignore any bits we already processed */
2962 poff += *off;
2963 plen -= *off;
2964 *off = 0;
2965
2966 do {
2967 unsigned int flen = min(*len, plen);
2968
2969 if (spd_fill_page(spd, pipe, page, &flen, poff,
2970 linear, sk))
2971 return true;
2972 poff += flen;
2973 plen -= flen;
2974 *len -= flen;
2975 } while (*len && plen);
2976
2977 return false;
2978 }
2979
2980 /*
2981 * Map linear and fragment data from the skb to spd. It reports true if the
2982 * pipe is full or if we already spliced the requested length.
2983 */
2984 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2985 unsigned int *offset, unsigned int *len,
2986 struct splice_pipe_desc *spd, struct sock *sk)
2987 {
2988 int seg;
2989 struct sk_buff *iter;
2990
2991 /* map the linear part :
2992 * If skb->head_frag is set, this 'linear' part is backed by a
2993 * fragment, and if the head is not shared with any clones then
2994 * we can avoid a copy since we own the head portion of this page.
2995 */
2996 if (__splice_segment(virt_to_page(skb->data),
2997 (unsigned long) skb->data & (PAGE_SIZE - 1),
2998 skb_headlen(skb),
2999 offset, len, spd,
3000 skb_head_is_locked(skb),
3001 sk, pipe))
3002 return true;
3003
3004 /*
3005 * then map the fragments
3006 */
3007 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
3008 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
3009
3010 if (__splice_segment(skb_frag_page(f),
3011 skb_frag_off(f), skb_frag_size(f),
3012 offset, len, spd, false, sk, pipe))
3013 return true;
3014 }
3015
3016 skb_walk_frags(skb, iter) {
3017 if (*offset >= iter->len) {
3018 *offset -= iter->len;
3019 continue;
3020 }
3021 /* __skb_splice_bits() only fails if the output has no room
3022 * left, so no point in going over the frag_list for the error
3023 * case.
3024 */
3025 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
3026 return true;
3027 }
3028
3029 return false;
3030 }
3031
3032 /*
3033 * Map data from the skb to a pipe. Should handle both the linear part,
3034 * the fragments, and the frag list.
3035 */
3036 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3037 struct pipe_inode_info *pipe, unsigned int tlen,
3038 unsigned int flags)
3039 {
3040 struct partial_page partial[MAX_SKB_FRAGS];
3041 struct page *pages[MAX_SKB_FRAGS];
3042 struct splice_pipe_desc spd = {
3043 .pages = pages,
3044 .partial = partial,
3045 .nr_pages_max = MAX_SKB_FRAGS,
3046 .ops = &nosteal_pipe_buf_ops,
3047 .spd_release = sock_spd_release,
3048 };
3049 int ret = 0;
3050
3051 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
3052
3053 if (spd.nr_pages)
3054 ret = splice_to_pipe(pipe, &spd);
3055
3056 return ret;
3057 }
3058 EXPORT_SYMBOL_GPL(skb_splice_bits);
3059
3060 static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
3061 {
3062 struct socket *sock = sk->sk_socket;
3063 size_t size = msg_data_left(msg);
3064
3065 if (!sock)
3066 return -EINVAL;
3067
3068 if (!sock->ops->sendmsg_locked)
3069 return sock_no_sendmsg_locked(sk, msg, size);
3070
3071 return sock->ops->sendmsg_locked(sk, msg, size);
3072 }
3073
3074 static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
3075 {
3076 struct socket *sock = sk->sk_socket;
3077
3078 if (!sock)
3079 return -EINVAL;
3080 return sock_sendmsg(sock, msg);
3081 }
3082
3083 typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
3084 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
3085 int len, sendmsg_func sendmsg)
3086 {
3087 unsigned int orig_len = len;
3088 struct sk_buff *head = skb;
3089 unsigned short fragidx;
3090 int slen, ret;
3091
3092 do_frag_list:
3093
3094 /* Deal with head data */
3095 while (offset < skb_headlen(skb) && len) {
3096 struct kvec kv;
3097 struct msghdr msg;
3098
3099 slen = min_t(int, len, skb_headlen(skb) - offset);
3100 kv.iov_base = skb->data + offset;
3101 kv.iov_len = slen;
3102 memset(&msg, 0, sizeof(msg));
3103 msg.msg_flags = MSG_DONTWAIT;
3104
3105 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
3106 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3107 sendmsg_unlocked, sk, &msg);
3108 if (ret <= 0)
3109 goto error;
3110
3111 offset += ret;
3112 len -= ret;
3113 }
3114
3115 /* All the data was skb head? */
3116 if (!len)
3117 goto out;
3118
3119 /* Make offset relative to start of frags */
3120 offset -= skb_headlen(skb);
3121
3122 /* Find where we are in frag list */
3123 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3124 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3125
3126 if (offset < skb_frag_size(frag))
3127 break;
3128
3129 offset -= skb_frag_size(frag);
3130 }
3131
3132 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3133 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3134
3135 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
3136
3137 while (slen) {
3138 struct bio_vec bvec;
3139 struct msghdr msg = {
3140 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
3141 };
3142
3143 bvec_set_page(&bvec, skb_frag_page(frag), slen,
3144 skb_frag_off(frag) + offset);
3145 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
3146 slen);
3147
3148 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3149 sendmsg_unlocked, sk, &msg);
3150 if (ret <= 0)
3151 goto error;
3152
3153 len -= ret;
3154 offset += ret;
3155 slen -= ret;
3156 }
3157
3158 offset = 0;
3159 }
3160
3161 if (len) {
3162 /* Process any frag lists */
3163
3164 if (skb == head) {
3165 if (skb_has_frag_list(skb)) {
3166 skb = skb_shinfo(skb)->frag_list;
3167 goto do_frag_list;
3168 }
3169 } else if (skb->next) {
3170 skb = skb->next;
3171 goto do_frag_list;
3172 }
3173 }
3174
3175 out:
3176 return orig_len - len;
3177
3178 error:
3179 return orig_len == len ? ret : orig_len - len;
3180 }
3181
3182 /* Send skb data on a socket. Socket must be locked. */
3183 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3184 int len)
3185 {
3186 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
3187 }
3188 EXPORT_SYMBOL_GPL(skb_send_sock_locked);
3189
3190 /* Send skb data on a socket. Socket must be unlocked. */
3191 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
3192 {
3193 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
3194 }
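
/* Illustrative sketch, not part of the original file: pushing the whole
 * payload of an skb out of an unlocked kernel socket and treating a short
 * send as a retryable condition.
 */
static int __maybe_unused example_send_whole_skb(struct sock *sk,
						 struct sk_buff *skb)
{
	int sent = skb_send_sock(sk, skb, 0, skb->len);

	if (sent < 0)
		return sent;
	return sent == (int)skb->len ? 0 : -EAGAIN;
}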
3195
3196 /**
3197 * skb_store_bits - store bits from kernel buffer to skb
3198 * @skb: destination buffer
3199 * @offset: offset in destination
3200 * @from: source buffer
3201 * @len: number of bytes to copy
3202 *
3203 * Copy the specified number of bytes from the source buffer to the
3204 * destination skb. This function handles all the messy bits of
3205 * traversing fragment lists and such.
3206 */
3207
3208 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
3209 {
3210 int start = skb_headlen(skb);
3211 struct sk_buff *frag_iter;
3212 int i, copy;
3213
3214 if (offset > (int)skb->len - len)
3215 goto fault;
3216
3217 if ((copy = start - offset) > 0) {
3218 if (copy > len)
3219 copy = len;
3220 skb_copy_to_linear_data_offset(skb, offset, from, copy);
3221 if ((len -= copy) == 0)
3222 return 0;
3223 offset += copy;
3224 from += copy;
3225 }
3226
3227 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3228 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3229 int end;
3230
3231 WARN_ON(start > offset + len);
3232
3233 end = start + skb_frag_size(frag);
3234 if ((copy = end - offset) > 0) {
3235 u32 p_off, p_len, copied;
3236 struct page *p;
3237 u8 *vaddr;
3238
3239 if (copy > len)
3240 copy = len;
3241
3242 skb_frag_foreach_page(frag,
3243 skb_frag_off(frag) + offset - start,
3244 copy, p, p_off, p_len, copied) {
3245 vaddr = kmap_atomic(p);
3246 memcpy(vaddr + p_off, from + copied, p_len);
3247 kunmap_atomic(vaddr);
3248 }
3249
3250 if ((len -= copy) == 0)
3251 return 0;
3252 offset += copy;
3253 from += copy;
3254 }
3255 start = end;
3256 }
3257
3258 skb_walk_frags(skb, frag_iter) {
3259 int end;
3260
3261 WARN_ON(start > offset + len);
3262
3263 end = start + frag_iter->len;
3264 if ((copy = end - offset) > 0) {
3265 if (copy > len)
3266 copy = len;
3267 if (skb_store_bits(frag_iter, offset - start,
3268 from, copy))
3269 goto fault;
3270 if ((len -= copy) == 0)
3271 return 0;
3272 offset += copy;
3273 from += copy;
3274 }
3275 start = end;
3276 }
3277 if (!len)
3278 return 0;
3279
3280 fault:
3281 return -EFAULT;
3282 }
3283 EXPORT_SYMBOL(skb_store_bits);
3284
3285 /* Checksum skb data. */
3286 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3287 __wsum csum, const struct skb_checksum_ops *ops)
3288 {
3289 int start = skb_headlen(skb);
3290 int i, copy = start - offset;
3291 struct sk_buff *frag_iter;
3292 int pos = 0;
3293
3294 /* Checksum header. */
3295 if (copy > 0) {
3296 if (copy > len)
3297 copy = len;
3298 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
3299 skb->data + offset, copy, csum);
3300 if ((len -= copy) == 0)
3301 return csum;
3302 offset += copy;
3303 pos = copy;
3304 }
3305
3306 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3307 int end;
3308 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3309
3310 WARN_ON(start > offset + len);
3311
3312 end = start + skb_frag_size(frag);
3313 if ((copy = end - offset) > 0) {
3314 u32 p_off, p_len, copied;
3315 struct page *p;
3316 __wsum csum2;
3317 u8 *vaddr;
3318
3319 if (copy > len)
3320 copy = len;
3321
3322 skb_frag_foreach_page(frag,
3323 skb_frag_off(frag) + offset - start,
3324 copy, p, p_off, p_len, copied) {
3325 vaddr = kmap_atomic(p);
3326 csum2 = INDIRECT_CALL_1(ops->update,
3327 csum_partial_ext,
3328 vaddr + p_off, p_len, 0);
3329 kunmap_atomic(vaddr);
3330 csum = INDIRECT_CALL_1(ops->combine,
3331 csum_block_add_ext, csum,
3332 csum2, pos, p_len);
3333 pos += p_len;
3334 }
3335
3336 if (!(len -= copy))
3337 return csum;
3338 offset += copy;
3339 }
3340 start = end;
3341 }
3342
3343 skb_walk_frags(skb, frag_iter) {
3344 int end;
3345
3346 WARN_ON(start > offset + len);
3347
3348 end = start + frag_iter->len;
3349 if ((copy = end - offset) > 0) {
3350 __wsum csum2;
3351 if (copy > len)
3352 copy = len;
3353 csum2 = __skb_checksum(frag_iter, offset - start,
3354 copy, 0, ops);
3355 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
3356 csum, csum2, pos, copy);
3357 if ((len -= copy) == 0)
3358 return csum;
3359 offset += copy;
3360 pos += copy;
3361 }
3362 start = end;
3363 }
3364 BUG_ON(len);
3365
3366 return csum;
3367 }
3368 EXPORT_SYMBOL(__skb_checksum);
3369
3370 __wsum skb_checksum(const struct sk_buff *skb, int offset,
3371 int len, __wsum csum)
3372 {
3373 const struct skb_checksum_ops ops = {
3374 .update = csum_partial_ext,
3375 .combine = csum_block_add_ext,
3376 };
3377
3378 return __skb_checksum(skb, offset, len, csum, &ops);
3379 }
3380 EXPORT_SYMBOL(skb_checksum);
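
/* Illustrative sketch, not part of the original file: folding a checksum over
 * the full packet, the way a software fallback for CHECKSUM_COMPLETE might.
 */
static __sum16 __maybe_unused example_full_csum(const struct sk_buff *skb)
{
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}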
3381
3382 /* Both of above in one bottle. */
3383
3384 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
3385 u8 *to, int len)
3386 {
3387 int start = skb_headlen(skb);
3388 int i, copy = start - offset;
3389 struct sk_buff *frag_iter;
3390 int pos = 0;
3391 __wsum csum = 0;
3392
3393 /* Copy header. */
3394 if (copy > 0) {
3395 if (copy > len)
3396 copy = len;
3397 csum = csum_partial_copy_nocheck(skb->data + offset, to,
3398 copy);
3399 if ((len -= copy) == 0)
3400 return csum;
3401 offset += copy;
3402 to += copy;
3403 pos = copy;
3404 }
3405
3406 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3407 int end;
3408
3409 WARN_ON(start > offset + len);
3410
3411 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3412 if ((copy = end - offset) > 0) {
3413 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3414 u32 p_off, p_len, copied;
3415 struct page *p;
3416 __wsum csum2;
3417 u8 *vaddr;
3418
3419 if (copy > len)
3420 copy = len;
3421
3422 skb_frag_foreach_page(frag,
3423 skb_frag_off(frag) + offset - start,
3424 copy, p, p_off, p_len, copied) {
3425 vaddr = kmap_atomic(p);
3426 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
3427 to + copied,
3428 p_len);
3429 kunmap_atomic(vaddr);
3430 csum = csum_block_add(csum, csum2, pos);
3431 pos += p_len;
3432 }
3433
3434 if (!(len -= copy))
3435 return csum;
3436 offset += copy;
3437 to += copy;
3438 }
3439 start = end;
3440 }
3441
3442 skb_walk_frags(skb, frag_iter) {
3443 __wsum csum2;
3444 int end;
3445
3446 WARN_ON(start > offset + len);
3447
3448 end = start + frag_iter->len;
3449 if ((copy = end - offset) > 0) {
3450 if (copy > len)
3451 copy = len;
3452 csum2 = skb_copy_and_csum_bits(frag_iter,
3453 offset - start,
3454 to, copy);
3455 csum = csum_block_add(csum, csum2, pos);
3456 if ((len -= copy) == 0)
3457 return csum;
3458 offset += copy;
3459 to += copy;
3460 pos += copy;
3461 }
3462 start = end;
3463 }
3464 BUG_ON(len);
3465 return csum;
3466 }
3467 EXPORT_SYMBOL(skb_copy_and_csum_bits);
3468
3469 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
3470 {
3471 __sum16 sum;
3472
3473 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
3474 /* See comments in __skb_checksum_complete(). */
3475 if (likely(!sum)) {
3476 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3477 !skb->csum_complete_sw)
3478 netdev_rx_csum_fault(skb->dev, skb);
3479 }
3480 if (!skb_shared(skb))
3481 skb->csum_valid = !sum;
3482 return sum;
3483 }
3484 EXPORT_SYMBOL(__skb_checksum_complete_head);
3485
3486 /* This function assumes skb->csum already holds pseudo header's checksum,
3487 * which has been changed from the hardware checksum, for example, by
3488 * __skb_checksum_validate_complete(). And, the original skb->csum must
3489 * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
3490 *
3491 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3492 * zero. The new checksum is stored back into skb->csum unless the skb is
3493 * shared.
3494 */
3495 __sum16 __skb_checksum_complete(struct sk_buff *skb)
3496 {
3497 __wsum csum;
3498 __sum16 sum;
3499
3500 csum = skb_checksum(skb, 0, skb->len, 0);
3501
3502 sum = csum_fold(csum_add(skb->csum, csum));
3503 /* This check is inverted, because we already knew the hardware
3504 * checksum is invalid before calling this function. So, if the
3505 * re-computed checksum is valid instead, then we have a mismatch
3506 * between the original skb->csum and skb_checksum(). This means either
3507 * the original hardware checksum is incorrect or we screw up skb->csum
3508 * when moving skb->data around.
3509 */
3510 if (likely(!sum)) {
3511 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3512 !skb->csum_complete_sw)
3513 netdev_rx_csum_fault(skb->dev, skb);
3514 }
3515
3516 if (!skb_shared(skb)) {
3517 /* Save full packet checksum */
3518 skb->csum = csum;
3519 skb->ip_summed = CHECKSUM_COMPLETE;
3520 skb->csum_complete_sw = 1;
3521 skb->csum_valid = !sum;
3522 }
3523
3524 return sum;
3525 }
3526 EXPORT_SYMBOL(__skb_checksum_complete);
3527
3528 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
3529 {
3530 net_warn_ratelimited(
3531 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3532 __func__);
3533 return 0;
3534 }
3535
3536 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
3537 int offset, int len)
3538 {
3539 net_warn_ratelimited(
3540 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3541 __func__);
3542 return 0;
3543 }
3544
3545 static const struct skb_checksum_ops default_crc32c_ops = {
3546 .update = warn_crc32c_csum_update,
3547 .combine = warn_crc32c_csum_combine,
3548 };
3549
3550 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
3551 &default_crc32c_ops;
3552 EXPORT_SYMBOL(crc32c_csum_stub);
3553
3554 /**
3555 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3556 * @from: source buffer
3557 *
3558 * Calculates the amount of linear headroom needed in the 'to' skb passed
3559 * into skb_zerocopy().
3560 */
3561 unsigned int
3562 skb_zerocopy_headlen(const struct sk_buff *from)
3563 {
3564 unsigned int hlen = 0;
3565
3566 if (!from->head_frag ||
3567 skb_headlen(from) < L1_CACHE_BYTES ||
3568 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
3569 hlen = skb_headlen(from);
3570 if (!hlen)
3571 hlen = from->len;
3572 }
3573
3574 if (skb_has_frag_list(from))
3575 hlen = from->len;
3576
3577 return hlen;
3578 }
3579 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
3580
3581 /**
3582 * skb_zerocopy - Zero copy skb to skb
3583 * @to: destination buffer
3584 * @from: source buffer
3585 * @len: number of bytes to copy from source buffer
3586 * @hlen: size of linear headroom in destination buffer
3587 *
3588 * Copies up to `len` bytes from `from` to `to` by creating references
3589 * to the frags in the source buffer.
3590 *
3591 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3592 * headroom in the `to` buffer.
3593 *
3594 * Return value:
3595 * 0: everything is OK
3596 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3597 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3598 */
3599 int
3600 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
3601 {
3602 int i, j = 0;
3603 int plen = 0; /* length of skb->head fragment */
3604 int ret;
3605 struct page *page;
3606 unsigned int offset;
3607
3608 BUG_ON(!from->head_frag && !hlen);
3609
3610 /* don't bother with small payloads */
3611 if (len <= skb_tailroom(to))
3612 return skb_copy_bits(from, 0, skb_put(to, len), len);
3613
3614 if (hlen) {
3615 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
3616 if (unlikely(ret))
3617 return ret;
3618 len -= hlen;
3619 } else {
3620 plen = min_t(int, skb_headlen(from), len);
3621 if (plen) {
3622 page = virt_to_head_page(from->head);
3623 offset = from->data - (unsigned char *)page_address(page);
3624 __skb_fill_page_desc(to, 0, page, offset, plen);
3625 get_page(page);
3626 j = 1;
3627 len -= plen;
3628 }
3629 }
3630
3631 skb_len_add(to, len + plen);
3632
3633 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
3634 skb_tx_error(from);
3635 return -ENOMEM;
3636 }
3637 skb_zerocopy_clone(to, from, GFP_ATOMIC);
3638
3639 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3640 int size;
3641
3642 if (!len)
3643 break;
3644 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3645 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3646 len);
3647 skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3648 len -= size;
3649 skb_frag_ref(to, j);
3650 j++;
3651 }
3652 skb_shinfo(to)->nr_frags = j;
3653
3654 return 0;
3655 }
3656 EXPORT_SYMBOL_GPL(skb_zerocopy);
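
/* Illustrative sketch, not part of the original file: pairing
 * skb_zerocopy_headlen() with skb_zerocopy() to build a new skb that
 * references the source fragments instead of copying them.
 */
static struct sk_buff *__maybe_unused example_zerocopy_dup(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}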
3657
3658 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3659 {
3660 __wsum csum;
3661 long csstart;
3662
3663 if (skb->ip_summed == CHECKSUM_PARTIAL)
3664 csstart = skb_checksum_start_offset(skb);
3665 else
3666 csstart = skb_headlen(skb);
3667
3668 BUG_ON(csstart > skb_headlen(skb));
3669
3670 skb_copy_from_linear_data(skb, to, csstart);
3671
3672 csum = 0;
3673 if (csstart != skb->len)
3674 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3675 skb->len - csstart);
3676
3677 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3678 long csstuff = csstart + skb->csum_offset;
3679
3680 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
3681 }
3682 }
3683 EXPORT_SYMBOL(skb_copy_and_csum_dev);
3684
3685 /**
3686 * skb_dequeue - remove from the head of the queue
3687 * @list: list to dequeue from
3688 *
3689 * Remove the head of the list. The list lock is taken so the function
3690 * may be used safely with other locking list functions. The head item is
3691 * returned or %NULL if the list is empty.
3692 */
3693
3694 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3695 {
3696 unsigned long flags;
3697 struct sk_buff *result;
3698
3699 spin_lock_irqsave(&list->lock, flags);
3700 result = __skb_dequeue(list);
3701 spin_unlock_irqrestore(&list->lock, flags);
3702 return result;
3703 }
3704 EXPORT_SYMBOL(skb_dequeue);
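/* Example (illustrative sketch): draining a caller-owned &sk_buff_head named
 * 'queue'; process_skb() is a hypothetical per-packet handler, not a kernel API.
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue(&queue)) != NULL)
 *		process_skb(skb);
 */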
3705
3706 /**
3707 * skb_dequeue_tail - remove from the tail of the queue
3708 * @list: list to dequeue from
3709 *
3710 * Remove the tail of the list. The list lock is taken so the function
3711 * may be used safely with other locking list functions. The tail item is
3712 * returned or %NULL if the list is empty.
3713 */
3714 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3715 {
3716 unsigned long flags;
3717 struct sk_buff *result;
3718
3719 spin_lock_irqsave(&list->lock, flags);
3720 result = __skb_dequeue_tail(list);
3721 spin_unlock_irqrestore(&list->lock, flags);
3722 return result;
3723 }
3724 EXPORT_SYMBOL(skb_dequeue_tail);
3725
3726 /**
3727 * skb_queue_purge_reason - empty a list
3728 * @list: list to empty
3729 * @reason: drop reason
3730 *
3731 * Delete all buffers on an &sk_buff list. Each buffer is removed from
3732 * the list and one reference dropped. This function takes the list
3733 * lock and is atomic with respect to other list locking functions.
3734 */
3735 void skb_queue_purge_reason(struct sk_buff_head *list,
3736 enum skb_drop_reason reason)
3737 {
3738 struct sk_buff *skb;
3739
3740 while ((skb = skb_dequeue(list)) != NULL)
3741 kfree_skb_reason(skb, reason);
3742 }
3743 EXPORT_SYMBOL(skb_queue_purge_reason);
3744
3745 /**
3746 * skb_rbtree_purge - empty a skb rbtree
3747 * @root: root of the rbtree to empty
3748 * Return value: the sum of truesizes of all purged skbs.
3749 *
3750 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3751 * the list and one reference dropped. This function does not take
3752 * any lock. Synchronization should be handled by the caller (e.g., TCP
3753 * out-of-order queue is protected by the socket lock).
3754 */
3755 unsigned int skb_rbtree_purge(struct rb_root *root)
3756 {
3757 struct rb_node *p = rb_first(root);
3758 unsigned int sum = 0;
3759
3760 while (p) {
3761 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3762
3763 p = rb_next(p);
3764 rb_erase(&skb->rbnode, root);
3765 sum += skb->truesize;
3766 kfree_skb(skb);
3767 }
3768 return sum;
3769 }
3770
3771 void skb_errqueue_purge(struct sk_buff_head *list)
3772 {
3773 struct sk_buff *skb, *next;
3774 struct sk_buff_head kill;
3775 unsigned long flags;
3776
3777 __skb_queue_head_init(&kill);
3778
3779 spin_lock_irqsave(&list->lock, flags);
3780 skb_queue_walk_safe(list, skb, next) {
3781 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY ||
3782 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
3783 continue;
3784 __skb_unlink(skb, list);
3785 __skb_queue_tail(&kill, skb);
3786 }
3787 spin_unlock_irqrestore(&list->lock, flags);
3788 __skb_queue_purge(&kill);
3789 }
3790 EXPORT_SYMBOL(skb_errqueue_purge);
3791
3792 /**
3793 * skb_queue_head - queue a buffer at the list head
3794 * @list: list to use
3795 * @newsk: buffer to queue
3796 *
3797 * Queue a buffer at the start of the list. This function takes the
3798 * list lock and can be used safely with other locking &sk_buff
3799 * functions.
3800 *
3801 * A buffer cannot be placed on two lists at the same time.
3802 */
3803 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3804 {
3805 unsigned long flags;
3806
3807 spin_lock_irqsave(&list->lock, flags);
3808 __skb_queue_head(list, newsk);
3809 spin_unlock_irqrestore(&list->lock, flags);
3810 }
3811 EXPORT_SYMBOL(skb_queue_head);
3812
3813 /**
3814 * skb_queue_tail - queue a buffer at the list tail
3815 * @list: list to use
3816 * @newsk: buffer to queue
3817 *
3818 * Queue a buffer at the tail of the list. This function takes the
3819 * list lock and can be used safely with other locking &sk_buff
3820 * functions.
3821 *
3822 * A buffer cannot be placed on two lists at the same time.
3823 */
3824 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3825 {
3826 unsigned long flags;
3827
3828 spin_lock_irqsave(&list->lock, flags);
3829 __skb_queue_tail(list, newsk);
3830 spin_unlock_irqrestore(&list->lock, flags);
3831 }
3832 EXPORT_SYMBOL(skb_queue_tail);
3833
3834 /**
3835 * skb_unlink - remove a buffer from a list
3836 * @skb: buffer to remove
3837 * @list: list to use
3838 *
3839 * Remove a packet from a list. The list locks are taken and this
3840 * function is atomic with respect to other list locked calls.
3841 *
3842 * You must know what list the SKB is on.
3843 */
3844 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3845 {
3846 unsigned long flags;
3847
3848 spin_lock_irqsave(&list->lock, flags);
3849 __skb_unlink(skb, list);
3850 spin_unlock_irqrestore(&list->lock, flags);
3851 }
3852 EXPORT_SYMBOL(skb_unlink);
3853
3854 /**
3855 * skb_append - append a buffer
3856 * @old: buffer to insert after
3857 * @newsk: buffer to insert
3858 * @list: list to use
3859 *
3860 * Place a packet after a given packet in a list. The list locks are taken
3861 * and this function is atomic with respect to other list locked calls.
3862 * A buffer cannot be placed on two lists at the same time.
3863 */
3864 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3865 {
3866 unsigned long flags;
3867
3868 spin_lock_irqsave(&list->lock, flags);
3869 __skb_queue_after(list, old, newsk);
3870 spin_unlock_irqrestore(&list->lock, flags);
3871 }
3872 EXPORT_SYMBOL(skb_append);
3873
3874 static inline void skb_split_inside_header(struct sk_buff *skb,
3875 struct sk_buff* skb1,
3876 const u32 len, const int pos)
3877 {
3878 int i;
3879
3880 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3881 pos - len);
3882 /* And move data appendix as is. */
3883 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3884 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3885
3886 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3887 skb_shinfo(skb)->nr_frags = 0;
3888 skb1->data_len = skb->data_len;
3889 skb1->len += skb1->data_len;
3890 skb->data_len = 0;
3891 skb->len = len;
3892 skb_set_tail_pointer(skb, len);
3893 }
3894
3895 static inline void skb_split_no_header(struct sk_buff *skb,
3896 struct sk_buff* skb1,
3897 const u32 len, int pos)
3898 {
3899 int i, k = 0;
3900 const int nfrags = skb_shinfo(skb)->nr_frags;
3901
3902 skb_shinfo(skb)->nr_frags = 0;
3903 skb1->len = skb1->data_len = skb->len - len;
3904 skb->len = len;
3905 skb->data_len = len - pos;
3906
3907 for (i = 0; i < nfrags; i++) {
3908 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3909
3910 if (pos + size > len) {
3911 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3912
3913 if (pos < len) {
3914 /* Split frag.
3915 * We have two variants in this case:
3916 * 1. Move the whole frag to the second
3917 * part, if possible. F.e. this
3918 * approach is mandatory for TUX,
3919 * where splitting is expensive.
3920 * 2. Split the frag accurately at the boundary,
3921 * which is what we do here. */
3922 skb_frag_ref(skb, i);
3923 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3924 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3925 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3926 skb_shinfo(skb)->nr_frags++;
3927 }
3928 k++;
3929 } else
3930 skb_shinfo(skb)->nr_frags++;
3931 pos += size;
3932 }
3933 skb_shinfo(skb1)->nr_frags = k;
3934 }
3935
3936 /**
3937 * skb_split - Split fragmented skb to two parts at length len.
3938 * @skb: the buffer to split
3939 * @skb1: the buffer to receive the second part
3940 * @len: new length for skb
3941 */
3942 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3943 {
3944 int pos = skb_headlen(skb);
3945 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
3946
3947 skb_zcopy_downgrade_managed(skb);
3948
3949 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
3950 skb_zerocopy_clone(skb1, skb, 0);
3951 if (len < pos) /* Split line is inside header. */
3952 skb_split_inside_header(skb, skb1, len, pos);
3953 else /* Second chunk has no header, nothing to copy. */
3954 skb_split_no_header(skb, skb1, len, pos);
3955 }
3956 EXPORT_SYMBOL(skb_split);
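/* Example (illustrative sketch, not an in-tree caller): splitting @skb at
 * @len into a freshly allocated second buffer. The second skb needs enough
 * tailroom to absorb the linear remainder when the split point falls inside
 * the header; skb_headlen(skb) is always sufficient for that.
 *
 *	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (!skb1)
 *		return -ENOMEM;
 *	skb_split(skb, skb1, len);
 */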
3957
3958 /* Shifting from/to a cloned skb is a no-go.
3959 *
3960 * Caller cannot keep skb_shinfo related pointers past calling here!
3961 */
3962 static int skb_prepare_for_shift(struct sk_buff *skb)
3963 {
3964 return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3965 }
3966
3967 /**
3968 * skb_shift - Shifts paged data partially from skb to another
3969 * @tgt: buffer into which tail data gets added
3970 * @skb: buffer from which the paged data comes from
3971 * @shiftlen: shift up to this many bytes
3972 *
3973 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3974 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3975 * It's up to the caller to free skb if everything was shifted.
3976 *
3977 * If @tgt runs out of frags, the whole operation is aborted.
3978 *
3979 * Skb cannot include anything else but paged data while tgt is allowed
3980 * to have non-paged data as well.
3981 *
3982 * TODO: full sized shift could be optimized but that would need
3983 * specialized skb free'er to handle frags without up-to-date nr_frags.
3984 */
3985 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3986 {
3987 int from, to, merge, todo;
3988 skb_frag_t *fragfrom, *fragto;
3989
3990 BUG_ON(shiftlen > skb->len);
3991
3992 if (skb_headlen(skb))
3993 return 0;
3994 if (skb_zcopy(tgt) || skb_zcopy(skb))
3995 return 0;
3996
3997 todo = shiftlen;
3998 from = 0;
3999 to = skb_shinfo(tgt)->nr_frags;
4000 fragfrom = &skb_shinfo(skb)->frags[from];
4001
4002 /* Actual merge is delayed until the point when we know we can
4003 * commit all, so that we don't have to undo partial changes
4004 */
4005 if (!to ||
4006 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
4007 skb_frag_off(fragfrom))) {
4008 merge = -1;
4009 } else {
4010 merge = to - 1;
4011
4012 todo -= skb_frag_size(fragfrom);
4013 if (todo < 0) {
4014 if (skb_prepare_for_shift(skb) ||
4015 skb_prepare_for_shift(tgt))
4016 return 0;
4017
4018 /* All previous frag pointers might be stale! */
4019 fragfrom = &skb_shinfo(skb)->frags[from];
4020 fragto = &skb_shinfo(tgt)->frags[merge];
4021
4022 skb_frag_size_add(fragto, shiftlen);
4023 skb_frag_size_sub(fragfrom, shiftlen);
4024 skb_frag_off_add(fragfrom, shiftlen);
4025
4026 goto onlymerged;
4027 }
4028
4029 from++;
4030 }
4031
4032 /* Skip full, not-fitting skb to avoid expensive operations */
4033 if ((shiftlen == skb->len) &&
4034 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
4035 return 0;
4036
4037 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
4038 return 0;
4039
4040 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
4041 if (to == MAX_SKB_FRAGS)
4042 return 0;
4043
4044 fragfrom = &skb_shinfo(skb)->frags[from];
4045 fragto = &skb_shinfo(tgt)->frags[to];
4046
4047 if (todo >= skb_frag_size(fragfrom)) {
4048 *fragto = *fragfrom;
4049 todo -= skb_frag_size(fragfrom);
4050 from++;
4051 to++;
4052
4053 } else {
4054 __skb_frag_ref(fragfrom);
4055 skb_frag_page_copy(fragto, fragfrom);
4056 skb_frag_off_copy(fragto, fragfrom);
4057 skb_frag_size_set(fragto, todo);
4058
4059 skb_frag_off_add(fragfrom, todo);
4060 skb_frag_size_sub(fragfrom, todo);
4061 todo = 0;
4062
4063 to++;
4064 break;
4065 }
4066 }
4067
4068 /* Ready to "commit" this state change to tgt */
4069 skb_shinfo(tgt)->nr_frags = to;
4070
4071 if (merge >= 0) {
4072 fragfrom = &skb_shinfo(skb)->frags[0];
4073 fragto = &skb_shinfo(tgt)->frags[merge];
4074
4075 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
4076 __skb_frag_unref(fragfrom, skb->pp_recycle);
4077 }
4078
4079 /* Reposition in the original skb */
4080 to = 0;
4081 while (from < skb_shinfo(skb)->nr_frags)
4082 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
4083 skb_shinfo(skb)->nr_frags = to;
4084
4085 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
4086
4087 onlymerged:
4088 /* Most likely the tgt won't ever need its checksum anymore; skb on
4089 * the other hand might need it if it needs to be resent
4090 */
4091 tgt->ip_summed = CHECKSUM_PARTIAL;
4092 skb->ip_summed = CHECKSUM_PARTIAL;
4093
4094 skb_len_add(skb, -shiftlen);
4095 skb_len_add(tgt, shiftlen);
4096
4097 return shiftlen;
4098 }
4099
4100 /**
4101 * skb_prepare_seq_read - Prepare a sequential read of skb data
4102 * @skb: the buffer to read
4103 * @from: lower offset of data to be read
4104 * @to: upper offset of data to be read
4105 * @st: state variable
4106 *
4107 * Initializes the specified state variable. Must be called before
4108 * invoking skb_seq_read() for the first time.
4109 */
4110 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
4111 unsigned int to, struct skb_seq_state *st)
4112 {
4113 st->lower_offset = from;
4114 st->upper_offset = to;
4115 st->root_skb = st->cur_skb = skb;
4116 st->frag_idx = st->stepped_offset = 0;
4117 st->frag_data = NULL;
4118 st->frag_off = 0;
4119 }
4120 EXPORT_SYMBOL(skb_prepare_seq_read);
4121
4122 /**
4123 * skb_seq_read - Sequentially read skb data
4124 * @consumed: number of bytes consumed by the caller so far
4125 * @data: destination pointer for data to be returned
4126 * @st: state variable
4127 *
4128 * Reads a block of skb data at @consumed relative to the
4129 * lower offset specified to skb_prepare_seq_read(). Assigns
4130 * the head of the data block to @data and returns the length
4131 * of the block or 0 if the end of the skb data or the upper
4132 * offset has been reached.
4133 *
4134 * The caller is not required to consume all of the data
4135 * returned, i.e. @consumed is typically set to the number
4136 * of bytes already consumed and the next call to
4137 * skb_seq_read() will return the remaining part of the block.
4138 *
4139 * Note 1: The size of each block of data returned can be arbitrary;
4140 * this limitation is the cost of zerocopy sequential
4141 * reads of potentially non-linear data.
4142 *
4143 * Note 2: Fragment lists within fragments are not implemented
4144 * at the moment, state->root_skb could be replaced with
4145 * a stack for this purpose.
4146 */
4147 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
4148 struct skb_seq_state *st)
4149 {
4150 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
4151 skb_frag_t *frag;
4152
4153 if (unlikely(abs_offset >= st->upper_offset)) {
4154 if (st->frag_data) {
4155 kunmap_atomic(st->frag_data);
4156 st->frag_data = NULL;
4157 }
4158 return 0;
4159 }
4160
4161 next_skb:
4162 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
4163
4164 if (abs_offset < block_limit && !st->frag_data) {
4165 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
4166 return block_limit - abs_offset;
4167 }
4168
4169 if (st->frag_idx == 0 && !st->frag_data)
4170 st->stepped_offset += skb_headlen(st->cur_skb);
4171
4172 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
4173 unsigned int pg_idx, pg_off, pg_sz;
4174
4175 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
4176
4177 pg_idx = 0;
4178 pg_off = skb_frag_off(frag);
4179 pg_sz = skb_frag_size(frag);
4180
4181 if (skb_frag_must_loop(skb_frag_page(frag))) {
4182 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
4183 pg_off = offset_in_page(pg_off + st->frag_off);
4184 pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
4185 PAGE_SIZE - pg_off);
4186 }
4187
4188 block_limit = pg_sz + st->stepped_offset;
4189 if (abs_offset < block_limit) {
4190 if (!st->frag_data)
4191 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
4192
4193 *data = (u8 *)st->frag_data + pg_off +
4194 (abs_offset - st->stepped_offset);
4195
4196 return block_limit - abs_offset;
4197 }
4198
4199 if (st->frag_data) {
4200 kunmap_atomic(st->frag_data);
4201 st->frag_data = NULL;
4202 }
4203
4204 st->stepped_offset += pg_sz;
4205 st->frag_off += pg_sz;
4206 if (st->frag_off == skb_frag_size(frag)) {
4207 st->frag_off = 0;
4208 st->frag_idx++;
4209 }
4210 }
4211
4212 if (st->frag_data) {
4213 kunmap_atomic(st->frag_data);
4214 st->frag_data = NULL;
4215 }
4216
4217 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
4218 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
4219 st->frag_idx = 0;
4220 goto next_skb;
4221 } else if (st->cur_skb->next) {
4222 st->cur_skb = st->cur_skb->next;
4223 st->frag_idx = 0;
4224 goto next_skb;
4225 }
4226
4227 return 0;
4228 }
4229 EXPORT_SYMBOL(skb_seq_read);
4230
4231 /**
4232 * skb_abort_seq_read - Abort a sequential read of skb data
4233 * @st: state variable
4234 *
4235 * Must be called if the sequential read was abandoned before
4236 * skb_seq_read() returned 0.
4237 */
4238 void skb_abort_seq_read(struct skb_seq_state *st)
4239 {
4240 if (st->frag_data)
4241 kunmap_atomic(st->frag_data);
4242 }
4243 EXPORT_SYMBOL(skb_abort_seq_read);
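/* Example (illustrative sketch): walking all of an skb's data without
 * linearizing it; process_block() is a hypothetical consumer.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process_block(data, avail);
 *		consumed += avail;
 *	}
 *
 * skb_abort_seq_read() is only required when the loop is abandoned before
 * skb_seq_read() has returned 0.
 */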
4244
4245 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
4246
4247 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
4248 struct ts_config *conf,
4249 struct ts_state *state)
4250 {
4251 return skb_seq_read(offset, text, TS_SKB_CB(state));
4252 }
4253
4254 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
4255 {
4256 skb_abort_seq_read(TS_SKB_CB(state));
4257 }
4258
4259 /**
4260 * skb_find_text - Find a text pattern in skb data
4261 * @skb: the buffer to look in
4262 * @from: search offset
4263 * @to: search limit
4264 * @config: textsearch configuration
4265 *
4266 * Finds a pattern in the skb data according to the specified
4267 * textsearch configuration. Use textsearch_next() to retrieve
4268 * subsequent occurrences of the pattern. Returns the offset
4269 * to the first occurrence or UINT_MAX if no match was found.
4270 */
4271 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
4272 unsigned int to, struct ts_config *config)
4273 {
4274 unsigned int patlen = config->ops->get_pattern_len(config);
4275 struct ts_state state;
4276 unsigned int ret;
4277
4278 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
4279
4280 config->get_next_block = skb_ts_get_next_block;
4281 config->finish = skb_ts_finish;
4282
4283 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
4284
4285 ret = textsearch_find(config, &state);
4286 return (ret + patlen <= to - from ? ret : UINT_MAX);
4287 }
4288 EXPORT_SYMBOL(skb_find_text);
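/* Example (illustrative sketch): searching an skb with the "kmp" textsearch
 * algorithm; the pattern and its length are made-up values.
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	textsearch_destroy(conf);
 *	if (pos != UINT_MAX)
 *		pr_debug("match at offset %u\n", pos);
 */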
4289
4290 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
4291 int offset, size_t size, size_t max_frags)
4292 {
4293 int i = skb_shinfo(skb)->nr_frags;
4294
4295 if (skb_can_coalesce(skb, i, page, offset)) {
4296 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
4297 } else if (i < max_frags) {
4298 skb_zcopy_downgrade_managed(skb);
4299 get_page(page);
4300 skb_fill_page_desc_noacc(skb, i, page, offset, size);
4301 } else {
4302 return -EMSGSIZE;
4303 }
4304
4305 return 0;
4306 }
4307 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
4308
4309 /**
4310 * skb_pull_rcsum - pull skb and update receive checksum
4311 * @skb: buffer to update
4312 * @len: length of data pulled
4313 *
4314 * This function performs an skb_pull on the packet and updates
4315 * the CHECKSUM_COMPLETE checksum. It should be used on
4316 * receive path processing instead of skb_pull unless you know
4317 * that the checksum difference is zero (e.g., a valid IP header)
4318 * or you are setting ip_summed to CHECKSUM_NONE.
4319 */
4320 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
4321 {
4322 unsigned char *data = skb->data;
4323
4324 BUG_ON(len > skb->len);
4325 __skb_pull(skb, len);
4326 skb_postpull_rcsum(skb, data, len);
4327 return skb->data;
4328 }
4329 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
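/* Example (illustrative sketch): stripping an encapsulation header on the
 * receive path while keeping a CHECKSUM_COMPLETE value valid. VLAN_HLEN is
 * used purely as a sample length and the 'drop' label is assumed to exist.
 *
 *	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
 *		goto drop;
 *	skb_pull_rcsum(skb, VLAN_HLEN);
 */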
4330
4331 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
4332 {
4333 skb_frag_t head_frag;
4334 struct page *page;
4335
4336 page = virt_to_head_page(frag_skb->head);
4337 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data -
4338 (unsigned char *)page_address(page),
4339 skb_headlen(frag_skb));
4340 return head_frag;
4341 }
4342
4343 struct sk_buff *skb_segment_list(struct sk_buff *skb,
4344 netdev_features_t features,
4345 unsigned int offset)
4346 {
4347 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
4348 unsigned int tnl_hlen = skb_tnl_header_len(skb);
4349 unsigned int delta_truesize = 0;
4350 unsigned int delta_len = 0;
4351 struct sk_buff *tail = NULL;
4352 struct sk_buff *nskb, *tmp;
4353 int len_diff, err;
4354
4355 skb_push(skb, -skb_network_offset(skb) + offset);
4356
4357 /* Ensure the head is writeable before touching the shared info */
4358 err = skb_unclone(skb, GFP_ATOMIC);
4359 if (err)
4360 goto err_linearize;
4361
4362 skb_shinfo(skb)->frag_list = NULL;
4363
4364 while (list_skb) {
4365 nskb = list_skb;
4366 list_skb = list_skb->next;
4367
4368 err = 0;
4369 delta_truesize += nskb->truesize;
4370 if (skb_shared(nskb)) {
4371 tmp = skb_clone(nskb, GFP_ATOMIC);
4372 if (tmp) {
4373 consume_skb(nskb);
4374 nskb = tmp;
4375 err = skb_unclone(nskb, GFP_ATOMIC);
4376 } else {
4377 err = -ENOMEM;
4378 }
4379 }
4380
4381 if (!tail)
4382 skb->next = nskb;
4383 else
4384 tail->next = nskb;
4385
4386 if (unlikely(err)) {
4387 nskb->next = list_skb;
4388 goto err_linearize;
4389 }
4390
4391 tail = nskb;
4392
4393 delta_len += nskb->len;
4394
4395 skb_push(nskb, -skb_network_offset(nskb) + offset);
4396
4397 skb_release_head_state(nskb);
4398 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4399 __copy_skb_header(nskb, skb);
4400
4401 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4402 nskb->transport_header += len_diff;
4403 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
4404 nskb->data - tnl_hlen,
4405 offset + tnl_hlen);
4406
4407 if (skb_needs_linearize(nskb, features) &&
4408 __skb_linearize(nskb))
4409 goto err_linearize;
4410 }
4411
4412 skb->truesize = skb->truesize - delta_truesize;
4413 skb->data_len = skb->data_len - delta_len;
4414 skb->len = skb->len - delta_len;
4415
4416 skb_gso_reset(skb);
4417
4418 skb->prev = tail;
4419
4420 if (skb_needs_linearize(skb, features) &&
4421 __skb_linearize(skb))
4422 goto err_linearize;
4423
4424 skb_get(skb);
4425
4426 return skb;
4427
4428 err_linearize:
4429 kfree_skb_list(skb->next);
4430 skb->next = NULL;
4431 return ERR_PTR(-ENOMEM);
4432 }
4433 EXPORT_SYMBOL_GPL(skb_segment_list);
4434
4435 /**
4436 * skb_segment - Perform protocol segmentation on skb.
4437 * @head_skb: buffer to segment
4438 * @features: features for the output path (see dev->features)
4439 *
4440 * This function performs segmentation on the given skb. It returns
4441 * a pointer to the first in a list of new skbs for the segments.
4442 * In case of error it returns ERR_PTR(err).
4443 */
4444 struct sk_buff *skb_segment(struct sk_buff *head_skb,
4445 netdev_features_t features)
4446 {
4447 struct sk_buff *segs = NULL;
4448 struct sk_buff *tail = NULL;
4449 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
4450 unsigned int mss = skb_shinfo(head_skb)->gso_size;
4451 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
4452 unsigned int offset = doffset;
4453 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
4454 unsigned int partial_segs = 0;
4455 unsigned int headroom;
4456 unsigned int len = head_skb->len;
4457 struct sk_buff *frag_skb;
4458 skb_frag_t *frag;
4459 __be16 proto;
4460 bool csum, sg;
4461 int err = -ENOMEM;
4462 int i = 0;
4463 int nfrags, pos;
4464
4465 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
4466 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
4467 struct sk_buff *check_skb;
4468
4469 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
4470 if (skb_headlen(check_skb) && !check_skb->head_frag) {
4471 /* gso_size is untrusted, and we have a frag_list with
4472 * a linear non head_frag item.
4473 *
4474 * If head_skb's headlen does not fit requested gso_size,
4475 * it means that the frag_list members do NOT terminate
4476 * on exact gso_size boundaries. Hence we cannot perform
4477 * skb_frag_t page sharing. Therefore we must fall back to
4478 * copying the frag_list skbs; we do so by disabling SG.
4479 */
4480 features &= ~NETIF_F_SG;
4481 break;
4482 }
4483 }
4484 }
4485
4486 __skb_push(head_skb, doffset);
4487 proto = skb_network_protocol(head_skb, NULL);
4488 if (unlikely(!proto))
4489 return ERR_PTR(-EINVAL);
4490
4491 sg = !!(features & NETIF_F_SG);
4492 csum = !!can_checksum_protocol(features, proto);
4493
4494 if (sg && csum && (mss != GSO_BY_FRAGS)) {
4495 if (!(features & NETIF_F_GSO_PARTIAL)) {
4496 struct sk_buff *iter;
4497 unsigned int frag_len;
4498
4499 if (!list_skb ||
4500 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
4501 goto normal;
4502
4503 /* If we get here then all the required
4504 * GSO features except frag_list are supported.
4505 * Try to split the SKB into multiple GSO SKBs
4506 * with no frag_list.
4507 * Currently we can do that only when the buffers don't
4508 * have a linear part and all the buffers except
4509 * the last are of the same length.
4510 */
4511 frag_len = list_skb->len;
4512 skb_walk_frags(head_skb, iter) {
4513 if (frag_len != iter->len && iter->next)
4514 goto normal;
4515 if (skb_headlen(iter) && !iter->head_frag)
4516 goto normal;
4517
4518 len -= iter->len;
4519 }
4520
4521 if (len != frag_len)
4522 goto normal;
4523 }
4524
4525 /* GSO partial only requires that we trim off any excess that
4526 * doesn't fit into an MSS sized block, so take care of that
4527 * now.
4528 * Cap len to not accidentally hit GSO_BY_FRAGS.
4529 */
4530 partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
4531 if (partial_segs > 1)
4532 mss *= partial_segs;
4533 else
4534 partial_segs = 0;
4535 }
4536
4537 normal:
4538 headroom = skb_headroom(head_skb);
4539 pos = skb_headlen(head_skb);
4540
4541 if (skb_orphan_frags(head_skb, GFP_ATOMIC))
4542 return ERR_PTR(-ENOMEM);
4543
4544 nfrags = skb_shinfo(head_skb)->nr_frags;
4545 frag = skb_shinfo(head_skb)->frags;
4546 frag_skb = head_skb;
4547
4548 do {
4549 struct sk_buff *nskb;
4550 skb_frag_t *nskb_frag;
4551 int hsize;
4552 int size;
4553
4554 if (unlikely(mss == GSO_BY_FRAGS)) {
4555 len = list_skb->len;
4556 } else {
4557 len = head_skb->len - offset;
4558 if (len > mss)
4559 len = mss;
4560 }
4561
4562 hsize = skb_headlen(head_skb) - offset;
4563
4564 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
4565 (skb_headlen(list_skb) == len || sg)) {
4566 BUG_ON(skb_headlen(list_skb) > len);
4567
4568 nskb = skb_clone(list_skb, GFP_ATOMIC);
4569 if (unlikely(!nskb))
4570 goto err;
4571
4572 i = 0;
4573 nfrags = skb_shinfo(list_skb)->nr_frags;
4574 frag = skb_shinfo(list_skb)->frags;
4575 frag_skb = list_skb;
4576 pos += skb_headlen(list_skb);
4577
4578 while (pos < offset + len) {
4579 BUG_ON(i >= nfrags);
4580
4581 size = skb_frag_size(frag);
4582 if (pos + size > offset + len)
4583 break;
4584
4585 i++;
4586 pos += size;
4587 frag++;
4588 }
4589
4590 list_skb = list_skb->next;
4591
4592 if (unlikely(pskb_trim(nskb, len))) {
4593 kfree_skb(nskb);
4594 goto err;
4595 }
4596
4597 hsize = skb_end_offset(nskb);
4598 if (skb_cow_head(nskb, doffset + headroom)) {
4599 kfree_skb(nskb);
4600 goto err;
4601 }
4602
4603 nskb->truesize += skb_end_offset(nskb) - hsize;
4604 skb_release_head_state(nskb);
4605 __skb_push(nskb, doffset);
4606 } else {
4607 if (hsize < 0)
4608 hsize = 0;
4609 if (hsize > len || !sg)
4610 hsize = len;
4611
4612 nskb = __alloc_skb(hsize + doffset + headroom,
4613 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
4614 NUMA_NO_NODE);
4615
4616 if (unlikely(!nskb))
4617 goto err;
4618
4619 skb_reserve(nskb, headroom);
4620 __skb_put(nskb, doffset);
4621 }
4622
4623 if (segs)
4624 tail->next = nskb;
4625 else
4626 segs = nskb;
4627 tail = nskb;
4628
4629 __copy_skb_header(nskb, head_skb);
4630
4631 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4632 skb_reset_mac_len(nskb);
4633
4634 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
4635 nskb->data - tnl_hlen,
4636 doffset + tnl_hlen);
4637
4638 if (nskb->len == len + doffset)
4639 goto perform_csum_check;
4640
4641 if (!sg) {
4642 if (!csum) {
4643 if (!nskb->remcsum_offload)
4644 nskb->ip_summed = CHECKSUM_NONE;
4645 SKB_GSO_CB(nskb)->csum =
4646 skb_copy_and_csum_bits(head_skb, offset,
4647 skb_put(nskb,
4648 len),
4649 len);
4650 SKB_GSO_CB(nskb)->csum_start =
4651 skb_headroom(nskb) + doffset;
4652 } else {
4653 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4654 goto err;
4655 }
4656 continue;
4657 }
4658
4659 nskb_frag = skb_shinfo(nskb)->frags;
4660
4661 skb_copy_from_linear_data_offset(head_skb, offset,
4662 skb_put(nskb, hsize), hsize);
4663
4664 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4665 SKBFL_SHARED_FRAG;
4666
4667 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4668 goto err;
4669
4670 while (pos < offset + len) {
4671 if (i >= nfrags) {
4672 if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
4673 skb_zerocopy_clone(nskb, list_skb,
4674 GFP_ATOMIC))
4675 goto err;
4676
4677 i = 0;
4678 nfrags = skb_shinfo(list_skb)->nr_frags;
4679 frag = skb_shinfo(list_skb)->frags;
4680 frag_skb = list_skb;
4681 if (!skb_headlen(list_skb)) {
4682 BUG_ON(!nfrags);
4683 } else {
4684 BUG_ON(!list_skb->head_frag);
4685
4686 /* to make room for head_frag. */
4687 i--;
4688 frag--;
4689 }
4690
4691 list_skb = list_skb->next;
4692 }
4693
4694 if (unlikely(skb_shinfo(nskb)->nr_frags >=
4695 MAX_SKB_FRAGS)) {
4696 net_warn_ratelimited(
4697 "skb_segment: too many frags: %u %u\n",
4698 pos, mss);
4699 err = -EINVAL;
4700 goto err;
4701 }
4702
4703 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4704 __skb_frag_ref(nskb_frag);
4705 size = skb_frag_size(nskb_frag);
4706
4707 if (pos < offset) {
4708 skb_frag_off_add(nskb_frag, offset - pos);
4709 skb_frag_size_sub(nskb_frag, offset - pos);
4710 }
4711
4712 skb_shinfo(nskb)->nr_frags++;
4713
4714 if (pos + size <= offset + len) {
4715 i++;
4716 frag++;
4717 pos += size;
4718 } else {
4719 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4720 goto skip_fraglist;
4721 }
4722
4723 nskb_frag++;
4724 }
4725
4726 skip_fraglist:
4727 nskb->data_len = len - hsize;
4728 nskb->len += nskb->data_len;
4729 nskb->truesize += nskb->data_len;
4730
4731 perform_csum_check:
4732 if (!csum) {
4733 if (skb_has_shared_frag(nskb) &&
4734 __skb_linearize(nskb))
4735 goto err;
4736
4737 if (!nskb->remcsum_offload)
4738 nskb->ip_summed = CHECKSUM_NONE;
4739 SKB_GSO_CB(nskb)->csum =
4740 skb_checksum(nskb, doffset,
4741 nskb->len - doffset, 0);
4742 SKB_GSO_CB(nskb)->csum_start =
4743 skb_headroom(nskb) + doffset;
4744 }
4745 } while ((offset += len) < head_skb->len);
4746
4747 /* Some callers want to get the end of the list.
4748 * Put it in segs->prev to avoid walking the list.
4749 * (see validate_xmit_skb_list() for example)
4750 */
4751 segs->prev = tail;
4752
4753 if (partial_segs) {
4754 struct sk_buff *iter;
4755 int type = skb_shinfo(head_skb)->gso_type;
4756 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4757
4758 /* Update type to add partial and then remove dodgy if set */
4759 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4760 type &= ~SKB_GSO_DODGY;
4761
4762 /* Update GSO info and prepare to start updating headers on
4763 * our way back down the stack of protocols.
4764 */
4765 for (iter = segs; iter; iter = iter->next) {
4766 skb_shinfo(iter)->gso_size = gso_size;
4767 skb_shinfo(iter)->gso_segs = partial_segs;
4768 skb_shinfo(iter)->gso_type = type;
4769 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4770 }
4771
4772 if (tail->len - doffset <= gso_size)
4773 skb_shinfo(tail)->gso_size = 0;
4774 else if (tail != segs)
4775 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4776 }
4777
4778 /* The following permits correct backpressure for protocols
4779 * using skb_set_owner_w().
4780 * The idea is to transfer ownership from head_skb to the last segment.
4781 */
4782 if (head_skb->destructor == sock_wfree) {
4783 swap(tail->truesize, head_skb->truesize);
4784 swap(tail->destructor, head_skb->destructor);
4785 swap(tail->sk, head_skb->sk);
4786 }
4787 return segs;
4788
4789 err:
4790 kfree_skb_list(segs);
4791 return ERR_PTR(err);
4792 }
4793 EXPORT_SYMBOL_GPL(skb_segment);
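/* Example (illustrative sketch): most code reaches this through
 * skb_gso_segment(); a direct caller would consume the original skb and walk
 * the returned ->next list. xmit_one() stands in for a hypothetical
 * per-segment transmit routine, and 'features' comes from the device.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		xmit_one(segs);
 *		segs = next;
 *	}
 */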
4794
4795 #ifdef CONFIG_SKB_EXTENSIONS
4796 #define SKB_EXT_ALIGN_VALUE 8
4797 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4798
4799 static const u8 skb_ext_type_len[] = {
4800 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4801 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4802 #endif
4803 #ifdef CONFIG_XFRM
4804 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4805 #endif
4806 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4807 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4808 #endif
4809 #if IS_ENABLED(CONFIG_MPTCP)
4810 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4811 #endif
4812 #if IS_ENABLED(CONFIG_MCTP_FLOWS)
4813 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
4814 #endif
4815 };
4816
4817 static __always_inline unsigned int skb_ext_total_length(void)
4818 {
4819 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext);
4820 int i;
4821
4822 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++)
4823 l += skb_ext_type_len[i];
4824
4825 return l;
4826 }
4827
4828 static void skb_extensions_init(void)
4829 {
4830 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4831 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
4832 BUILD_BUG_ON(skb_ext_total_length() > 255);
4833 #endif
4834
4835 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4836 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4837 0,
4838 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4839 NULL);
4840 }
4841 #else
4842 static void skb_extensions_init(void) {}
4843 #endif
4844
4845 /* The SKB kmem_cache slab is critical for network performance. Never
4846 * merge/alias the slab with similar sized objects. This avoids fragmentation
4847 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs.
4848 */
4849 #ifndef CONFIG_SLUB_TINY
4850 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE
4851 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
4852 #define FLAG_SKB_NO_MERGE 0
4853 #endif
4854
4855 void __init skb_init(void)
4856 {
4857 skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4858 sizeof(struct sk_buff),
4859 0,
4860 SLAB_HWCACHE_ALIGN|SLAB_PANIC|
4861 FLAG_SKB_NO_MERGE,
4862 offsetof(struct sk_buff, cb),
4863 sizeof_field(struct sk_buff, cb),
4864 NULL);
4865 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4866 sizeof(struct sk_buff_fclones),
4867 0,
4868 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4869 NULL);
4870 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
4871 * struct skb_shared_info is located at the end of skb->head,
4872 * and should not be copied to/from user.
4873 */
4874 skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
4875 SKB_SMALL_HEAD_CACHE_SIZE,
4876 0,
4877 SLAB_HWCACHE_ALIGN | SLAB_PANIC,
4878 0,
4879 SKB_SMALL_HEAD_HEADROOM,
4880 NULL);
4881 skb_extensions_init();
4882 }
4883
4884 static int
4885 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4886 unsigned int recursion_level)
4887 {
4888 int start = skb_headlen(skb);
4889 int i, copy = start - offset;
4890 struct sk_buff *frag_iter;
4891 int elt = 0;
4892
4893 if (unlikely(recursion_level >= 24))
4894 return -EMSGSIZE;
4895
4896 if (copy > 0) {
4897 if (copy > len)
4898 copy = len;
4899 sg_set_buf(sg, skb->data + offset, copy);
4900 elt++;
4901 if ((len -= copy) == 0)
4902 return elt;
4903 offset += copy;
4904 }
4905
4906 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4907 int end;
4908
4909 WARN_ON(start > offset + len);
4910
4911 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4912 if ((copy = end - offset) > 0) {
4913 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4914 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4915 return -EMSGSIZE;
4916
4917 if (copy > len)
4918 copy = len;
4919 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4920 skb_frag_off(frag) + offset - start);
4921 elt++;
4922 if (!(len -= copy))
4923 return elt;
4924 offset += copy;
4925 }
4926 start = end;
4927 }
4928
4929 skb_walk_frags(skb, frag_iter) {
4930 int end, ret;
4931
4932 WARN_ON(start > offset + len);
4933
4934 end = start + frag_iter->len;
4935 if ((copy = end - offset) > 0) {
4936 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4937 return -EMSGSIZE;
4938
4939 if (copy > len)
4940 copy = len;
4941 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4942 copy, recursion_level + 1);
4943 if (unlikely(ret < 0))
4944 return ret;
4945 elt += ret;
4946 if ((len -= copy) == 0)
4947 return elt;
4948 offset += copy;
4949 }
4950 start = end;
4951 }
4952 BUG_ON(len);
4953 return elt;
4954 }
4955
4956 /**
4957 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4958 * @skb: Socket buffer containing the buffers to be mapped
4959 * @sg: The scatter-gather list to map into
4960 * @offset: The offset into the buffer's contents to start mapping
4961 * @len: Length of buffer space to be mapped
4962 *
4963 * Fill the specified scatter-gather list with mappings/pointers into a
4964 * region of the buffer space attached to a socket buffer. Returns either
4965 * the number of scatterlist items used, or -EMSGSIZE if the contents
4966 * could not fit.
4967 */
4968 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4969 {
4970 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4971
4972 if (nsg <= 0)
4973 return nsg;
4974
4975 sg_mark_end(&sg[nsg - 1]);
4976
4977 return nsg;
4978 }
4979 EXPORT_SYMBOL_GPL(skb_to_sgvec);
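/* Example (illustrative sketch): mapping an skb into a scatterlist. The
 * on-stack array size is an assumption; callers that must be exact typically
 * derive the element count from skb_cow_data() (see further below).
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */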
4980
4981 /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4982 * given sglist without marking the sg entry that contains the last skb data
4983 * as the end. This lets the caller keep appending new data after the first
4984 * call without having to call sg_unmark_end to extend the sg list.
4985 *
4986 * Scenario to use skb_to_sgvec_nomark:
4987 * 1. sg_init_table
4988 * 2. skb_to_sgvec_nomark(payload1)
4989 * 3. skb_to_sgvec_nomark(payload2)
4990 *
4991 * This is equivalent to:
4992 * 1. sg_init_table
4993 * 2. skb_to_sgvec(payload1)
4994 * 3. sg_unmark_end
4995 * 4. skb_to_sgvec(payload2)
4996 *
4997 * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
4998 * is preferable.
4999 */
5000 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
5001 int offset, int len)
5002 {
5003 return __skb_to_sgvec(skb, sg, offset, len, 0);
5004 }
5005 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
5006
5007
5008
5009 /**
5010 * skb_cow_data - Check that a socket buffer's data buffers are writable
5011 * @skb: The socket buffer to check.
5012 * @tailbits: Amount of trailing space to be added
5013 * @trailer: Returned pointer to the skb where the @tailbits space begins
5014 *
5015 * Make sure that the data buffers attached to a socket buffer are
5016 * writable. If they are not, private copies are made of the data buffers
5017 * and the socket buffer is set to use these instead.
5018 *
5019 * If @tailbits is given, make sure that there is space to write @tailbits
5020 * bytes of data beyond current end of socket buffer. @trailer will be
5021 * set to point to the skb in which this space begins.
5022 *
5023 * The number of scatterlist elements required to completely map the
5024 * COW'd and extended socket buffer will be returned.
5025 */
5026 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
5027 {
5028 int copyflag;
5029 int elt;
5030 struct sk_buff *skb1, **skb_p;
5031
5032 /* If skb is cloned or its head is paged, reallocate
5033 * head pulling out all the pages (pages are considered not writable
5034 * at the moment even if they are anonymous).
5035 */
5036 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
5037 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
5038 return -ENOMEM;
5039
5040 /* Easy case. Most of packets will go this way. */
5041 if (!skb_has_frag_list(skb)) {
5042 /* A little trouble: not enough space for the trailer.
5043 * This should not happen when the stack is tuned to generate
5044 * good frames. OK, on a miss we reallocate and reserve even more
5045 * space; 128 bytes is fair. */
5046
5047 if (skb_tailroom(skb) < tailbits &&
5048 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
5049 return -ENOMEM;
5050
5051 /* Voila! */
5052 *trailer = skb;
5053 return 1;
5054 }
5055
5056 /* Misery. We are in trouble, going to mince the fragments... */
5057
5058 elt = 1;
5059 skb_p = &skb_shinfo(skb)->frag_list;
5060 copyflag = 0;
5061
5062 while ((skb1 = *skb_p) != NULL) {
5063 int ntail = 0;
5064
5065 /* The fragment is partially pulled by someone;
5066 * this can happen on input. Copy it and everything
5067 * after it. */
5068
5069 if (skb_shared(skb1))
5070 copyflag = 1;
5071
5072 /* If the skb is the last, worry about trailer. */
5073
5074 if (skb1->next == NULL && tailbits) {
5075 if (skb_shinfo(skb1)->nr_frags ||
5076 skb_has_frag_list(skb1) ||
5077 skb_tailroom(skb1) < tailbits)
5078 ntail = tailbits + 128;
5079 }
5080
5081 if (copyflag ||
5082 skb_cloned(skb1) ||
5083 ntail ||
5084 skb_shinfo(skb1)->nr_frags ||
5085 skb_has_frag_list(skb1)) {
5086 struct sk_buff *skb2;
5087
5088 /* Ugh, we are miserable poor guys... */
5089 if (ntail == 0)
5090 skb2 = skb_copy(skb1, GFP_ATOMIC);
5091 else
5092 skb2 = skb_copy_expand(skb1,
5093 skb_headroom(skb1),
5094 ntail,
5095 GFP_ATOMIC);
5096 if (unlikely(skb2 == NULL))
5097 return -ENOMEM;
5098
5099 if (skb1->sk)
5100 skb_set_owner_w(skb2, skb1->sk);
5101
5102 /* Looking around. Are we still alive?
5103 * OK, link new skb, drop old one */
5104
5105 skb2->next = skb1->next;
5106 *skb_p = skb2;
5107 kfree_skb(skb1);
5108 skb1 = skb2;
5109 }
5110 elt++;
5111 *trailer = skb1;
5112 skb_p = &skb1->next;
5113 }
5114
5115 return elt;
5116 }
5117 EXPORT_SYMBOL_GPL(skb_cow_data);
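/* Example (illustrative sketch, loosely modelled on the IPsec transforms):
 * make the buffers writable, reserve trailer space, and size the scatterlist
 * from the returned element count. 'tailbits' is assumed to be in scope.
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nfrags, nsg;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
 *	if (!sg)
 *		return -ENOMEM;
 *	sg_init_table(sg, nfrags);
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0) {
 *		kfree(sg);
 *		return nsg;
 *	}
 */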
5118
5119 static void sock_rmem_free(struct sk_buff *skb)
5120 {
5121 struct sock *sk = skb->sk;
5122
5123 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
5124 }
5125
5126 static void skb_set_err_queue(struct sk_buff *skb)
5127 {
5128 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
5129 * So, it is safe to (mis)use it to mark skbs on the error queue.
5130 */
5131 skb->pkt_type = PACKET_OUTGOING;
5132 BUILD_BUG_ON(PACKET_OUTGOING == 0);
5133 }
5134
5135 /*
5136 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
5137 */
5138 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5139 {
5140 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5141 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
5142 return -ENOMEM;
5143
5144 skb_orphan(skb);
5145 skb->sk = sk;
5146 skb->destructor = sock_rmem_free;
5147 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5148 skb_set_err_queue(skb);
5149
5150 /* before exiting rcu section, make sure dst is refcounted */
5151 skb_dst_force(skb);
5152
5153 skb_queue_tail(&sk->sk_error_queue, skb);
5154 if (!sock_flag(sk, SOCK_DEAD))
5155 sk_error_report(sk);
5156 return 0;
5157 }
5158 EXPORT_SYMBOL(sock_queue_err_skb);
5159
5160 static bool is_icmp_err_skb(const struct sk_buff *skb)
5161 {
5162 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
5163 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
5164 }
5165
5166 struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
5167 {
5168 struct sk_buff_head *q = &sk->sk_error_queue;
5169 struct sk_buff *skb, *skb_next = NULL;
5170 bool icmp_next = false;
5171 unsigned long flags;
5172
5173 spin_lock_irqsave(&q->lock, flags);
5174 skb = __skb_dequeue(q);
5175 if (skb && (skb_next = skb_peek(q))) {
5176 icmp_next = is_icmp_err_skb(skb_next);
5177 if (icmp_next)
5178 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
5179 }
5180 spin_unlock_irqrestore(&q->lock, flags);
5181
5182 if (is_icmp_err_skb(skb) && !icmp_next)
5183 sk->sk_err = 0;
5184
5185 if (skb_next)
5186 sk_error_report(sk);
5187
5188 return skb;
5189 }
5190 EXPORT_SYMBOL(sock_dequeue_err_skb);
5191
5192 /**
5193 * skb_clone_sk - create clone of skb, and take reference to socket
5194 * @skb: the skb to clone
5195 *
5196 * This function creates a clone of a buffer that holds a reference on
5197 * sk_refcnt. Buffers created via this function are meant to be
5198 * returned using sock_queue_err_skb, or freed via kfree_skb.
5199 *
5200 * When passing buffers allocated with this function to sock_queue_err_skb
5201 * it is necessary to wrap the call with sock_hold/sock_put in order to
5202 * prevent the socket from being released prior to being enqueued on
5203 * the sk_error_queue.
5204 */
5205 struct sk_buff *skb_clone_sk(struct sk_buff *skb)
5206 {
5207 struct sock *sk = skb->sk;
5208 struct sk_buff *clone;
5209
5210 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
5211 return NULL;
5212
5213 clone = skb_clone(skb, GFP_ATOMIC);
5214 if (!clone) {
5215 sock_put(sk);
5216 return NULL;
5217 }
5218
5219 clone->sk = sk;
5220 clone->destructor = sock_efree;
5221
5222 return clone;
5223 }
5224 EXPORT_SYMBOL(skb_clone_sk);
5225
5226 static void __skb_complete_tx_timestamp(struct sk_buff *skb,
5227 struct sock *sk,
5228 int tstype,
5229 bool opt_stats)
5230 {
5231 struct sock_exterr_skb *serr;
5232 int err;
5233
5234 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
5235
5236 serr = SKB_EXT_ERR(skb);
5237 memset(serr, 0, sizeof(*serr));
5238 serr->ee.ee_errno = ENOMSG;
5239 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
5240 serr->ee.ee_info = tstype;
5241 serr->opt_stats = opt_stats;
5242 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
5243 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
5244 serr->ee.ee_data = skb_shinfo(skb)->tskey;
5245 if (sk_is_tcp(sk))
5246 serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
5247 }
5248
5249 err = sock_queue_err_skb(sk, skb);
5250
5251 if (err)
5252 kfree_skb(skb);
5253 }
5254
5255 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
5256 {
5257 bool ret;
5258
5259 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
5260 return true;
5261
5262 read_lock_bh(&sk->sk_callback_lock);
5263 ret = sk->sk_socket && sk->sk_socket->file &&
5264 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
5265 read_unlock_bh(&sk->sk_callback_lock);
5266 return ret;
5267 }
5268
5269 void skb_complete_tx_timestamp(struct sk_buff *skb,
5270 struct skb_shared_hwtstamps *hwtstamps)
5271 {
5272 struct sock *sk = skb->sk;
5273
5274 if (!skb_may_tx_timestamp(sk, false))
5275 goto err;
5276
5277 /* Take a reference to prevent skb_orphan() from freeing the socket,
5278 * but only if the socket refcount is not zero.
5279 */
5280 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5281 *skb_hwtstamps(skb) = *hwtstamps;
5282 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
5283 sock_put(sk);
5284 return;
5285 }
5286
5287 err:
5288 kfree_skb(skb);
5289 }
5290 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
5291
5292 void __skb_tstamp_tx(struct sk_buff *orig_skb,
5293 const struct sk_buff *ack_skb,
5294 struct skb_shared_hwtstamps *hwtstamps,
5295 struct sock *sk, int tstype)
5296 {
5297 struct sk_buff *skb;
5298 bool tsonly, opt_stats = false;
5299 u32 tsflags;
5300
5301 if (!sk)
5302 return;
5303
5304 tsflags = READ_ONCE(sk->sk_tsflags);
5305 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
5306 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
5307 return;
5308
5309 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
5310 if (!skb_may_tx_timestamp(sk, tsonly))
5311 return;
5312
5313 if (tsonly) {
5314 #ifdef CONFIG_INET
5315 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
5316 sk_is_tcp(sk)) {
5317 skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
5318 ack_skb);
5319 opt_stats = true;
5320 } else
5321 #endif
5322 skb = alloc_skb(0, GFP_ATOMIC);
5323 } else {
5324 skb = skb_clone(orig_skb, GFP_ATOMIC);
5325
5326 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
5327 kfree_skb(skb);
5328 return;
5329 }
5330 }
5331 if (!skb)
5332 return;
5333
5334 if (tsonly) {
5335 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
5336 SKBTX_ANY_TSTAMP;
5337 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
5338 }
5339
5340 if (hwtstamps)
5341 *skb_hwtstamps(skb) = *hwtstamps;
5342 else
5343 __net_timestamp(skb);
5344
5345 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
5346 }
5347 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
5348
5349 void skb_tstamp_tx(struct sk_buff *orig_skb,
5350 struct skb_shared_hwtstamps *hwtstamps)
5351 {
5352 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
5353 SCM_TSTAMP_SND);
5354 }
5355 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
5356
5357 #ifdef CONFIG_WIRELESS
5358 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
5359 {
5360 struct sock *sk = skb->sk;
5361 struct sock_exterr_skb *serr;
5362 int err = 1;
5363
5364 skb->wifi_acked_valid = 1;
5365 skb->wifi_acked = acked;
5366
5367 serr = SKB_EXT_ERR(skb);
5368 memset(serr, 0, sizeof(*serr));
5369 serr->ee.ee_errno = ENOMSG;
5370 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
5371
5372 /* Take a reference to prevent skb_orphan() from freeing the socket,
5373 * but only if the socket refcount is not zero.
5374 */
5375 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5376 err = sock_queue_err_skb(sk, skb);
5377 sock_put(sk);
5378 }
5379 if (err)
5380 kfree_skb(skb);
5381 }
5382 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
5383 #endif /* CONFIG_WIRELESS */
5384
5385 /**
5386 * skb_partial_csum_set - set up and verify partial csum values for packet
5387 * @skb: the skb to set
5388 * @start: the number of bytes after skb->data to start checksumming.
5389 * @off: the offset from start to place the checksum.
5390 *
5391 * For untrusted partially-checksummed packets, we need to make sure the values
5392 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5393 *
5394 * This function checks and sets those values and skb->ip_summed: if this
5395 * returns false you should drop the packet.
5396 */
5397 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
5398 {
5399 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
5400 u32 csum_start = skb_headroom(skb) + (u32)start;
5401
5402 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
5403 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
5404 start, off, skb_headroom(skb), skb_headlen(skb));
5405 return false;
5406 }
5407 skb->ip_summed = CHECKSUM_PARTIAL;
5408 skb->csum_start = csum_start;
5409 skb->csum_offset = off;
5410 skb->transport_header = csum_start;
5411 return true;
5412 }
5413 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
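/* Example (illustrative sketch): validating checksum metadata supplied by an
 * untrusted source (e.g. a virtio-style header) before accepting the packet;
 * csum_start and csum_offset are assumed to come from that header.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */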
5414
5415 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
5416 unsigned int max)
5417 {
5418 if (skb_headlen(skb) >= len)
5419 return 0;
5420
5421 /* If we need to pull up then pull up to the max, so we
5422 * won't need to do it again.
5423 */
5424 if (max > skb->len)
5425 max = skb->len;
5426
5427 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
5428 return -ENOMEM;
5429
5430 if (skb_headlen(skb) < len)
5431 return -EPROTO;
5432
5433 return 0;
5434 }
5435
5436 #define MAX_TCP_HDR_LEN (15 * 4)
5437
5438 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
5439 typeof(IPPROTO_IP) proto,
5440 unsigned int off)
5441 {
5442 int err;
5443
5444 switch (proto) {
5445 case IPPROTO_TCP:
5446 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
5447 off + MAX_TCP_HDR_LEN);
5448 if (!err && !skb_partial_csum_set(skb, off,
5449 offsetof(struct tcphdr,
5450 check)))
5451 err = -EPROTO;
5452 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
5453
5454 case IPPROTO_UDP:
5455 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
5456 off + sizeof(struct udphdr));
5457 if (!err && !skb_partial_csum_set(skb, off,
5458 offsetof(struct udphdr,
5459 check)))
5460 err = -EPROTO;
5461 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5462 }
5463
5464 return ERR_PTR(-EPROTO);
5465 }
5466
5467 /* This value should be large enough to cover a tagged ethernet header plus
5468 * maximally sized IP and TCP or UDP headers.
5469 */
5470 #define MAX_IP_HDR_LEN 128
5471
5472 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5473 {
5474 unsigned int off;
5475 bool fragment;
5476 __sum16 *csum;
5477 int err;
5478
5479 fragment = false;
5480
5481 err = skb_maybe_pull_tail(skb,
5482 sizeof(struct iphdr),
5483 MAX_IP_HDR_LEN);
5484 if (err < 0)
5485 goto out;
5486
5487 if (ip_is_fragment(ip_hdr(skb)))
5488 fragment = true;
5489
5490 off = ip_hdrlen(skb);
5491
5492 err = -EPROTO;
5493
5494 if (fragment)
5495 goto out;
5496
5497 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5498 if (IS_ERR(csum))
5499 return PTR_ERR(csum);
5500
5501 if (recalculate)
5502 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5503 ip_hdr(skb)->daddr,
5504 skb->len - off,
5505 ip_hdr(skb)->protocol, 0);
5506 err = 0;
5507
5508 out:
5509 return err;
5510 }
5511
5512 /* This value should be large enough to cover a tagged ethernet header plus
5513 * an IPv6 header, all options, and a maximal TCP or UDP header.
5514 */
5515 #define MAX_IPV6_HDR_LEN 256
5516
5517 #define OPT_HDR(type, skb, off) \
5518 (type *)(skb_network_header(skb) + (off))
5519
5520 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5521 {
5522 int err;
5523 u8 nexthdr;
5524 unsigned int off;
5525 unsigned int len;
5526 bool fragment;
5527 bool done;
5528 __sum16 *csum;
5529
5530 fragment = false;
5531 done = false;
5532
5533 off = sizeof(struct ipv6hdr);
5534
5535 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5536 if (err < 0)
5537 goto out;
5538
5539 nexthdr = ipv6_hdr(skb)->nexthdr;
5540
5541 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5542 while (off <= len && !done) {
5543 switch (nexthdr) {
5544 case IPPROTO_DSTOPTS:
5545 case IPPROTO_HOPOPTS:
5546 case IPPROTO_ROUTING: {
5547 struct ipv6_opt_hdr *hp;
5548
5549 err = skb_maybe_pull_tail(skb,
5550 off +
5551 sizeof(struct ipv6_opt_hdr),
5552 MAX_IPV6_HDR_LEN);
5553 if (err < 0)
5554 goto out;
5555
5556 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5557 nexthdr = hp->nexthdr;
5558 off += ipv6_optlen(hp);
5559 break;
5560 }
5561 case IPPROTO_AH: {
5562 struct ip_auth_hdr *hp;
5563
5564 err = skb_maybe_pull_tail(skb,
5565 off +
5566 sizeof(struct ip_auth_hdr),
5567 MAX_IPV6_HDR_LEN);
5568 if (err < 0)
5569 goto out;
5570
5571 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5572 nexthdr = hp->nexthdr;
5573 off += ipv6_authlen(hp);
5574 break;
5575 }
5576 case IPPROTO_FRAGMENT: {
5577 struct frag_hdr *hp;
5578
5579 err = skb_maybe_pull_tail(skb,
5580 off +
5581 sizeof(struct frag_hdr),
5582 MAX_IPV6_HDR_LEN);
5583 if (err < 0)
5584 goto out;
5585
5586 hp = OPT_HDR(struct frag_hdr, skb, off);
5587
5588 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5589 fragment = true;
5590
5591 nexthdr = hp->nexthdr;
5592 off += sizeof(struct frag_hdr);
5593 break;
5594 }
5595 default:
5596 done = true;
5597 break;
5598 }
5599 }
5600
5601 err = -EPROTO;
5602
5603 if (!done || fragment)
5604 goto out;
5605
5606 csum = skb_checksum_setup_ip(skb, nexthdr, off);
5607 if (IS_ERR(csum))
5608 return PTR_ERR(csum);
5609
5610 if (recalculate)
5611 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5612 &ipv6_hdr(skb)->daddr,
5613 skb->len - off, nexthdr, 0);
5614 err = 0;
5615
5616 out:
5617 return err;
5618 }
5619
5620 /**
5621 * skb_checksum_setup - set up partial checksum offset
5622 * @skb: the skb to set up
5623 * @recalculate: if true the pseudo-header checksum will be recalculated
5624 */
5625 int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5626 {
5627 int err;
5628
5629 switch (skb->protocol) {
5630 case htons(ETH_P_IP):
5631 err = skb_checksum_setup_ipv4(skb, recalculate);
5632 break;
5633
5634 case htons(ETH_P_IPV6):
5635 err = skb_checksum_setup_ipv6(skb, recalculate);
5636 break;
5637
5638 default:
5639 err = -EPROTO;
5640 break;
5641 }
5642
5643 return err;
5644 }
5645 EXPORT_SYMBOL(skb_checksum_setup);
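
/* Editor's illustrative sketch: re-deriving checksum offload state for a
 * packet handed over with CHECKSUM_PARTIAL but an untrusted or missing
 * pseudo-header checksum, in the style of the xen-netback receive path.
 */
static __maybe_unused int example_fixup_forwarded_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* recalculate == true: rewrite the pseudo-header checksum too */
	return skb_checksum_setup(skb, true);
}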
5646
5647 /**
5648 * skb_checksum_maybe_trim - maybe trims the given skb
5649 * @skb: the skb to check
5650 * @transport_len: the data length beyond the network header
5651 *
5652 * Checks whether the given skb has data beyond the given transport length.
5653 * If so, returns a cloned skb trimmed to this transport length.
5654 * Otherwise returns the provided skb. Returns NULL in error cases
5655 * (e.g. transport_len exceeds skb length or out-of-memory).
5656 *
5657 * Caller needs to set the skb transport header and free any returned skb if it
5658 * differs from the provided skb.
5659 */
5660 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5661 unsigned int transport_len)
5662 {
5663 struct sk_buff *skb_chk;
5664 unsigned int len = skb_transport_offset(skb) + transport_len;
5665 int ret;
5666
5667 if (skb->len < len)
5668 return NULL;
5669 else if (skb->len == len)
5670 return skb;
5671
5672 skb_chk = skb_clone(skb, GFP_ATOMIC);
5673 if (!skb_chk)
5674 return NULL;
5675
5676 ret = pskb_trim_rcsum(skb_chk, len);
5677 if (ret) {
5678 kfree_skb(skb_chk);
5679 return NULL;
5680 }
5681
5682 return skb_chk;
5683 }
5684
5685 /**
5686 * skb_checksum_trimmed - validate checksum of an skb
5687 * @skb: the skb to check
5688 * @transport_len: the data length beyond the network header
5689 * @skb_chkf: checksum function to use
5690 *
5691 * Applies the given checksum function skb_chkf to the provided skb.
5692 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5693 *
5694 * If the skb has data beyond the given transport length, then a
5695 * trimmed & cloned skb is checked and returned.
5696 *
5697 * Caller needs to set the skb transport header and free any returned skb if it
5698 * differs from the provided skb.
5699 */
5700 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5701 unsigned int transport_len,
5702 __sum16(*skb_chkf)(struct sk_buff *skb))
5703 {
5704 struct sk_buff *skb_chk;
5705 unsigned int offset = skb_transport_offset(skb);
5706 __sum16 ret;
5707
5708 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5709 if (!skb_chk)
5710 goto err;
5711
5712 if (!pskb_may_pull(skb_chk, offset))
5713 goto err;
5714
5715 skb_pull_rcsum(skb_chk, offset);
5716 ret = skb_chkf(skb_chk);
5717 skb_push_rcsum(skb_chk, offset);
5718
5719 if (ret)
5720 goto err;
5721
5722 return skb_chk;
5723
5724 err:
5725 if (skb_chk && skb_chk != skb)
5726 kfree_skb(skb_chk);
5727
5728 return NULL;
5729
5730 }
5731 EXPORT_SYMBOL(skb_checksum_trimmed);
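
/* Editor's illustrative sketch: validating a datagram whose checksum
 * covers exactly @transport_len bytes, in the style of the IGMP/MLD
 * receive paths. example_simple_csum() is a made-up callback; it uses
 * the plain Internet checksum and assumes the transport data is linear.
 */
static __sum16 example_simple_csum(struct sk_buff *skb)
{
	return ip_compute_csum(skb->data, skb->len);
}

static __maybe_unused struct sk_buff *
example_validate_trimmed(struct sk_buff *skb, unsigned int transport_len)
{
	struct sk_buff *skb_chk;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       example_simple_csum);

	/* the caller must free skb_chk if it differs from skb */
	return skb_chk;
}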
5732
5733 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5734 {
5735 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5736 skb->dev->name);
5737 }
5738 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5739
5740 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5741 {
5742 if (head_stolen) {
5743 skb_release_head_state(skb);
5744 kmem_cache_free(skbuff_cache, skb);
5745 } else {
5746 __kfree_skb(skb);
5747 }
5748 }
5749 EXPORT_SYMBOL(kfree_skb_partial);
5750
5751 /**
5752 * skb_try_coalesce - try to merge skb to prior one
5753 * @to: prior buffer
5754 * @from: buffer to add
5755 * @fragstolen: pointer to boolean
5756 * @delta_truesize: how much more was allocated than was requested
5757 */
5758 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5759 bool *fragstolen, int *delta_truesize)
5760 {
5761 struct skb_shared_info *to_shinfo, *from_shinfo;
5762 int i, delta, len = from->len;
5763
5764 *fragstolen = false;
5765
5766 if (skb_cloned(to))
5767 return false;
5768
5769 /* In general, avoid mixing page_pool and non-page_pool allocated
5770 * pages within the same SKB. Additionally avoid dealing with clones
5771 * with page_pool pages, in case the SKB is using page_pool fragment
5772 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
5773 * references for cloned SKBs at the moment that would result in
5774 * inconsistent reference counts.
5775 * In theory we could take full references if @from is cloned and
5776 * !@to->pp_recycle but its tricky (due to potential race with
5777 * the clone disappearing) and rare, so not worth dealing with.
5778 */
5779 if (to->pp_recycle != from->pp_recycle ||
5780 (from->pp_recycle && skb_cloned(from)))
5781 return false;
5782
5783 if (len <= skb_tailroom(to)) {
5784 if (len)
5785 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5786 *delta_truesize = 0;
5787 return true;
5788 }
5789
5790 to_shinfo = skb_shinfo(to);
5791 from_shinfo = skb_shinfo(from);
5792 if (to_shinfo->frag_list || from_shinfo->frag_list)
5793 return false;
5794 if (skb_zcopy(to) || skb_zcopy(from))
5795 return false;
5796
5797 if (skb_headlen(from) != 0) {
5798 struct page *page;
5799 unsigned int offset;
5800
5801 if (to_shinfo->nr_frags +
5802 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5803 return false;
5804
5805 if (skb_head_is_locked(from))
5806 return false;
5807
5808 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5809
5810 page = virt_to_head_page(from->head);
5811 offset = from->data - (unsigned char *)page_address(page);
5812
5813 skb_fill_page_desc(to, to_shinfo->nr_frags,
5814 page, offset, skb_headlen(from));
5815 *fragstolen = true;
5816 } else {
5817 if (to_shinfo->nr_frags +
5818 from_shinfo->nr_frags > MAX_SKB_FRAGS)
5819 return false;
5820
5821 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5822 }
5823
5824 WARN_ON_ONCE(delta < len);
5825
5826 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5827 from_shinfo->frags,
5828 from_shinfo->nr_frags * sizeof(skb_frag_t));
5829 to_shinfo->nr_frags += from_shinfo->nr_frags;
5830
5831 if (!skb_cloned(from))
5832 from_shinfo->nr_frags = 0;
5833
5834 /* if the skb is not cloned this does nothing
5835 * since we set nr_frags to 0.
5836 */
5837 for (i = 0; i < from_shinfo->nr_frags; i++)
5838 __skb_frag_ref(&from_shinfo->frags[i]);
5839
5840 to->truesize += delta;
5841 to->len += len;
5842 to->data_len += len;
5843
5844 *delta_truesize = delta;
5845 return true;
5846 }
5847 EXPORT_SYMBOL(skb_try_coalesce);
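
/* Editor's illustrative sketch: coalescing @from into the skb at the
 * tail of a receive queue, roughly as the TCP out-of-order queue does.
 * The memory accounting shown here is simplified.
 */
static __maybe_unused bool example_queue_coalesce(struct sock *sk,
						  struct sk_buff *to,
						  struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	kfree_skb_partial(from, fragstolen);
	sk_mem_charge(sk, delta);
	return true;
}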
5848
5849 /**
5850 * skb_scrub_packet - scrub an skb
5851 *
5852 * @skb: buffer to clean
5853 * @xnet: packet is crossing netns
5854 *
5855 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
5856 * into/from a tunnel. Some information has to be cleared during these
5857 * operations.
5858 * skb_scrub_packet can also be used to clean an skb before injecting it into
5859 * another namespace (@xnet == true). We have to clear all information in the
5860 * skb that could impact namespace isolation.
5861 */
5862 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5863 {
5864 skb->pkt_type = PACKET_HOST;
5865 skb->skb_iif = 0;
5866 skb->ignore_df = 0;
5867 skb_dst_drop(skb);
5868 skb_ext_reset(skb);
5869 nf_reset_ct(skb);
5870 nf_reset_trace(skb);
5871
5872 #ifdef CONFIG_NET_SWITCHDEV
5873 skb->offload_fwd_mark = 0;
5874 skb->offload_l3_fwd_mark = 0;
5875 #endif
5876
5877 if (!xnet)
5878 return;
5879
5880 ipvs_reset(skb);
5881 skb->mark = 0;
5882 skb_clear_tstamp(skb);
5883 }
5884 EXPORT_SYMBOL_GPL(skb_scrub_packet);
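
/* Editor's illustrative sketch: scrubbing a decapsulated packet before
 * re-injecting it through a tunnel device that may live in a different
 * network namespace, similar to what the ip_tunnel receive path does.
 */
static __maybe_unused void example_tunnel_rx_scrub(struct sk_buff *skb,
						   struct net_device *tunnel_dev)
{
	bool xnet = !net_eq(dev_net(tunnel_dev), dev_net(skb->dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = tunnel_dev;
}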
5885
5886 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5887 {
5888 int mac_len, meta_len;
5889 void *meta;
5890
5891 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5892 kfree_skb(skb);
5893 return NULL;
5894 }
5895
5896 mac_len = skb->data - skb_mac_header(skb);
5897 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5898 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5899 mac_len - VLAN_HLEN - ETH_TLEN);
5900 }
5901
5902 meta_len = skb_metadata_len(skb);
5903 if (meta_len) {
5904 meta = skb_metadata_end(skb) - meta_len;
5905 memmove(meta + VLAN_HLEN, meta, meta_len);
5906 }
5907
5908 skb->mac_header += VLAN_HLEN;
5909 return skb;
5910 }
5911
5912 struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5913 {
5914 struct vlan_hdr *vhdr;
5915 u16 vlan_tci;
5916
5917 if (unlikely(skb_vlan_tag_present(skb))) {
5918 /* vlan_tci is already set-up so leave this for another time */
5919 return skb;
5920 }
5921
5922 skb = skb_share_check(skb, GFP_ATOMIC);
5923 if (unlikely(!skb))
5924 goto err_free;
5925 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
5926 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5927 goto err_free;
5928
5929 vhdr = (struct vlan_hdr *)skb->data;
5930 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5931 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5932
5933 skb_pull_rcsum(skb, VLAN_HLEN);
5934 vlan_set_encap_proto(skb, vhdr);
5935
5936 skb = skb_reorder_vlan_header(skb);
5937 if (unlikely(!skb))
5938 goto err_free;
5939
5940 skb_reset_network_header(skb);
5941 if (!skb_transport_header_was_set(skb))
5942 skb_reset_transport_header(skb);
5943 skb_reset_mac_len(skb);
5944
5945 return skb;
5946
5947 err_free:
5948 kfree_skb(skb);
5949 return NULL;
5950 }
5951 EXPORT_SYMBOL(skb_vlan_untag);
5952
5953 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
5954 {
5955 if (!pskb_may_pull(skb, write_len))
5956 return -ENOMEM;
5957
5958 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5959 return 0;
5960
5961 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5962 }
5963 EXPORT_SYMBOL(skb_ensure_writable);
5964
5965 /* remove VLAN header from packet and update csum accordingly.
5966 * expects a non skb_vlan_tag_present skb with a vlan tag payload
5967 */
5968 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5969 {
5970 int offset = skb->data - skb_mac_header(skb);
5971 int err;
5972
5973 if (WARN_ONCE(offset,
5974 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5975 offset)) {
5976 return -EINVAL;
5977 }
5978
5979 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5980 if (unlikely(err))
5981 return err;
5982
5983 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5984
5985 vlan_remove_tag(skb, vlan_tci);
5986
5987 skb->mac_header += VLAN_HLEN;
5988
5989 if (skb_network_offset(skb) < ETH_HLEN)
5990 skb_set_network_header(skb, ETH_HLEN);
5991
5992 skb_reset_mac_len(skb);
5993
5994 return err;
5995 }
5996 EXPORT_SYMBOL(__skb_vlan_pop);
5997
5998 /* Pop a vlan tag either from hwaccel or from payload.
5999 * Expects skb->data at mac header.
6000 */
6001 int skb_vlan_pop(struct sk_buff *skb)
6002 {
6003 u16 vlan_tci;
6004 __be16 vlan_proto;
6005 int err;
6006
6007 if (likely(skb_vlan_tag_present(skb))) {
6008 __vlan_hwaccel_clear_tag(skb);
6009 } else {
6010 if (unlikely(!eth_type_vlan(skb->protocol)))
6011 return 0;
6012
6013 err = __skb_vlan_pop(skb, &vlan_tci);
6014 if (err)
6015 return err;
6016 }
6017 /* move next vlan tag to hw accel tag */
6018 if (likely(!eth_type_vlan(skb->protocol)))
6019 return 0;
6020
6021 vlan_proto = skb->protocol;
6022 err = __skb_vlan_pop(skb, &vlan_tci);
6023 if (unlikely(err))
6024 return err;
6025
6026 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6027 return 0;
6028 }
6029 EXPORT_SYMBOL(skb_vlan_pop);
6030
6031 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
6032 * Expects skb->data at mac header.
6033 */
6034 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
6035 {
6036 if (skb_vlan_tag_present(skb)) {
6037 int offset = skb->data - skb_mac_header(skb);
6038 int err;
6039
6040 if (WARN_ONCE(offset,
6041 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6042 offset)) {
6043 return -EINVAL;
6044 }
6045
6046 err = __vlan_insert_tag(skb, skb->vlan_proto,
6047 skb_vlan_tag_get(skb));
6048 if (err)
6049 return err;
6050
6051 skb->protocol = skb->vlan_proto;
6052 skb->mac_len += VLAN_HLEN;
6053
6054 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6055 }
6056 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6057 return 0;
6058 }
6059 EXPORT_SYMBOL(skb_vlan_push);
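
/* Editor's illustrative sketch: replacing the outermost VLAN tag of a
 * packet, in the style of a TC act_vlan "modify" action. The new tag
 * value is hypothetical; skb->data is assumed to be at the mac header.
 */
static __maybe_unused int example_vlan_retag(struct sk_buff *skb, u16 new_vid)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}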
6060
6061 /**
6062 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6063 *
6064 * @skb: Socket buffer to modify
6065 *
6066 * Drop the Ethernet header of @skb.
6067 *
6068 * Expects that skb->data points to the mac header and that no VLAN tags are
6069 * present.
6070 *
6071 * Returns 0 on success, -errno otherwise.
6072 */
6073 int skb_eth_pop(struct sk_buff *skb)
6074 {
6075 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
6076 skb_network_offset(skb) < ETH_HLEN)
6077 return -EPROTO;
6078
6079 skb_pull_rcsum(skb, ETH_HLEN);
6080 skb_reset_mac_header(skb);
6081 skb_reset_mac_len(skb);
6082
6083 return 0;
6084 }
6085 EXPORT_SYMBOL(skb_eth_pop);
6086
6087 /**
6088 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6089 *
6090 * @skb: Socket buffer to modify
6091 * @dst: Destination MAC address of the new header
6092 * @src: Source MAC address of the new header
6093 *
6094 * Prepend @skb with a new Ethernet header.
6095 *
6096 * Expects that skb->data points to the mac header, which must be empty.
6097 *
6098 * Returns 0 on success, -errno otherwise.
6099 */
6100 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
6101 const unsigned char *src)
6102 {
6103 struct ethhdr *eth;
6104 int err;
6105
6106 if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
6107 return -EPROTO;
6108
6109 err = skb_cow_head(skb, sizeof(*eth));
6110 if (err < 0)
6111 return err;
6112
6113 skb_push(skb, sizeof(*eth));
6114 skb_reset_mac_header(skb);
6115 skb_reset_mac_len(skb);
6116
6117 eth = eth_hdr(skb);
6118 ether_addr_copy(eth->h_dest, dst);
6119 ether_addr_copy(eth->h_source, src);
6120 eth->h_proto = skb->protocol;
6121
6122 skb_postpush_rcsum(skb, eth, sizeof(*eth));
6123
6124 return 0;
6125 }
6126 EXPORT_SYMBOL(skb_eth_push);
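
/* Editor's illustrative sketch: rewriting the Ethernet header of a
 * packet by popping the old one and pushing a new one, as an
 * openvswitch-style action pipeline might. The addresses are
 * placeholders supplied by the caller.
 */
static __maybe_unused int example_rewrite_eth(struct sk_buff *skb,
					      const unsigned char *new_dst,
					      const unsigned char *new_src)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	return skb_eth_push(skb, new_dst, new_src);
}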
6127
6128 /* Update the ethertype of hdr and the skb csum value if required. */
6129 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
6130 __be16 ethertype)
6131 {
6132 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6133 __be16 diff[] = { ~hdr->h_proto, ethertype };
6134
6135 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6136 }
6137
6138 hdr->h_proto = ethertype;
6139 }
6140
6141 /**
6142 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6143 * the packet
6144 *
6145 * @skb: buffer
6146 * @mpls_lse: MPLS label stack entry to push
6147 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
6148 * @mac_len: length of the MAC header
6149 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
6150 * ethernet
6151 *
6152 * Expects skb->data at mac header.
6153 *
6154 * Returns 0 on success, -errno otherwise.
6155 */
6156 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
6157 int mac_len, bool ethernet)
6158 {
6159 struct mpls_shim_hdr *lse;
6160 int err;
6161
6162 if (unlikely(!eth_p_mpls(mpls_proto)))
6163 return -EINVAL;
6164
6165 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
6166 if (skb->encapsulation)
6167 return -EINVAL;
6168
6169 err = skb_cow_head(skb, MPLS_HLEN);
6170 if (unlikely(err))
6171 return err;
6172
6173 if (!skb->inner_protocol) {
6174 skb_set_inner_network_header(skb, skb_network_offset(skb));
6175 skb_set_inner_protocol(skb, skb->protocol);
6176 }
6177
6178 skb_push(skb, MPLS_HLEN);
6179 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
6180 mac_len);
6181 skb_reset_mac_header(skb);
6182 skb_set_network_header(skb, mac_len);
6183 skb_reset_mac_len(skb);
6184
6185 lse = mpls_hdr(skb);
6186 lse->label_stack_entry = mpls_lse;
6187 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
6188
6189 if (ethernet && mac_len >= ETH_HLEN)
6190 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
6191 skb->protocol = mpls_proto;
6192
6193 return 0;
6194 }
6195 EXPORT_SYMBOL_GPL(skb_mpls_push);
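
/* Editor's illustrative sketch: pushing a single, bottom-of-stack MPLS
 * label in front of a packet carried over Ethernet, roughly like an
 * openvswitch push_mpls action. The label value and TTL are hypothetical.
 */
static __maybe_unused int example_push_one_label(struct sk_buff *skb, u32 label)
{
	__be32 lse = cpu_to_be32(((label << MPLS_LS_LABEL_SHIFT) &
				  MPLS_LS_LABEL_MASK) |
				 (64 << MPLS_LS_TTL_SHIFT) |
				 MPLS_LS_S_MASK);	/* bottom of stack */

	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}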
6196
6197 /**
6198 * skb_mpls_pop() - pop the outermost MPLS header
6199 *
6200 * @skb: buffer
6201 * @next_proto: ethertype of header after popped MPLS header
6202 * @mac_len: length of the MAC header
6203 * @ethernet: flag to indicate if the packet is ethernet
6204 *
6205 * Expects skb->data at mac header.
6206 *
6207 * Returns 0 on success, -errno otherwise.
6208 */
6209 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
6210 bool ethernet)
6211 {
6212 int err;
6213
6214 if (unlikely(!eth_p_mpls(skb->protocol)))
6215 return 0;
6216
6217 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
6218 if (unlikely(err))
6219 return err;
6220
6221 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
6222 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
6223 mac_len);
6224
6225 __skb_pull(skb, MPLS_HLEN);
6226 skb_reset_mac_header(skb);
6227 skb_set_network_header(skb, mac_len);
6228
6229 if (ethernet && mac_len >= ETH_HLEN) {
6230 struct ethhdr *hdr;
6231
6232 /* use mpls_hdr() to get ethertype to account for VLANs. */
6233 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
6234 skb_mod_eth_type(skb, hdr, next_proto);
6235 }
6236 skb->protocol = next_proto;
6237
6238 return 0;
6239 }
6240 EXPORT_SYMBOL_GPL(skb_mpls_pop);
6241
6242 /**
6243 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6244 *
6245 * @skb: buffer
6246 * @mpls_lse: new MPLS label stack entry to update to
6247 *
6248 * Expects skb->data at mac header.
6249 *
6250 * Returns 0 on success, -errno otherwise.
6251 */
6252 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
6253 {
6254 int err;
6255
6256 if (unlikely(!eth_p_mpls(skb->protocol)))
6257 return -EINVAL;
6258
6259 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
6260 if (unlikely(err))
6261 return err;
6262
6263 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6264 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
6265
6266 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6267 }
6268
6269 mpls_hdr(skb)->label_stack_entry = mpls_lse;
6270
6271 return 0;
6272 }
6273 EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
6274
6275 /**
6276 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6277 *
6278 * @skb: buffer
6279 *
6280 * Expects skb->data at mac header.
6281 *
6282 * Returns 0 on success, -errno otherwise.
6283 */
6284 int skb_mpls_dec_ttl(struct sk_buff *skb)
6285 {
6286 u32 lse;
6287 u8 ttl;
6288
6289 if (unlikely(!eth_p_mpls(skb->protocol)))
6290 return -EINVAL;
6291
6292 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
6293 return -ENOMEM;
6294
6295 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
6296 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
6297 if (!--ttl)
6298 return -EINVAL;
6299
6300 lse &= ~MPLS_LS_TTL_MASK;
6301 lse |= ttl << MPLS_LS_TTL_SHIFT;
6302
6303 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
6304 }
6305 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
6306
6307 /**
6308 * alloc_skb_with_frags - allocate skb with page frags
6309 *
6310 * @header_len: size of linear part
6311 * @data_len: needed length in frags
6312 * @order: max page order desired.
6313 * @errcode: pointer to error code if any
6314 * @gfp_mask: allocation mask
6315 *
6316 * This can be used to allocate a paged skb, given a maximal order for frags.
6317 */
6318 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
6319 unsigned long data_len,
6320 int order,
6321 int *errcode,
6322 gfp_t gfp_mask)
6323 {
6324 unsigned long chunk;
6325 struct sk_buff *skb;
6326 struct page *page;
6327 int nr_frags = 0;
6328
6329 *errcode = -EMSGSIZE;
6330 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
6331 return NULL;
6332
6333 *errcode = -ENOBUFS;
6334 skb = alloc_skb(header_len, gfp_mask);
6335 if (!skb)
6336 return NULL;
6337
6338 while (data_len) {
6339 if (nr_frags == MAX_SKB_FRAGS - 1)
6340 goto failure;
6341 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
6342 order--;
6343
6344 if (order) {
6345 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
6346 __GFP_COMP |
6347 __GFP_NOWARN,
6348 order);
6349 if (!page) {
6350 order--;
6351 continue;
6352 }
6353 } else {
6354 page = alloc_page(gfp_mask);
6355 if (!page)
6356 goto failure;
6357 }
6358 chunk = min_t(unsigned long, data_len,
6359 PAGE_SIZE << order);
6360 skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
6361 nr_frags++;
6362 skb->truesize += (PAGE_SIZE << order);
6363 data_len -= chunk;
6364 }
6365 return skb;
6366
6367 failure:
6368 kfree_skb(skb);
6369 return NULL;
6370 }
6371 EXPORT_SYMBOL(alloc_skb_with_frags);
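
/* Editor's illustrative sketch: allocating an skb with a small linear
 * header and the bulk of the payload in page frags, similar to what
 * sock_alloc_send_pskb() ends up doing for large sends. The sizes and
 * order are arbitrary.
 */
static __maybe_unused struct sk_buff *example_alloc_paged(unsigned long payload)
{
	int errcode;

	return alloc_skb_with_frags(128, payload, PAGE_ALLOC_COSTLY_ORDER,
				    &errcode, GFP_KERNEL);
}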
6372
6373 /* carve out the first off bytes from skb when off < headlen */
6374 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
6375 const int headlen, gfp_t gfp_mask)
6376 {
6377 int i;
6378 unsigned int size = skb_end_offset(skb);
6379 int new_hlen = headlen - off;
6380 u8 *data;
6381
6382 if (skb_pfmemalloc(skb))
6383 gfp_mask |= __GFP_MEMALLOC;
6384
6385 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6386 if (!data)
6387 return -ENOMEM;
6388 size = SKB_WITH_OVERHEAD(size);
6389
6390 /* Copy real data, and all frags */
6391 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
6392 skb->len -= off;
6393
6394 memcpy((struct skb_shared_info *)(data + size),
6395 skb_shinfo(skb),
6396 offsetof(struct skb_shared_info,
6397 frags[skb_shinfo(skb)->nr_frags]));
6398 if (skb_cloned(skb)) {
6399 /* drop the old head gracefully */
6400 if (skb_orphan_frags(skb, gfp_mask)) {
6401 skb_kfree_head(data, size);
6402 return -ENOMEM;
6403 }
6404 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
6405 skb_frag_ref(skb, i);
6406 if (skb_has_frag_list(skb))
6407 skb_clone_fraglist(skb);
6408 skb_release_data(skb, SKB_CONSUMED, false);
6409 } else {
6410 /* we can reuse the existing refcount - all we did
6411 * was relocate values
6412 */
6413 skb_free_head(skb, false);
6414 }
6415
6416 skb->head = data;
6417 skb->data = data;
6418 skb->head_frag = 0;
6419 skb_set_end_offset(skb, size);
6420 skb_set_tail_pointer(skb, skb_headlen(skb));
6421 skb_headers_offset_update(skb, 0);
6422 skb->cloned = 0;
6423 skb->hdr_len = 0;
6424 skb->nohdr = 0;
6425 atomic_set(&skb_shinfo(skb)->dataref, 1);
6426
6427 return 0;
6428 }
6429
6430 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6431
6432 /* carve out the first eat bytes from skb's frag_list. May recurse into
6433 * pskb_carve()
6434 */
6435 static int pskb_carve_frag_list(struct sk_buff *skb,
6436 struct skb_shared_info *shinfo, int eat,
6437 gfp_t gfp_mask)
6438 {
6439 struct sk_buff *list = shinfo->frag_list;
6440 struct sk_buff *clone = NULL;
6441 struct sk_buff *insp = NULL;
6442
6443 do {
6444 if (!list) {
6445 pr_err("Not enough bytes to eat. Want %d\n", eat);
6446 return -EFAULT;
6447 }
6448 if (list->len <= eat) {
6449 /* Eaten as whole. */
6450 eat -= list->len;
6451 list = list->next;
6452 insp = list;
6453 } else {
6454 /* Eaten partially. */
6455 if (skb_shared(list)) {
6456 clone = skb_clone(list, gfp_mask);
6457 if (!clone)
6458 return -ENOMEM;
6459 insp = list->next;
6460 list = clone;
6461 } else {
6462 /* This may be pulled without problems. */
6463 insp = list;
6464 }
6465 if (pskb_carve(list, eat, gfp_mask) < 0) {
6466 kfree_skb(clone);
6467 return -ENOMEM;
6468 }
6469 break;
6470 }
6471 } while (eat);
6472
6473 /* Free pulled out fragments. */
6474 while ((list = shinfo->frag_list) != insp) {
6475 shinfo->frag_list = list->next;
6476 consume_skb(list);
6477 }
6478 /* And insert new clone at head. */
6479 if (clone) {
6480 clone->next = list;
6481 shinfo->frag_list = clone;
6482 }
6483 return 0;
6484 }
6485
6486 /* carve off first len bytes from skb. Split line (off) is in the
6487 * non-linear part of skb
6488 */
6489 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
6490 int pos, gfp_t gfp_mask)
6491 {
6492 int i, k = 0;
6493 unsigned int size = skb_end_offset(skb);
6494 u8 *data;
6495 const int nfrags = skb_shinfo(skb)->nr_frags;
6496 struct skb_shared_info *shinfo;
6497
6498 if (skb_pfmemalloc(skb))
6499 gfp_mask |= __GFP_MEMALLOC;
6500
6501 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6502 if (!data)
6503 return -ENOMEM;
6504 size = SKB_WITH_OVERHEAD(size);
6505
6506 memcpy((struct skb_shared_info *)(data + size),
6507 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
6508 if (skb_orphan_frags(skb, gfp_mask)) {
6509 skb_kfree_head(data, size);
6510 return -ENOMEM;
6511 }
6512 shinfo = (struct skb_shared_info *)(data + size);
6513 for (i = 0; i < nfrags; i++) {
6514 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
6515
6516 if (pos + fsize > off) {
6517 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
6518
6519 if (pos < off) {
6520 /* Split frag.
6521 * We have two options in this case:
6522 * 1. Move the whole frag to the second
6523 * part, if possible. F.e. this approach
6524 * is mandatory for TUX, where splitting
6525 * is expensive.
6526 * 2. Split the frag accurately; this is what we do.
6527 */
6528 skb_frag_off_add(&shinfo->frags[0], off - pos);
6529 skb_frag_size_sub(&shinfo->frags[0], off - pos);
6530 }
6531 skb_frag_ref(skb, i);
6532 k++;
6533 }
6534 pos += fsize;
6535 }
6536 shinfo->nr_frags = k;
6537 if (skb_has_frag_list(skb))
6538 skb_clone_fraglist(skb);
6539
6540 /* split line is in frag list */
6541 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6542 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6543 if (skb_has_frag_list(skb))
6544 kfree_skb_list(skb_shinfo(skb)->frag_list);
6545 skb_kfree_head(data, size);
6546 return -ENOMEM;
6547 }
6548 skb_release_data(skb, SKB_CONSUMED, false);
6549
6550 skb->head = data;
6551 skb->head_frag = 0;
6552 skb->data = data;
6553 skb_set_end_offset(skb, size);
6554 skb_reset_tail_pointer(skb);
6555 skb_headers_offset_update(skb, 0);
6556 skb->cloned = 0;
6557 skb->hdr_len = 0;
6558 skb->nohdr = 0;
6559 skb->len -= off;
6560 skb->data_len = skb->len;
6561 atomic_set(&skb_shinfo(skb)->dataref, 1);
6562 return 0;
6563 }
6564
6565 /* remove len bytes from the beginning of the skb */
6566 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6567 {
6568 int headlen = skb_headlen(skb);
6569
6570 if (len < headlen)
6571 return pskb_carve_inside_header(skb, len, headlen, gfp);
6572 else
6573 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6574 }
6575
6576 /* Extract to_copy bytes starting at off from skb, and return this in
6577 * a new skb
6578 */
6579 struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6580 int to_copy, gfp_t gfp)
6581 {
6582 struct sk_buff *clone = skb_clone(skb, gfp);
6583
6584 if (!clone)
6585 return NULL;
6586
6587 if (pskb_carve(clone, off, gfp) < 0 ||
6588 pskb_trim(clone, to_copy)) {
6589 kfree_skb(clone);
6590 return NULL;
6591 }
6592 return clone;
6593 }
6594 EXPORT_SYMBOL(pskb_extract);
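
/* Editor's illustrative sketch: carving the payload that follows a
 * hypothetical hdr_len-byte header out of a received skb into its own
 * buffer, leaving the original skb untouched, as the RDS receive path
 * does with pskb_extract().
 */
static __maybe_unused struct sk_buff *
example_extract_payload(struct sk_buff *skb, int hdr_len)
{
	return pskb_extract(skb, hdr_len, skb->len - hdr_len, GFP_ATOMIC);
}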
6595
6596 /**
6597 * skb_condense - try to get rid of fragments/frag_list if possible
6598 * @skb: buffer
6599 *
6600 * Can be used to save memory before skb is added to a busy queue.
6601 * If packet has bytes in frags and enough tail room in skb->head,
6602 * pull all of them, so that we can free the frags right now and adjust
6603 * truesize.
6604 * Notes:
6605 * We do not reallocate skb->head, thus this cannot fail.
6606 * Caller must re-evaluate skb->truesize if needed.
6607 */
6608 void skb_condense(struct sk_buff *skb)
6609 {
6610 if (skb->data_len) {
6611 if (skb->data_len > skb->end - skb->tail ||
6612 skb_cloned(skb))
6613 return;
6614
6615 /* Nice, we can free page frag(s) right now */
6616 __pskb_pull_tail(skb, skb->data_len);
6617 }
6618 /* At this point, skb->truesize might be overestimated,
6619 * because skb had a fragment, and fragments do not tell
6620 * their truesize.
6621 * When we pulled its content into skb->head, fragment
6622 * was freed, but __pskb_pull_tail() could not possibly
6623 * adjust skb->truesize, not knowing the frag truesize.
6624 */
6625 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6626 }
6627 EXPORT_SYMBOL(skb_condense);
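
/* Editor's illustrative sketch: condensing an skb before it sits on a
 * potentially long queue, then re-reading its (possibly reduced)
 * truesize for memory accounting, as the caller is expected to do.
 */
static __maybe_unused unsigned int example_condense_and_account(struct sk_buff *skb)
{
	skb_condense(skb);

	/* truesize may have changed; the caller must use the new value */
	return skb->truesize;
}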
6628
6629 #ifdef CONFIG_SKB_EXTENSIONS
6630 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6631 {
6632 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6633 }
6634
6635 /**
6636 * __skb_ext_alloc - allocate a new skb extensions storage
6637 *
6638 * @flags: See kmalloc().
6639 *
6640 * Returns the newly allocated pointer. The pointer can later be attached to
6641 * an skb via __skb_ext_set().
6642 * Note: caller must handle the skb_ext as an opaque data.
6643 */
6644 struct skb_ext *__skb_ext_alloc(gfp_t flags)
6645 {
6646 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6647
6648 if (new) {
6649 memset(new->offset, 0, sizeof(new->offset));
6650 refcount_set(&new->refcnt, 1);
6651 }
6652
6653 return new;
6654 }
6655
6656 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
6657 unsigned int old_active)
6658 {
6659 struct skb_ext *new;
6660
6661 if (refcount_read(&old->refcnt) == 1)
6662 return old;
6663
6664 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6665 if (!new)
6666 return NULL;
6667
6668 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6669 refcount_set(&new->refcnt, 1);
6670
6671 #ifdef CONFIG_XFRM
6672 if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6673 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6674 unsigned int i;
6675
6676 for (i = 0; i < sp->len; i++)
6677 xfrm_state_hold(sp->xvec[i]);
6678 }
6679 #endif
6680 #ifdef CONFIG_MCTP_FLOWS
6681 if (old_active & (1 << SKB_EXT_MCTP)) {
6682 struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
6683
6684 if (flow->key)
6685 refcount_inc(&flow->key->refs);
6686 }
6687 #endif
6688 __skb_ext_put(old);
6689 return new;
6690 }
6691
6692 /**
6693 * __skb_ext_set - attach the specified extension storage to this skb
6694 * @skb: buffer
6695 * @id: extension id
6696 * @ext: extension storage previously allocated via __skb_ext_alloc()
6697 *
6698 * Existing extensions, if any, are cleared.
6699 *
6700 * Returns the pointer to the extension.
6701 */
6702 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6703 struct skb_ext *ext)
6704 {
6705 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
6706
6707 skb_ext_put(skb);
6708 newlen = newoff + skb_ext_type_len[id];
6709 ext->chunks = newlen;
6710 ext->offset[id] = newoff;
6711 skb->extensions = ext;
6712 skb->active_extensions = 1 << id;
6713 return skb_ext_get_ptr(ext, id);
6714 }
6715
6716 /**
6717 * skb_ext_add - allocate space for given extension, COW if needed
6718 * @skb: buffer
6719 * @id: extension to allocate space for
6720 *
6721 * Allocates enough space for the given extension.
6722 * If the extension is already present, a pointer to that extension
6723 * is returned.
6724 *
6725 * If the skb was cloned, COW applies and the returned memory can be
6726 * modified without changing the extension space of cloned buffers.
6727 *
6728 * Returns pointer to the extension or NULL on allocation failure.
6729 */
6730 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6731 {
6732 struct skb_ext *new, *old = NULL;
6733 unsigned int newlen, newoff;
6734
6735 if (skb->active_extensions) {
6736 old = skb->extensions;
6737
6738 new = skb_ext_maybe_cow(old, skb->active_extensions);
6739 if (!new)
6740 return NULL;
6741
6742 if (__skb_ext_exist(new, id))
6743 goto set_active;
6744
6745 newoff = new->chunks;
6746 } else {
6747 newoff = SKB_EXT_CHUNKSIZEOF(*new);
6748
6749 new = __skb_ext_alloc(GFP_ATOMIC);
6750 if (!new)
6751 return NULL;
6752 }
6753
6754 newlen = newoff + skb_ext_type_len[id];
6755 new->chunks = newlen;
6756 new->offset[id] = newoff;
6757 set_active:
6758 skb->slow_gro = 1;
6759 skb->extensions = new;
6760 skb->active_extensions |= 1 << id;
6761 return skb_ext_get_ptr(new, id);
6762 }
6763 EXPORT_SYMBOL(skb_ext_add);
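
/* Editor's illustrative sketch: attaching an xfrm sec_path extension to
 * an skb. Real users go through secpath_set(), which is a thin wrapper
 * around skb_ext_add() that also initializes a newly allocated sec_path.
 */
#ifdef CONFIG_XFRM
static __maybe_unused struct sec_path *example_add_secpath(struct sk_buff *skb)
{
	return skb_ext_add(skb, SKB_EXT_SEC_PATH);
}
#endif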
6764
6765 #ifdef CONFIG_XFRM
6766 static void skb_ext_put_sp(struct sec_path *sp)
6767 {
6768 unsigned int i;
6769
6770 for (i = 0; i < sp->len; i++)
6771 xfrm_state_put(sp->xvec[i]);
6772 }
6773 #endif
6774
6775 #ifdef CONFIG_MCTP_FLOWS
6776 static void skb_ext_put_mctp(struct mctp_flow *flow)
6777 {
6778 if (flow->key)
6779 mctp_key_unref(flow->key);
6780 }
6781 #endif
6782
6783 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6784 {
6785 struct skb_ext *ext = skb->extensions;
6786
6787 skb->active_extensions &= ~(1 << id);
6788 if (skb->active_extensions == 0) {
6789 skb->extensions = NULL;
6790 __skb_ext_put(ext);
6791 #ifdef CONFIG_XFRM
6792 } else if (id == SKB_EXT_SEC_PATH &&
6793 refcount_read(&ext->refcnt) == 1) {
6794 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6795
6796 skb_ext_put_sp(sp);
6797 sp->len = 0;
6798 #endif
6799 }
6800 }
6801 EXPORT_SYMBOL(__skb_ext_del);
6802
6803 void __skb_ext_put(struct skb_ext *ext)
6804 {
6805 /* If this is last clone, nothing can increment
6806 * it after check passes. Avoids one atomic op.
6807 */
6808 if (refcount_read(&ext->refcnt) == 1)
6809 goto free_now;
6810
6811 if (!refcount_dec_and_test(&ext->refcnt))
6812 return;
6813 free_now:
6814 #ifdef CONFIG_XFRM
6815 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6816 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6817 #endif
6818 #ifdef CONFIG_MCTP_FLOWS
6819 if (__skb_ext_exist(ext, SKB_EXT_MCTP))
6820 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
6821 #endif
6822
6823 kmem_cache_free(skbuff_ext_cache, ext);
6824 }
6825 EXPORT_SYMBOL(__skb_ext_put);
6826 #endif /* CONFIG_SKB_EXTENSIONS */
6827
6828 /**
6829 * skb_attempt_defer_free - queue skb for remote freeing
6830 * @skb: buffer
6831 *
6832 * Put @skb in a per-cpu list, using the cpu which
6833 * allocated the skb/pages to reduce false sharing
6834 * and memory zone spinlock contention.
6835 */
6836 void skb_attempt_defer_free(struct sk_buff *skb)
6837 {
6838 int cpu = skb->alloc_cpu;
6839 struct softnet_data *sd;
6840 unsigned int defer_max;
6841 bool kick;
6842
6843 if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
6844 !cpu_online(cpu) ||
6845 cpu == raw_smp_processor_id()) {
6846 nodefer: __kfree_skb(skb);
6847 return;
6848 }
6849
6850 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
6851 DEBUG_NET_WARN_ON_ONCE(skb->destructor);
6852
6853 sd = &per_cpu(softnet_data, cpu);
6854 defer_max = READ_ONCE(sysctl_skb_defer_max);
6855 if (READ_ONCE(sd->defer_count) >= defer_max)
6856 goto nodefer;
6857
6858 spin_lock_bh(&sd->defer_lock);
6859 /* Send an IPI every time queue reaches half capacity. */
6860 kick = sd->defer_count == (defer_max >> 1);
6861 /* Paired with the READ_ONCE() few lines above */
6862 WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
6863
6864 skb->next = sd->defer_list;
6865 /* Paired with READ_ONCE() in skb_defer_free_flush() */
6866 WRITE_ONCE(sd->defer_list, skb);
6867 spin_unlock_bh(&sd->defer_lock);
6868
6869 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
6870 * if we are unlucky enough (this seems very unlikely).
6871 */
6872 if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
6873 smp_call_function_single_async(cpu, &sd->defer_csd);
6874 }
6875
6876 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
6877 size_t offset, size_t len)
6878 {
6879 const char *kaddr;
6880 __wsum csum;
6881
6882 kaddr = kmap_local_page(page);
6883 csum = csum_partial(kaddr + offset, len, 0);
6884 kunmap_local(kaddr);
6885 skb->csum = csum_block_add(skb->csum, csum, skb->len);
6886 }
6887
6888 /**
6889 * skb_splice_from_iter - Splice (or copy) pages to skbuff
6890 * @skb: The buffer to add pages to
6891 * @iter: Iterator representing the pages to be added
6892 * @maxsize: Maximum amount of data to be added
6893 * @gfp: Allocation flags
6894 *
6895 * This is a common helper function for supporting MSG_SPLICE_PAGES. It
6896 * extracts pages from an iterator and adds them to the socket buffer if
6897 * possible, copying them to fragments if not possible (such as if they're slab
6898 * pages).
6899 *
6900 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
6901 * insufficient space in the buffer to transfer anything.
6902 */
6903 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
6904 ssize_t maxsize, gfp_t gfp)
6905 {
6906 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags);
6907 struct page *pages[8], **ppages = pages;
6908 ssize_t spliced = 0, ret = 0;
6909 unsigned int i;
6910
6911 while (iter->count > 0) {
6912 ssize_t space, nr, len;
6913 size_t off;
6914
6915 ret = -EMSGSIZE;
6916 space = frag_limit - skb_shinfo(skb)->nr_frags;
6917 if (space < 0)
6918 break;
6919
6920 /* We might be able to coalesce without increasing nr_frags */
6921 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));
6922
6923 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
6924 if (len <= 0) {
6925 ret = len ?: -EIO;
6926 break;
6927 }
6928
6929 i = 0;
6930 do {
6931 struct page *page = pages[i++];
6932 size_t part = min_t(size_t, PAGE_SIZE - off, len);
6933
6934 ret = -EIO;
6935 if (WARN_ON_ONCE(!sendpage_ok(page)))
6936 goto out;
6937
6938 ret = skb_append_pagefrags(skb, page, off, part,
6939 frag_limit);
6940 if (ret < 0) {
6941 iov_iter_revert(iter, len);
6942 goto out;
6943 }
6944
6945 if (skb->ip_summed == CHECKSUM_NONE)
6946 skb_splice_csum_page(skb, page, off, part);
6947
6948 off = 0;
6949 spliced += part;
6950 maxsize -= part;
6951 len -= part;
6952 } while (len > 0);
6953
6954 if (maxsize <= 0)
6955 break;
6956 }
6957
6958 out:
6959 skb_len_add(skb, spliced);
6960 return spliced ?: ret;
6961 }
6962 EXPORT_SYMBOL(skb_splice_from_iter);
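
/* Editor's illustrative sketch: a protocol sendmsg() handler splicing
 * MSG_SPLICE_PAGES data from the message iterator into an skb it has
 * already allocated. Error handling is reduced to the bare minimum.
 */
static __maybe_unused ssize_t example_sendmsg_splice(struct sk_buff *skb,
						     struct msghdr *msg,
						     size_t len)
{
	if (!(msg->msg_flags & MSG_SPLICE_PAGES))
		return -EOPNOTSUPP;

	return skb_splice_from_iter(skb, &msg->msg_iter, len, GFP_KERNEL);
}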
6963