// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;
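/* Tunable at runtime via the net.core.gro_normal_batch sysctl. */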

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep and therefore cannot guarantee that all
 * CPUs currently in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
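
/*
 * Usage sketch (illustrative, not part of this file): a protocol typically
 * registers its offload callbacks once at init time. The shape below mirrors
 * what IPv4 does in net/ipv4/af_inet.c; treat the exact wiring as an example
 * rather than a spec.
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */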

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
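
/*
 * Teardown sketch (illustrative): a module that registered an offload with
 * dev_add_offload(&my_offload) would call dev_remove_offload(&my_offload)
 * from its exit path; the synchronize_net() above ensures no CPU still
 * dereferences the structure once this returns.
 */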
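
/* skb_gro_receive() coalesces @skb into the held GRO packet @p using one of
 * three strategies, tried in order:
 *  1) frag merge: @skb's linear area holds no packet data, so its page
 *     frags are appended to @p's frag array;
 *  2) head stealing: @skb's linear data sits in a page fragment
 *     (skb->head_frag), so the head itself becomes a frag of @p and the
 *     sk_buff can be freed (NAPI_GRO_FREE_STOLEN_HEAD);
 *  3) frag_list chaining: fall back to linking @skb onto @p's frag_list.
 */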
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on some slower paths, so either skip all
		 * the infrequent tests altogether or avoid trying too hard
		 * to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

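/* Set up the frag0 fast path: when the skb's linear area is empty and the
 * headers sit entirely in the first page fragment, frag0/frag0_len let the
 * GRO header accessors read headers directly from that fragment without
 * first pulling them into the linear area.
 */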
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

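/* Result semantics: GRO_MERGED means @skb was chained into a held packet and
 * its memory is now owned by that packet; GRO_MERGED_FREE means its payload
 * was absorbed and the sk_buff itself may be freed; GRO_HELD means @skb now
 * sits on the hash list waiting for more segments; GRO_NORMAL means it
 * bypasses GRO and goes up the stack as-is; GRO_CONSUMED means a gro_receive
 * callback took ownership (it returned ERR_PTR(-EINPROGRESS)).
 */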
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned access */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non-DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
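
/*
 * Usage sketch (hypothetical driver code): napi_gro_receive() is normally
 * called from a driver's NAPI poll callback. mydrv_poll() and
 * mydrv_fetch_rx_skb() are made-up names standing in for driver-specific
 * descriptor handling.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = mydrv_fetch_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */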

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish().
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
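
/*
 * Usage sketch (hypothetical): a driver that receives directly into pages
 * pairs napi_get_frags() with napi_gro_frags() instead of building its own
 * skbs. rx_page, rx_off and rx_len stand in for driver descriptor fields.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;		// allocation failure: drop/recycle the page
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 *			rx_page, rx_off, rx_len, PAGE_SIZE);
 *	napi_gro_frags(napi);	// consumes napi->skb; parses the Ethernet
 *				// header out of the first fragment
 */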

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
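
/*
 * Callers typically reach this through the skb_gro_checksum_validate()
 * helpers in include/net/gro.h; e.g. tcp4_gro_receive() validates the
 * checksum against the pseudo-header before attempting to coalesce.
 */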