// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

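/* Make sure the skb has enough head- and tailroom for the link-layer
 * header plus the transform headers this dst will add; expand it if not.
 */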
static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	x->lastused = ktime_get_real_seconds();

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* Copying the inner DS field into the outer header discloses it;
	 * XFRM_SA_XFLAG_DONT_ENCAP_DSCP suppresses that.
	 */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
			    0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
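/* Add encapsulation header.
 *
 * The top IPv6 header will be constructed; this is the IPv6 counterpart
 * of xfrm4_tunnel_encap_add() above.
 */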
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}

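/* Add encapsulation header.
 *
 * The top IPv6 header and BEET pseudo header will be constructed; this
 * is the IPv6 counterpart of xfrm4_beet_encap_add() above.
 */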
static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
	skb->protocol = htons(ETH_P_IP);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm4_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_encap_add(x, skb);
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}

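/* Dispatch to the encapsulation routine matching the state's outer mode
 * and address family.
 */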
static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);

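/* Apply the transform of each xfrm state stacked on the skb's dst
 * chain, stopping once a tunnel-mode state has been applied or the
 * chain ends.  Entered with err > 0 on the first pass; resumption after
 * asynchronous crypto passes err <= 0 and jumps straight to the resume
 * label.
 */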
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}

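/* Drive xfrm_output_one() over the packet, re-injecting it into the
 * output path (dst_output() or the POST_ROUTING hook) between the
 * transforms of nested policies.  Also the entry point for resuming
 * after asynchronous crypto completes.
 */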
int xfrm_output_resume(struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, skb->sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, skb->sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(skb, 1);
}

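/* Software-segment a GSO skb and feed each resulting segment through
 * xfrm_output2() individually.
 */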
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}

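/* Main xfrm output entry point, called from the IPv4/IPv6 output paths.
 * Prepares the control block and the offload state, segmenting or
 * checksumming in software where the device cannot, before handing the
 * packet to xfrm_output2().
 */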
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	switch (x->outer_mode.family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			if (skb->inner_protocol)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

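/* Enforce the path MTU for IPv4 tunnel-mode packets: when a DF packet
 * is too big, report the MTU to the local socket or send an ICMP
 * FRAG_NEEDED back to the sender.
 */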
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
		goto out;

	mtu = dst_mtu(skb_dst(skb));
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
		skb->protocol = htons(ETH_P_IP);

		if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}

static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
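/* As above, for IPv6: enforce the path MTU (never below IPV6_MIN_MTU)
 * and report oversize packets via RXPMTU, a local error or an ICMPv6
 * Packet Too Big, as appropriate.
 */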
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);

	if (skb->ignore_df)
		goto out;

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);

		if (xfrm6_local_dontfrag(skb->sk))
			ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
		else if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

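/* Determine the inner mode of the state and extract the inner header
 * fields into the skb control block for the encapsulation routines.
 */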
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode;

	if (x->sel.family == AF_UNSPEC)
		inner_mode = xfrm_ip2inner_mode(x,
				xfrm_af2proto(skb_dst(skb)->ops->family));
	else
		inner_mode = &x->inner_mode;

	if (inner_mode == NULL)
		return -EAFNOSUPPORT;

	switch (inner_mode->family) {
	case AF_INET:
		return xfrm4_extract_output(x, skb);
	case AF_INET6:
		return xfrm6_extract_output(x, skb);
	}

	return -EAFNOSUPPORT;
}

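/* Report a too-big packet to the local socket via the per-family
 * local_error handler.
 */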
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

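	/* xfrm_state_get_afinfo() returns with the RCU read lock held on
	 * success, hence the unlock below.
	 */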
	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);