1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Extension Header handling for IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 * Andi Kleen <ak@muc.de>
9 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
10 */
11
12 /* Changes:
13 * yoshfuji : ensure not to overrun while parsing
14 * tlv options.
15 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
16 * YOSHIFUJI Hideaki @USAGI Register inbound extension header
17 * handlers as inet6_protocol{}.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/netdevice.h>
26 #include <linux/in6.h>
27 #include <linux/icmpv6.h>
28 #include <linux/slab.h>
29 #include <linux/export.h>
30
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/snmp.h>
34
35 #include <net/ipv6.h>
36 #include <net/protocol.h>
37 #include <net/transp_v6.h>
38 #include <net/rawv6.h>
39 #include <net/ndisc.h>
40 #include <net/ip6_route.h>
41 #include <net/addrconf.h>
42 #include <net/calipso.h>
43 #if IS_ENABLED(CONFIG_IPV6_MIP6)
44 #include <net/xfrm.h>
45 #endif
46 #include <linux/seg6.h>
47 #include <net/seg6.h>
48 #ifdef CONFIG_IPV6_SEG6_HMAC
49 #include <net/seg6_hmac.h>
50 #endif
51 #include <net/rpl.h>
52
53 #include <linux/uaccess.h>
54
55 /*
56 * Parsing tlv encoded headers.
57 *
58 * Parsing function "func" returns true if parsing succeeded
59 * and false if it failed.
60 * It MUST NOT touch skb->transport_header.
61 */
62
63 struct tlvtype_proc {
64 int type;
65 bool (*func)(struct sk_buff *skb, int offset);
66 };
67
68 /*********************
69 Generic functions
70 *********************/
71
72 /* An unknown option is detected, decide what to do */
73
74 static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
75 bool disallow_unknowns)
76 {
77 if (disallow_unknowns) {
78 /* If unknown TLVs are disallowed by configuration
79 * then always silently drop the packet. Note this also
80 * means no ICMP Parameter Problem is sent, which
81 * can be a good property to mitigate a reflection DoS
82 * attack.
83 */
84
85 goto drop;
86 }
87
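/* The two high-order bits of the option type (RFC 8200, section 4.2)
 * encode the action to take when the option is not recognized.
 */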
88 switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
89 case 0: /* ignore */
90 return true;
91
92 case 1: /* drop packet */
93 break;
94
95 case 3: /* Send ICMP if not a multicast address and drop packet */
96 /* Actually, this is a redundant check; icmp_send
97 will recheck it in any case.
98 */
99 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
100 break;
101 fallthrough;
102 case 2: /* send ICMP PARM PROB regardless and drop packet */
103 icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
104 return false;
105 }
106
107 drop:
108 kfree_skb(skb);
109 return false;
110 }
111
112 /* Parse tlv encoded option header (hop-by-hop or destination) */
113
114 static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
115 struct sk_buff *skb,
116 int max_count)
117 {
118 int len = (skb_transport_header(skb)[1] + 1) << 3;
119 const unsigned char *nh = skb_network_header(skb);
120 int off = skb_network_header_len(skb);
121 const struct tlvtype_proc *curr;
122 bool disallow_unknowns = false;
123 int tlv_count = 0;
124 int padlen = 0;
125
126 if (unlikely(max_count < 0)) {
127 disallow_unknowns = true;
128 max_count = -max_count;
129 }
130
131 if (skb_transport_offset(skb) + len > skb_headlen(skb))
132 goto bad;
133
134 off += 2;
135 len -= 2;
136
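/* Each TLV option is encoded as type (1 byte), data length (1 byte)
 * and data, except for Pad1 which is a single zero byte.
 */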
137 while (len > 0) {
138 int optlen, i;
139
140 if (nh[off] == IPV6_TLV_PAD1) {
141 padlen++;
142 if (padlen > 7)
143 goto bad;
144 off++;
145 len--;
146 continue;
147 }
148 if (len < 2)
149 goto bad;
150 optlen = nh[off + 1] + 2;
151 if (optlen > len)
152 goto bad;
153
154 if (nh[off] == IPV6_TLV_PADN) {
155 /* RFC 2460 states that the purpose of PadN is
156 * to align the containing header to multiples
157 * of 8. 7 is therefore the highest valid value.
158 * See also RFC 4942, Section 2.1.9.5.
159 */
160 padlen += optlen;
161 if (padlen > 7)
162 goto bad;
163 /* RFC 4942 recommends that receiving hosts
164 * actively check that the PadN payload contains
165 * only zeroes.
166 */
167 for (i = 2; i < optlen; i++) {
168 if (nh[off + i] != 0)
169 goto bad;
170 }
171 } else {
172 tlv_count++;
173 if (tlv_count > max_count)
174 goto bad;
175
176 for (curr = procs; curr->type >= 0; curr++) {
177 if (curr->type == nh[off]) {
178 /* Type-specific length/alignment
179 checks will be performed in
180 func(). */
181 if (curr->func(skb, off) == false)
182 return false;
183 break;
184 }
185 }
186 if (curr->type < 0 &&
187 !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
188 return false;
189
190 padlen = 0;
191 }
192 off += optlen;
193 len -= optlen;
194 }
195
196 if (len == 0)
197 return true;
198 bad:
199 kfree_skb(skb);
200 return false;
201 }
202
203 /*****************************
204 Destination options header.
205 *****************************/
206
207 #if IS_ENABLED(CONFIG_IPV6_MIP6)
208 static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
209 {
210 struct ipv6_destopt_hao *hao;
211 struct inet6_skb_parm *opt = IP6CB(skb);
212 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
213 int ret;
214
215 if (opt->dsthao) {
216 net_dbg_ratelimited("hao duplicated\n");
217 goto discard;
218 }
219 opt->dsthao = opt->dst1;
220 opt->dst1 = 0;
221
222 hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
223
224 if (hao->length != 16) {
225 net_dbg_ratelimited("hao invalid option length = %d\n",
226 hao->length);
227 goto discard;
228 }
229
230 if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
231 net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
232 &hao->addr);
233 goto discard;
234 }
235
236 ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
237 (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
238 if (unlikely(ret < 0))
239 goto discard;
240
241 if (skb_cloned(skb)) {
242 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
243 goto discard;
244
245 /* update all variables used below to point into the copied skbuff */
246 hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
247 optoff);
248 ipv6h = ipv6_hdr(skb);
249 }
250
251 if (skb->ip_summed == CHECKSUM_COMPLETE)
252 skb->ip_summed = CHECKSUM_NONE;
253
254 swap(ipv6h->saddr, hao->addr);
255
256 if (skb->tstamp == 0)
257 __net_timestamp(skb);
258
259 return true;
260
261 discard:
262 kfree_skb(skb);
263 return false;
264 }
265 #endif
266
267 static const struct tlvtype_proc tlvprocdestopt_lst[] = {
268 #if IS_ENABLED(CONFIG_IPV6_MIP6)
269 {
270 .type = IPV6_TLV_HAO,
271 .func = ipv6_dest_hao,
272 },
273 #endif
274 {-1, NULL}
275 };
276
277 static int ipv6_destopt_rcv(struct sk_buff *skb)
278 {
279 struct inet6_dev *idev = __in6_dev_get(skb->dev);
280 struct inet6_skb_parm *opt = IP6CB(skb);
281 #if IS_ENABLED(CONFIG_IPV6_MIP6)
282 __u16 dstbuf;
283 #endif
284 struct dst_entry *dst = skb_dst(skb);
285 struct net *net = dev_net(skb->dev);
286 int extlen;
287
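/* Pull the fixed 8 bytes first so the header length byte can be read,
 * then pull the complete extension header.
 */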
288 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
289 !pskb_may_pull(skb, (skb_transport_offset(skb) +
290 ((skb_transport_header(skb)[1] + 1) << 3)))) {
291 __IP6_INC_STATS(dev_net(dst->dev), idev,
292 IPSTATS_MIB_INHDRERRORS);
293 fail_and_free:
294 kfree_skb(skb);
295 return -1;
296 }
297
298 extlen = (skb_transport_header(skb)[1] + 1) << 3;
299 if (extlen > net->ipv6.sysctl.max_dst_opts_len)
300 goto fail_and_free;
301
302 opt->lastopt = opt->dst1 = skb_network_header_len(skb);
303 #if IS_ENABLED(CONFIG_IPV6_MIP6)
304 dstbuf = opt->dst1;
305 #endif
306
307 if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
308 net->ipv6.sysctl.max_dst_opts_cnt)) {
309 skb->transport_header += extlen;
310 opt = IP6CB(skb);
311 #if IS_ENABLED(CONFIG_IPV6_MIP6)
312 opt->nhoff = dstbuf;
313 #else
314 opt->nhoff = opt->dst1;
315 #endif
316 return 1;
317 }
318
319 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
320 return -1;
321 }
322
323 static void seg6_update_csum(struct sk_buff *skb)
324 {
325 struct ipv6_sr_hdr *hdr;
326 struct in6_addr *addr;
327 __be32 from, to;
328
329 /* srh is at transport offset and seg_left is already decremented
330 * but daddr is not yet updated with next segment
331 */
332
333 hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
334 addr = hdr->segments + hdr->segments_left;
335
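/* Temporarily restore segments_left to its on-the-wire value so the
 * 32-bit word covering it can be diffed against the decremented one.
 */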
336 hdr->segments_left++;
337 from = *(__be32 *)hdr;
338
339 hdr->segments_left--;
340 to = *(__be32 *)hdr;
341
342 /* update skb csum with diff resulting from seg_left decrement */
343
344 update_csum_diff4(skb, from, to);
345
346 /* compute csum diff between current and next segment and update */
347
348 update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
349 (__be32 *)addr);
350 }
351
352 static int ipv6_srh_rcv(struct sk_buff *skb)
353 {
354 struct inet6_skb_parm *opt = IP6CB(skb);
355 struct net *net = dev_net(skb->dev);
356 struct ipv6_sr_hdr *hdr;
357 struct inet6_dev *idev;
358 struct in6_addr *addr;
359 int accept_seg6;
360
361 hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
362
363 idev = __in6_dev_get(skb->dev);
364
365 accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
366 if (accept_seg6 > idev->cnf.seg6_enabled)
367 accept_seg6 = idev->cnf.seg6_enabled;
368
369 if (!accept_seg6) {
370 kfree_skb(skb);
371 return -1;
372 }
373
374 #ifdef CONFIG_IPV6_SEG6_HMAC
375 if (!seg6_hmac_validate_skb(skb)) {
376 kfree_skb(skb);
377 return -1;
378 }
379 #endif
380
381 looped_back:
382 if (hdr->segments_left == 0) {
383 if (hdr->nexthdr == NEXTHDR_IPV6) {
384 int offset = (hdr->hdrlen + 1) << 3;
385
386 skb_postpull_rcsum(skb, skb_network_header(skb),
387 skb_network_header_len(skb));
388
389 if (!pskb_pull(skb, offset)) {
390 kfree_skb(skb);
391 return -1;
392 }
393 skb_postpull_rcsum(skb, skb_transport_header(skb),
394 offset);
395
396 skb_reset_network_header(skb);
397 skb_reset_transport_header(skb);
398 skb->encapsulation = 0;
399
400 __skb_tunnel_rx(skb, skb->dev, net);
401
402 netif_rx(skb);
403 return -1;
404 }
405
406 opt->srcrt = skb_network_header_len(skb);
407 opt->lastopt = opt->srcrt;
408 skb->transport_header += (hdr->hdrlen + 1) << 3;
409 opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
410
411 return 1;
412 }
413
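/* hdrlen is in 8-octet units and each segment is 16 octets, so
 * hdrlen >> 1 is an upper bound on the number of segments; a larger
 * segments_left cannot reference a valid segment.
 */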
414 if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
415 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
416 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
417 ((&hdr->segments_left) -
418 skb_network_header(skb)));
419 return -1;
420 }
421
422 if (skb_cloned(skb)) {
423 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
424 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
425 IPSTATS_MIB_OUTDISCARDS);
426 kfree_skb(skb);
427 return -1;
428 }
429 }
430
431 hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
432
433 hdr->segments_left--;
434 addr = hdr->segments + hdr->segments_left;
435
436 skb_push(skb, sizeof(struct ipv6hdr));
437
438 if (skb->ip_summed == CHECKSUM_COMPLETE)
439 seg6_update_csum(skb);
440
441 ipv6_hdr(skb)->daddr = *addr;
442
443 skb_dst_drop(skb);
444
445 ip6_route_input(skb);
446
447 if (skb_dst(skb)->error) {
448 dst_input(skb);
449 return -1;
450 }
451
452 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
453 if (ipv6_hdr(skb)->hop_limit <= 1) {
454 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
455 icmpv6_send(skb, ICMPV6_TIME_EXCEED,
456 ICMPV6_EXC_HOPLIMIT, 0);
457 kfree_skb(skb);
458 return -1;
459 }
460 ipv6_hdr(skb)->hop_limit--;
461
462 skb_pull(skb, sizeof(struct ipv6hdr));
463 goto looped_back;
464 }
465
466 dst_input(skb);
467
468 return -1;
469 }
470
471 static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
472 {
473 struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr;
474 struct inet6_skb_parm *opt = IP6CB(skb);
475 struct net *net = dev_net(skb->dev);
476 struct inet6_dev *idev;
477 struct ipv6hdr *oldhdr;
478 struct in6_addr addr;
479 unsigned char *buf;
480 int accept_rpl_seg;
481 int i, err;
482 u64 n = 0;
483 u32 r;
484
485 idev = __in6_dev_get(skb->dev);
486
487 accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled;
488 if (accept_rpl_seg > idev->cnf.rpl_seg_enabled)
489 accept_rpl_seg = idev->cnf.rpl_seg_enabled;
490
491 if (!accept_rpl_seg) {
492 kfree_skb(skb);
493 return -1;
494 }
495
496 looped_back:
497 hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
498
499 if (hdr->segments_left == 0) {
500 if (hdr->nexthdr == NEXTHDR_IPV6) {
501 int offset = (hdr->hdrlen + 1) << 3;
502
503 skb_postpull_rcsum(skb, skb_network_header(skb),
504 skb_network_header_len(skb));
505
506 if (!pskb_pull(skb, offset)) {
507 kfree_skb(skb);
508 return -1;
509 }
510 skb_postpull_rcsum(skb, skb_transport_header(skb),
511 offset);
512
513 skb_reset_network_header(skb);
514 skb_reset_transport_header(skb);
515 skb->encapsulation = 0;
516
517 __skb_tunnel_rx(skb, skb->dev, net);
518
519 netif_rx(skb);
520 return -1;
521 }
522
523 opt->srcrt = skb_network_header_len(skb);
524 opt->lastopt = opt->srcrt;
525 skb->transport_header += (hdr->hdrlen + 1) << 3;
526 opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
527
528 return 1;
529 }
530
531 if (!pskb_may_pull(skb, sizeof(*hdr))) {
532 kfree_skb(skb);
533 return -1;
534 }
535
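/* Recover the address count from the compressed SRH (RFC 6554):
 * hdrlen is in 8-octet units, pad counts trailing padding octets, and
 * cmpri/cmpre give the number of prefix octets elided from every
 * address except the last / from the last address, respectively.
 */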
536 n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre);
537 r = do_div(n, (16 - hdr->cmpri));
538 /* Check that the calculation left no remainder and that n fits into
539 * an unsigned char, which is the type of the segments_left field;
540 * it must not be higher than that.
541 */
542 if (r || (n + 1) > 255) {
543 kfree_skb(skb);
544 return -1;
545 }
546
547 if (hdr->segments_left > n + 1) {
548 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
549 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
550 ((&hdr->segments_left) -
551 skb_network_header(skb)));
552 return -1;
553 }
554
555 if (skb_cloned(skb)) {
556 if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
557 GFP_ATOMIC)) {
558 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
559 IPSTATS_MIB_OUTDISCARDS);
560 kfree_skb(skb);
561 return -1;
562 }
563 } else {
564 err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
565 if (unlikely(err)) {
566 kfree_skb(skb);
567 return -1;
568 }
569 }
570
571 hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
572
573 if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
574 hdr->cmpre))) {
575 kfree_skb(skb);
576 return -1;
577 }
578
579 hdr->segments_left--;
580 i = n - hdr->segments_left;
581
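/* Scratch buffer holding the fully decompressed SRH (ohdr) followed
 * by the SRH recompressed for the updated destination (chdr).
 */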
582 buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC);
583 if (unlikely(!buf)) {
584 kfree_skb(skb);
585 return -1;
586 }
587
588 ohdr = (struct ipv6_rpl_sr_hdr *)buf;
589 ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n);
590 chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3));
591
592 if ((ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST) ||
593 (ipv6_addr_type(&ohdr->rpl_segaddr[i]) & IPV6_ADDR_MULTICAST)) {
594 kfree_skb(skb);
595 kfree(buf);
596 return -1;
597 }
598
599 err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1);
600 if (err) {
601 icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0);
602 kfree_skb(skb);
603 kfree(buf);
604 return -1;
605 }
606
607 addr = ipv6_hdr(skb)->daddr;
608 ipv6_hdr(skb)->daddr = ohdr->rpl_segaddr[i];
609 ohdr->rpl_segaddr[i] = addr;
610
611 ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);
612
613 oldhdr = ipv6_hdr(skb);
614
615 skb_pull(skb, ((hdr->hdrlen + 1) << 3));
616 skb_postpull_rcsum(skb, oldhdr,
617 sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
618 skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
619 skb_reset_network_header(skb);
620 skb_mac_header_rebuild(skb);
621 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
622
623 memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr));
624 memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3);
625
626 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
627 skb_postpush_rcsum(skb, ipv6_hdr(skb),
628 sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3));
629
630 kfree(buf);
631
632 skb_dst_drop(skb);
633
634 ip6_route_input(skb);
635
636 if (skb_dst(skb)->error) {
637 dst_input(skb);
638 return -1;
639 }
640
641 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
642 if (ipv6_hdr(skb)->hop_limit <= 1) {
643 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
644 icmpv6_send(skb, ICMPV6_TIME_EXCEED,
645 ICMPV6_EXC_HOPLIMIT, 0);
646 kfree_skb(skb);
647 return -1;
648 }
649 ipv6_hdr(skb)->hop_limit--;
650
651 skb_pull(skb, sizeof(struct ipv6hdr));
652 goto looped_back;
653 }
654
655 dst_input(skb);
656
657 return -1;
658 }
659
660 /********************************
661 Routing header.
662 ********************************/
663
664 /* called with rcu_read_lock() */
665 static int ipv6_rthdr_rcv(struct sk_buff *skb)
666 {
667 struct inet6_dev *idev = __in6_dev_get(skb->dev);
668 struct inet6_skb_parm *opt = IP6CB(skb);
669 struct in6_addr *addr = NULL;
670 struct in6_addr daddr;
671 int n, i;
672 struct ipv6_rt_hdr *hdr;
673 struct rt0_hdr *rthdr;
674 struct net *net = dev_net(skb->dev);
675 int accept_source_route = net->ipv6.devconf_all->accept_source_route;
676
677 idev = __in6_dev_get(skb->dev);
678 if (idev && accept_source_route > idev->cnf.accept_source_route)
679 accept_source_route = idev->cnf.accept_source_route;
680
681 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
682 !pskb_may_pull(skb, (skb_transport_offset(skb) +
683 ((skb_transport_header(skb)[1] + 1) << 3)))) {
684 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
685 kfree_skb(skb);
686 return -1;
687 }
688
689 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
690
691 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
692 skb->pkt_type != PACKET_HOST) {
693 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
694 kfree_skb(skb);
695 return -1;
696 }
697
698 switch (hdr->type) {
699 case IPV6_SRCRT_TYPE_4:
700 /* segment routing */
701 return ipv6_srh_rcv(skb);
702 case IPV6_SRCRT_TYPE_3:
703 /* rpl segment routing */
704 return ipv6_rpl_srh_rcv(skb);
705 default:
706 break;
707 }
708
709 looped_back:
710 if (hdr->segments_left == 0) {
711 switch (hdr->type) {
712 #if IS_ENABLED(CONFIG_IPV6_MIP6)
713 case IPV6_SRCRT_TYPE_2:
714 /* Silently discard a type 2 header unless it was
715 * processed by this node itself.
716 */
717 if (!addr) {
718 __IP6_INC_STATS(net, idev,
719 IPSTATS_MIB_INADDRERRORS);
720 kfree_skb(skb);
721 return -1;
722 }
723 break;
724 #endif
725 default:
726 break;
727 }
728
729 opt->lastopt = opt->srcrt = skb_network_header_len(skb);
730 skb->transport_header += (hdr->hdrlen + 1) << 3;
731 opt->dst0 = opt->dst1;
732 opt->dst1 = 0;
733 opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
734 return 1;
735 }
736
737 switch (hdr->type) {
738 #if IS_ENABLED(CONFIG_IPV6_MIP6)
739 case IPV6_SRCRT_TYPE_2:
740 if (accept_source_route < 0)
741 goto unknown_rh;
742 /* Silently discard invalid RTH type 2 */
743 if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
744 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
745 kfree_skb(skb);
746 return -1;
747 }
748 break;
749 #endif
750 default:
751 goto unknown_rh;
752 }
753
754 /*
755 * This is the routing header forwarding algorithm from
756 * RFC 2460, page 16.
757 */
758
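/* hdrlen is in 8-octet units and each address is 16 octets, so the
 * routing header carries hdrlen >> 1 addresses.
 */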
759 n = hdr->hdrlen >> 1;
760
761 if (hdr->segments_left > n) {
762 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
763 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
764 ((&hdr->segments_left) -
765 skb_network_header(skb)));
766 return -1;
767 }
768
769 /* We are about to mangle packet header. Be careful!
770 Do not damage packets queued somewhere.
771 */
772 if (skb_cloned(skb)) {
773 /* the copy is a forwarded packet */
774 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
775 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
776 IPSTATS_MIB_OUTDISCARDS);
777 kfree_skb(skb);
778 return -1;
779 }
780 hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
781 }
782
783 if (skb->ip_summed == CHECKSUM_COMPLETE)
784 skb->ip_summed = CHECKSUM_NONE;
785
786 i = n - --hdr->segments_left;
787
788 rthdr = (struct rt0_hdr *) hdr;
789 addr = rthdr->addr;
790 addr += i - 1;
791
792 switch (hdr->type) {
793 #if IS_ENABLED(CONFIG_IPV6_MIP6)
794 case IPV6_SRCRT_TYPE_2:
795 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
796 (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
797 IPPROTO_ROUTING) < 0) {
798 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
799 kfree_skb(skb);
800 return -1;
801 }
802 if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
803 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
804 kfree_skb(skb);
805 return -1;
806 }
807 break;
808 #endif
809 default:
810 break;
811 }
812
813 if (ipv6_addr_is_multicast(addr)) {
814 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
815 kfree_skb(skb);
816 return -1;
817 }
818
819 daddr = *addr;
820 *addr = ipv6_hdr(skb)->daddr;
821 ipv6_hdr(skb)->daddr = daddr;
822
823 skb_dst_drop(skb);
824 ip6_route_input(skb);
825 if (skb_dst(skb)->error) {
826 skb_push(skb, skb->data - skb_network_header(skb));
827 dst_input(skb);
828 return -1;
829 }
830
831 if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
832 if (ipv6_hdr(skb)->hop_limit <= 1) {
833 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
834 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
835 0);
836 kfree_skb(skb);
837 return -1;
838 }
839 ipv6_hdr(skb)->hop_limit--;
840 goto looped_back;
841 }
842
843 skb_push(skb, skb->data - skb_network_header(skb));
844 dst_input(skb);
845 return -1;
846
847 unknown_rh:
848 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
849 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
850 (&hdr->type) - skb_network_header(skb));
851 return -1;
852 }
853
854 static const struct inet6_protocol rthdr_protocol = {
855 .handler = ipv6_rthdr_rcv,
856 .flags = INET6_PROTO_NOPOLICY,
857 };
858
859 static const struct inet6_protocol destopt_protocol = {
860 .handler = ipv6_destopt_rcv,
861 .flags = INET6_PROTO_NOPOLICY,
862 };
863
864 static const struct inet6_protocol nodata_protocol = {
865 .handler = dst_discard,
866 .flags = INET6_PROTO_NOPOLICY,
867 };
868
869 int __init ipv6_exthdrs_init(void)
870 {
871 int ret;
872
873 ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
874 if (ret)
875 goto out;
876
877 ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
878 if (ret)
879 goto out_rthdr;
880
881 ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
882 if (ret)
883 goto out_destopt;
884
885 out:
886 return ret;
887 out_destopt:
888 inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
889 out_rthdr:
890 inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
891 goto out;
892 }
893
894 void ipv6_exthdrs_exit(void)
895 {
896 inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
897 inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
898 inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
899 }
900
901 /**********************************
902 Hop-by-hop options.
903 **********************************/
904
905 /*
906 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
907 */
908 static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
909 {
910 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
911 }
912
913 static inline struct net *ipv6_skb_net(struct sk_buff *skb)
914 {
915 return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
916 }
917
918 /* Router Alert as of RFC 2711 */
919
920 static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
921 {
922 const unsigned char *nh = skb_network_header(skb);
923
924 if (nh[optoff + 1] == 2) {
925 IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
926 memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
927 return true;
928 }
929 net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
930 nh[optoff + 1]);
931 kfree_skb(skb);
932 return false;
933 }
934
935 /* Jumbo payload */
936
937 static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
938 {
939 const unsigned char *nh = skb_network_header(skb);
940 struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
941 struct net *net = ipv6_skb_net(skb);
942 u32 pkt_len;
943
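/* RFC 2675: the jumbo payload option carries 4 octets of data and
 * must be aligned to 4n + 2.
 */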
944 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
945 net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
946 nh[optoff+1]);
947 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
948 goto drop;
949 }
950
951 pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
952 if (pkt_len <= IPV6_MAXPLEN) {
953 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
954 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
955 return false;
956 }
957 if (ipv6_hdr(skb)->payload_len) {
958 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
959 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
960 return false;
961 }
962
963 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
964 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
965 goto drop;
966 }
967
968 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
969 goto drop;
970
971 IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
972 return true;
973
974 drop:
975 kfree_skb(skb);
976 return false;
977 }
978
979 /* CALIPSO RFC 5570 */
980
981 static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
982 {
983 const unsigned char *nh = skb_network_header(skb);
984
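/* RFC 5570: the option is at least 8 octets long and the compartment
 * bitmap (length in 32-bit words at offset 6) must fit within it.
 */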
985 if (nh[optoff + 1] < 8)
986 goto drop;
987
988 if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
989 goto drop;
990
991 if (!calipso_validate(skb, nh + optoff))
992 goto drop;
993
994 return true;
995
996 drop:
997 kfree_skb(skb);
998 return false;
999 }
1000
1001 static const struct tlvtype_proc tlvprochopopt_lst[] = {
1002 {
1003 .type = IPV6_TLV_ROUTERALERT,
1004 .func = ipv6_hop_ra,
1005 },
1006 {
1007 .type = IPV6_TLV_JUMBO,
1008 .func = ipv6_hop_jumbo,
1009 },
1010 {
1011 .type = IPV6_TLV_CALIPSO,
1012 .func = ipv6_hop_calipso,
1013 },
1014 { -1, }
1015 };
1016
1017 int ipv6_parse_hopopts(struct sk_buff *skb)
1018 {
1019 struct inet6_skb_parm *opt = IP6CB(skb);
1020 struct net *net = dev_net(skb->dev);
1021 int extlen;
1022
1023 /*
1024 * skb_network_header(skb) is equal to skb->data, and
1025 * skb_network_header_len(skb) is always equal to
1026 * sizeof(struct ipv6hdr) by definition of
1027 * hop-by-hop options.
1028 */
1029 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
1030 !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
1031 ((skb_transport_header(skb)[1] + 1) << 3)))) {
1032 fail_and_free:
1033 kfree_skb(skb);
1034 return -1;
1035 }
1036
1037 extlen = (skb_transport_header(skb)[1] + 1) << 3;
1038 if (extlen > net->ipv6.sysctl.max_hbh_opts_len)
1039 goto fail_and_free;
1040
1041 opt->flags |= IP6SKB_HOPBYHOP;
1042 if (ip6_parse_tlv(tlvprochopopt_lst, skb,
1043 net->ipv6.sysctl.max_hbh_opts_cnt)) {
1044 skb->transport_header += extlen;
1045 opt = IP6CB(skb);
1046 opt->nhoff = sizeof(struct ipv6hdr);
1047 return 1;
1048 }
1049 return -1;
1050 }
1051
1052 /*
1053 * Creating outbound headers.
1054 *
1055 * "build" functions work when skb is filled from head to tail (datagram)
1056 * "push" functions work when headers are added from tail to head (tcp)
1057 *
1058 * In both cases we assume, that caller reserved enough room
1059 * for headers.
1060 */
1061
1062 static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
1063 struct ipv6_rt_hdr *opt,
1064 struct in6_addr **addr_p, struct in6_addr *saddr)
1065 {
1066 struct rt0_hdr *phdr, *ihdr;
1067 int hops;
1068
1069 ihdr = (struct rt0_hdr *) opt;
1070
1071 phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
1072 memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
1073
1074 hops = ihdr->rt_hdr.hdrlen >> 1;
1075
1076 if (hops > 1)
1077 memcpy(phdr->addr, ihdr->addr + 1,
1078 (hops - 1) * sizeof(struct in6_addr));
1079
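/* The final destination goes into the last slot of the routing header
 * and the first listed hop becomes the packet's destination address.
 */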
1080 phdr->addr[hops - 1] = **addr_p;
1081 *addr_p = ihdr->addr;
1082
1083 phdr->rt_hdr.nexthdr = *proto;
1084 *proto = NEXTHDR_ROUTING;
1085 }
1086
1087 static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
1088 struct ipv6_rt_hdr *opt,
1089 struct in6_addr **addr_p, struct in6_addr *saddr)
1090 {
1091 struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
1092 int plen, hops;
1093
1094 sr_ihdr = (struct ipv6_sr_hdr *)opt;
1095 plen = (sr_ihdr->hdrlen + 1) << 3;
1096
1097 sr_phdr = skb_push(skb, plen);
1098 memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));
1099
1100 hops = sr_ihdr->first_segment + 1;
1101 memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
1102 (hops - 1) * sizeof(struct in6_addr));
1103
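/* The final destination is stored as segment 0 and the packet is
 * first addressed to the currently active segment.
 */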
1104 sr_phdr->segments[0] = **addr_p;
1105 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
1106
1107 if (sr_ihdr->hdrlen > hops * 2) {
1108 int tlvs_offset, tlvs_length;
1109
1110 tlvs_offset = (1 + hops * 2) << 3;
1111 tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
1112 memcpy((char *)sr_phdr + tlvs_offset,
1113 (char *)sr_ihdr + tlvs_offset, tlvs_length);
1114 }
1115
1116 #ifdef CONFIG_IPV6_SEG6_HMAC
1117 if (sr_has_hmac(sr_phdr)) {
1118 struct net *net = NULL;
1119
1120 if (skb->dev)
1121 net = dev_net(skb->dev);
1122 else if (skb->sk)
1123 net = sock_net(skb->sk);
1124
1125 WARN_ON(!net);
1126
1127 if (net)
1128 seg6_push_hmac(net, saddr, sr_phdr);
1129 }
1130 #endif
1131
1132 sr_phdr->nexthdr = *proto;
1133 *proto = NEXTHDR_ROUTING;
1134 }
1135
1136 static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
1137 struct ipv6_rt_hdr *opt,
1138 struct in6_addr **addr_p, struct in6_addr *saddr)
1139 {
1140 switch (opt->type) {
1141 case IPV6_SRCRT_TYPE_0:
1142 case IPV6_SRCRT_STRICT:
1143 case IPV6_SRCRT_TYPE_2:
1144 ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
1145 break;
1146 case IPV6_SRCRT_TYPE_4:
1147 ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
1148 break;
1149 default:
1150 break;
1151 }
1152 }
1153
1154 static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
1155 {
1156 struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));
1157
1158 memcpy(h, opt, ipv6_optlen(opt));
1159 h->nexthdr = *proto;
1160 *proto = type;
1161 }
1162
1163 void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
1164 u8 *proto,
1165 struct in6_addr **daddr, struct in6_addr *saddr)
1166 {
1167 if (opt->srcrt) {
1168 ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
1169 /*
1170 * IPV6_RTHDRDSTOPTS is ignored
1171 * unless IPV6_RTHDR is set (RFC3542).
1172 */
1173 if (opt->dst0opt)
1174 ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
1175 }
1176 if (opt->hopopt)
1177 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
1178 }
1179
1180 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
1181 {
1182 if (opt->dst1opt)
1183 ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
1184 }
1185 EXPORT_SYMBOL(ipv6_push_frag_opts);
1186
1187 struct ipv6_txoptions *
1188 ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
1189 {
1190 struct ipv6_txoptions *opt2;
1191
1192 opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
1193 if (opt2) {
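/* The copied ipv6_txoptions still carries pointers into the old
 * buffer; rebase them by the distance between the two allocations.
 */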
1194 long dif = (char *)opt2 - (char *)opt;
1195 memcpy(opt2, opt, opt->tot_len);
1196 if (opt2->hopopt)
1197 *((char **)&opt2->hopopt) += dif;
1198 if (opt2->dst0opt)
1199 *((char **)&opt2->dst0opt) += dif;
1200 if (opt2->dst1opt)
1201 *((char **)&opt2->dst1opt) += dif;
1202 if (opt2->srcrt)
1203 *((char **)&opt2->srcrt) += dif;
1204 refcount_set(&opt2->refcnt, 1);
1205 }
1206 return opt2;
1207 }
1208 EXPORT_SYMBOL_GPL(ipv6_dup_options);
1209
1210 static void ipv6_renew_option(int renewtype,
1211 struct ipv6_opt_hdr **dest,
1212 struct ipv6_opt_hdr *old,
1213 struct ipv6_opt_hdr *new,
1214 int newtype, char **p)
1215 {
1216 struct ipv6_opt_hdr *src;
1217
1218 src = (renewtype == newtype ? new : old);
1219 if (!src)
1220 return;
1221
1222 memcpy(*p, src, ipv6_optlen(src));
1223 *dest = (struct ipv6_opt_hdr *)*p;
1224 *p += CMSG_ALIGN(ipv6_optlen(*dest));
1225 }
1226
1227 /**
1228 * ipv6_renew_options - replace a specific ext hdr with a new one.
1229 *
1230 * @sk: sock from which to allocate memory
1231 * @opt: original options
1232 * @newtype: option type to replace in @opt
1233 * @newopt: new option of type @newtype to replace (user-mem)
1234 *
1235 * Returns a new set of options which is a copy of @opt with the
1236 * option type @newtype replaced with @newopt.
1237 *
1238 * @opt may be NULL, in which case a new set of options is returned
1239 * containing just @newopt.
1240 *
1241 * @newopt may be NULL, in which case the specified option type is
1242 * not copied into the new set of options.
1243 *
1244 * The new set of options is allocated from the socket option memory
1245 * buffer of @sk.
1246 */
1247 struct ipv6_txoptions *
1248 ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1249 int newtype, struct ipv6_opt_hdr *newopt)
1250 {
1251 int tot_len = 0;
1252 char *p;
1253 struct ipv6_txoptions *opt2;
1254
1255 if (opt) {
1256 if (newtype != IPV6_HOPOPTS && opt->hopopt)
1257 tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
1258 if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
1259 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
1260 if (newtype != IPV6_RTHDR && opt->srcrt)
1261 tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
1262 if (newtype != IPV6_DSTOPTS && opt->dst1opt)
1263 tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
1264 }
1265
1266 if (newopt)
1267 tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
1268
1269 if (!tot_len)
1270 return NULL;
1271
1272 tot_len += sizeof(*opt2);
1273 opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
1274 if (!opt2)
1275 return ERR_PTR(-ENOBUFS);
1276
1277 memset(opt2, 0, tot_len);
1278 refcount_set(&opt2->refcnt, 1);
1279 opt2->tot_len = tot_len;
1280 p = (char *)(opt2 + 1);
1281
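/* Copy the surviving options right after the ipv6_txoptions struct,
 * CMSG-aligned, in the order hop-by-hop, dst0, srcrt, dst1.
 */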
1282 ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
1283 (opt ? opt->hopopt : NULL),
1284 newopt, newtype, &p);
1285 ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
1286 (opt ? opt->dst0opt : NULL),
1287 newopt, newtype, &p);
1288 ipv6_renew_option(IPV6_RTHDR,
1289 (struct ipv6_opt_hdr **)&opt2->srcrt,
1290 (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
1291 newopt, newtype, &p);
1292 ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
1293 (opt ? opt->dst1opt : NULL),
1294 newopt, newtype, &p);
1295
1296 opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
1297 (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
1298 (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
1299 opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
1300
1301 return opt2;
1302 }
1303
1304 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
1305 struct ipv6_txoptions *opt)
1306 {
1307 /*
1308 * ignore the dest before srcrt unless srcrt is being included.
1309 * --yoshfuji
1310 */
1311 if (opt && opt->dst0opt && !opt->srcrt) {
1312 if (opt_space != opt) {
1313 memcpy(opt_space, opt, sizeof(*opt_space));
1314 opt = opt_space;
1315 }
1316 opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
1317 opt->dst0opt = NULL;
1318 }
1319
1320 return opt;
1321 }
1322 EXPORT_SYMBOL_GPL(ipv6_fixup_options);
1323
1324 /**
1325 * fl6_update_dst - update flowi destination address with info given
1326 * by srcrt option, if any.
1327 *
1328 * @fl6: flowi6 for which daddr is to be updated
1329 * @opt: struct ipv6_txoptions in which to look for srcrt opt
1330 * @orig: copy of original daddr address if modified
1331 *
1332 * Returns NULL if there are no txoptions or no srcrt; otherwise returns
1333 * orig, with the initial value of fl6->daddr stored in orig.
1334 */
1335 struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
1336 const struct ipv6_txoptions *opt,
1337 struct in6_addr *orig)
1338 {
1339 if (!opt || !opt->srcrt)
1340 return NULL;
1341
1342 *orig = fl6->daddr;
1343
1344 switch (opt->srcrt->type) {
1345 case IPV6_SRCRT_TYPE_0:
1346 case IPV6_SRCRT_STRICT:
1347 case IPV6_SRCRT_TYPE_2:
1348 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
1349 break;
1350 case IPV6_SRCRT_TYPE_4:
1351 {
1352 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;
1353
1354 fl6->daddr = srh->segments[srh->segments_left];
1355 break;
1356 }
1357 default:
1358 return NULL;
1359 }
1360
1361 return orig;
1362 }
1363 EXPORT_SYMBOL_GPL(fl6_update_dst);
1364