• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Internet Control Message Protocol (ICMPv6)
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on net/ipv4/icmp.c
10  *
11  *	RFC 1885
12  */
13 
14 /*
15  *	Changes:
16  *
17  *	Andi Kleen		:	exception handling
18  *	Andi Kleen			add rate limits. never reply to a icmp.
19  *					add more length checks and other fixes.
20  *	yoshfuji		:	ensure to sent parameter problem for
21  *					fragments.
22  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
23  *	Randy Dunlap and
24  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
25  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
26  */
27 
28 #define pr_fmt(fmt) "IPv6: " fmt
29 
30 #include <linux/module.h>
31 #include <linux/errno.h>
32 #include <linux/types.h>
33 #include <linux/socket.h>
34 #include <linux/in.h>
35 #include <linux/kernel.h>
36 #include <linux/sockios.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/init.h>
40 #include <linux/netfilter.h>
41 #include <linux/slab.h>
42 
43 #ifdef CONFIG_SYSCTL
44 #include <linux/sysctl.h>
45 #endif
46 
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/icmpv6.h>
50 
51 #include <net/ip.h>
52 #include <net/sock.h>
53 
54 #include <net/ipv6.h>
55 #include <net/ip6_checksum.h>
56 #include <net/ping.h>
57 #include <net/protocol.h>
58 #include <net/raw.h>
59 #include <net/rawv6.h>
60 #include <net/transp_v6.h>
61 #include <net/ip6_route.h>
62 #include <net/addrconf.h>
63 #include <net/icmp.h>
64 #include <net/xfrm.h>
65 #include <net/inet_common.h>
66 #include <net/dsfield.h>
67 #include <net/l3mdev.h>
68 
69 #include <linux/uaccess.h>
70 
71 /*
72  *	The ICMP socket(s). This is the most convenient way to flow control
73  *	our ICMP output as well as maintain a clean interface throughout
74  *	all layers. All Socketless IP sends will soon be gone.
75  *
76  *	On SMP we have one ICMP socket per-cpu.
77  */
icmpv6_sk(struct net * net)78 static struct sock *icmpv6_sk(struct net *net)
79 {
80 	return this_cpu_read(*net->ipv6.icmp_sk);
81 }
82 
/* Error handler for IPPROTO_ICMPV6: invoked when an ICMPv6 error arrives
 * whose embedded packet was itself ICMPv6 (e.g. one of our echo requests).
 * Propagates PMTU updates and redirects, and forwards errors about echo
 * requests to the ping socket layer.
 */
static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
	struct net *net = dev_net(skb->dev);

	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
	else if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));

	/* Only error (non-informational) types are relevant to ping_err(),
	 * and only when the embedded packet was an echo request.
	 */
	if (!(type & ICMPV6_INFOMSG_MASK))
		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
			ping_err(skb, offset, ntohl(info));

	return 0;
}
102 
static int icmpv6_rcv(struct sk_buff *skb);

/* Registration record for IPPROTO_ICMPV6 in the inet6 protocol table;
 * installed by icmpv6_init() below.
 */
static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.err_handler	=	icmpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
110 
/* Called with BH disabled */
/* Try to take this CPU's ICMPv6 socket lock; returns the locked socket,
 * or NULL if the lock is already held (re-entry from the output path).
 */
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		return NULL;
	}
	return sk;
}
126 
icmpv6_xmit_unlock(struct sock * sk)127 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
128 {
129 	spin_unlock(&sk->sk_lock.slock);
130 }
131 
132 /*
133  * Figure out, may we reply to this packet with icmp error.
134  *
135  * We do not reply, if:
136  *	- it was icmp error message.
137  *	- it is truncated, so that it is known, that protocol is ICMPV6
138  *	  (i.e. in the middle of some exthdr)
139  *
140  *	--ANK (980726)
141  */
142 
/* Return true if we must NOT send an ICMPv6 error in response to @skb:
 * either the packet is truncated inside its extension-header chain, or it
 * is itself an ICMPv6 error message.
 */
static bool is_ineligible(const struct sk_buff *skb)
{
	/* Offset of the first byte past the fixed IPv6 header. */
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* Truncated: transport protocol unknown, never reply. */
	if (len < 0)
		return true;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
	if (ptr < 0)
		return false;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);

		/* Based on RFC 8200, Section 4.5 Fragment Header, return
		 * false if this is a fragment packet with no icmp header info.
		 */
		if (!tp && frag_off != 0)
			return false;
		else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
			return true;
	}
	return false;
}
172 
icmpv6_mask_allow(struct net * net,int type)173 static bool icmpv6_mask_allow(struct net *net, int type)
174 {
175 	if (type > ICMPV6_MSG_MAX)
176 		return true;
177 
178 	/* Limit if icmp type is set in ratemask. */
179 	if (!test_bit(type, net->ipv6.sysctl.icmpv6_ratemask))
180 		return true;
181 
182 	return false;
183 }
184 
/* Consult the global ICMP token bucket for @type. Sets *apply_ratelimit
 * when the global limiter granted a token, so the caller knows a token
 * must be consumed (icmp_global_consume()) if the packet is actually sent.
 */
static bool icmpv6_global_allow(struct net *net, int type,
				bool *apply_ratelimit)
{
	/* Exempt types bypass the limiter entirely. */
	if (icmpv6_mask_allow(net, type))
		return true;

	if (icmp_global_allow()) {
		*apply_ratelimit = true;
		return true;
	}
	return false;
}
197 
198 /*
199  * Check the ICMP output rate limit
200  */
/* Per-destination rate limit check. Returns true if sending is allowed.
 * @apply_ratelimit: result from icmpv6_global_allow(); when false the
 * type is exempt and no limiting (or token consumption) happens at all.
 */
static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
			       struct flowi6 *fl6, bool apply_ratelimit)
{
	struct net *net = sock_net(sk);
	struct dst_entry *dst;
	bool res = false;

	if (!apply_ratelimit)
		return true;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		/* Loopback traffic is never rate limited. */
		res = true;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;
		struct inet_peer *peer;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
		res = inet_peer_xrlim_allow(peer, tmo);
		if (peer)
			inet_putpeer(peer);
	}
	/* Burn the global token only if we will actually transmit. */
	if (res)
		icmp_global_consume();
	dst_release(dst);
	return res;
}
241 
/* Return true if the route toward fl6->daddr carries a preferred source
 * address; used to decide whether icmp6_send() must pick a source address
 * itself. Note: @type is not examined by this function.
 */
static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
				  struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
	struct dst_entry *dst;
	bool res = false;

	dst = ip6_route_output(net, sk, fl6);
	if (!dst->error) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		struct in6_addr prefsrc;

		rt6_get_prefsrc(rt, &prefsrc);
		/* all-zero prefsrc means the route has none configured */
		res = !ipv6_addr_any(&prefsrc);
	}
	dst_release(dst);
	return res;
}
260 
261 /*
262  *	an inline helper for the "simple" if statement below
263  *	checks if parameter problem report is caused by an
264  *	unrecognized IPv6 option that has the Option Type
265  *	highest-order two bits set to 10
266  */
267 
/* Return true if the option byte at @offset (relative to the network
 * header) has its highest-order two bits equal to 10, i.e. "discard and
 * send ICMP Parameter Problem even for multicast" per RFC 8200 s4.2.
 */
static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	/* If the option byte is not even in the packet, err on replying. */
	if (!op)
		return true;
	return (*op & 0xC0) == 0x80;
}
278 
/* Copy the template header @thdr into the first queued skb, compute the
 * ICMPv6 checksum over all fragments queued on @sk's write queue (length
 * @len, pseudo-header from @fl6), and transmit the pending frames.
 */
void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		return;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	/* Zero the checksum field before folding it into the sum. */
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Single fragment: extend its partial csum over the header. */
		skb->csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      skb->csum);
	} else {
		__wsum tmp_csum = 0;

		/* Multiple fragments: accumulate each skb's csum first. */
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
}
316 
/* Cursor state handed to icmpv6_getfrag() by ip6_append_data(). */
struct icmpv6_msg {
	struct sk_buff	*skb;		/* original packet being quoted */
	int		offset;		/* start of quoted data within skb */
	uint8_t		type;		/* outgoing ICMPv6 message type */
};
322 
/* ip6_append_data() getfrag callback: copy @len bytes of the quoted
 * packet into the new fragment while accumulating the checksum.
 */
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	/* For error messages, tie the reply to the original's conntrack. */
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}
336 
#if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Mobile IPv6: if the packet carries a Home Address destination option,
 * swap the IPv6 source address with the home address so the error is
 * generated against the mobile node's home address (RFC 6275 semantics
 * — TODO confirm exact RFC reference).
 */
static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			tmp = iph->saddr;
			iph->saddr = hao->addr;
			hao->addr = tmp;
		}
	}
}
#else
/* No-op when Mobile IPv6 support is compiled out. */
static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
#endif
359 
/* Look up the output route for an outgoing ICMPv6 error, including the
 * xfrm (IPsec) transformation. If the straight xfrm lookup is denied
 * (-EPERM), retry with a flow decoded in reverse from the offending
 * packet, which is how ICMP is allowed through matching policies.
 * Returns a dst or an ERR_PTR().
 */
static struct dst_entry *icmpv6_route_lookup(struct net *net,
					     struct sk_buff *skb,
					     struct sock *sk,
					     struct flowi6 *fl6)
{
	struct dst_entry *dst, *dst2;
	struct flowi6 fl2;
	int err;

	err = ip6_dst_lookup(net, sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);

	/*
	 * We won't send icmp if the destination is known
	 * anycast.
	 */
	if (ipv6_anycast_destination(dst, &fl6->daddr)) {
		net_dbg_ratelimited("icmp6_send: acast source\n");
		dst_release(dst);
		return ERR_PTR(-EINVAL);
	}

	/* No need to clone since we're just using its address. */
	dst2 = dst;

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
	if (!IS_ERR(dst)) {
		/* xfrm left the route untouched: done. */
		if (dst != dst2)
			return dst;
	} else {
		/* -EPERM means "blocked by policy": fall through and retry
		 * with the reverse-decoded flow; any other error is final.
		 */
		if (PTR_ERR(dst) == -EPERM)
			dst = NULL;
		else
			return dst;
	}

	/* Relookup path: derive fl2 from the offending packet in reverse. */
	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
	if (err)
		goto relookup_failed;

	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
	if (!IS_ERR(dst2)) {
		dst_release(dst);
		dst = dst2;
	} else {
		err = PTR_ERR(dst2);
		if (err == -EPERM) {
			dst_release(dst);
			return dst2;
		} else
			goto relookup_failed;
	}

relookup_failed:
	/* If the first xfrm_lookup() succeeded unchanged, keep that dst. */
	if (dst)
		return dst;
	return ERR_PTR(err);
}
423 
/* Resolve the "real" device a packet arrived on, unwrapping loopback and
 * L3 master (VRF) devices via the attached route's inet6_dev.
 */
static struct net_device *icmp6_dev(const struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	/* for local traffic to local address, skb dev is the loopback
	 * device. Check if there is a dst attached to the skb and if so
	 * get the real device index. Same is needed for replies to a link
	 * local address on a device enslaved to an L3 master device
	 */
	if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
		const struct rt6_info *rt6 = skb_rt6_info(skb);

		/* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
		 * and ip6_null_entry could be set to skb if no route is found.
		 */
		if (rt6 && rt6->rt6i_idev)
			dev = rt6->rt6i_idev->dev;
	}

	return dev;
}
445 
icmp6_iif(const struct sk_buff * skb)446 static int icmp6_iif(const struct sk_buff *skb)
447 {
448 	return icmp6_dev(skb)->ifindex;
449 }
450 
451 /*
452  *	Send an ICMP message in response to a packet in error
453  */
/* Build and transmit an ICMPv6 error of @type/@code (extra field @info)
 * in response to the offending packet @skb, quoting as much of it as fits
 * in the minimum MTU. @force_saddr, when non-NULL, overrides source
 * address selection. Silently returns when RFC 4443 forbids a reply
 * (multicast/anycast destination, ICMP error as trigger, rate limits...).
 */
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
		const struct in6_addr *force_saddr,
		const struct inet6_skb_parm *parm)
{
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct net *net;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	bool apply_ratelimit = false;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct ipcm6_cookie ipc6;
	int iif = 0;
	int addr_type = 0;
	int len;
	u32 mark;

	/* Sanity: the IPv6 header must lie fully within the skb's data. */
	if ((u8 *)hdr < skb->head ||
	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
		return;

	if (!skb->dev)
		return;
	net = dev_net(skb->dev);
	mark = IP6_REPLY_MARK(net, skb->mark);
	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmp6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	/* If the packet was addressed to us, reply from that address. */
	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
	    ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
		saddr = &hdr->daddr;

	/*
	 *	Dest addr check
	 */

	if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
		/* Multicast/non-local destination: reply only for PKT_TOOBIG
		 * or an unrecognized option with the "10" high bits set.
		 */
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 *	Source addr check
	 */

	if (__ipv6_addr_needs_scope_id(addr_type)) {
		iif = icmp6_iif(skb);
	} else {
		/*
		 * The source device is used for looking up which routing table
		 * to use for sending an ICMP error.
		 */
		iif = l3mdev_master_ifindex(skb->dev);
	}

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	/*
	 *	Never answer to a ICMP packet.
	 */
	if (is_ineligible(skb)) {
		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	/* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
	local_bh_disable();

	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
	if (!(skb->dev->flags & IFF_LOOPBACK) &&
	    !icmpv6_global_allow(net, type, &apply_ratelimit))
		goto out_bh_enable;

	mip6_addr_swap(skb, parm);

	sk = icmpv6_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = hdr->saddr;
	if (force_saddr)
		saddr = force_saddr;
	if (saddr) {
		fl6.saddr = *saddr;
	} else if (!icmpv6_rt_has_prefsrc(sk, type, &fl6)) {
		/* select a more meaningful saddr from input if */
		struct net_device *in_netdev;

		in_netdev = dev_get_by_index(net, parm->iif);
		if (in_netdev) {
			ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr,
					   inet6_sk(sk)->srcprefs,
					   &fl6.saddr);
			dev_put(in_netdev);
		}
	}
	fl6.flowi6_mark = mark;
	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	fl6.flowi6_uid = sock_net_uid(net, NULL);
	fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	np = inet6_sk(sk);

	/* Per-destination rate limit (after the global one above). */
	if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	ipcm6_init_sk(&ipc6, np);
	ipc6.sockc.mark = mark;
	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
	if (IS_ERR(dst))
		goto out;

	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	len = skb->len - msg.offset;
	/* Quote at most what fits in the minimum MTU after our headers. */
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	/* NOTE(review): min_t casts through unsigned int, so a negative len
	 * would already have been clamped to the MTU bound above; this
	 * check looks unreachable — confirm before relying on it.
	 */
	if (len < 0) {
		net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		goto out_dst_release;
	}

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
			    len + sizeof(struct icmp6hdr),
			    sizeof(struct icmp6hdr),
			    &ipc6, &fl6, (struct rt6_info *)dst,
			    MSG_DONTWAIT)) {
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
					   len + sizeof(struct icmp6hdr));
	}
	rcu_read_unlock();
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}
EXPORT_SYMBOL(icmp6_send);
647 
648 /* Slightly more convenient version of icmp6_send.
649  */
/* Send a Parameter Problem error pointing at offset @pos, then free the
 * offending skb — callers must not touch @skb afterwards.
 */
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
	kfree_skb(skb);
}
655 
656 /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
657  * if sufficient data bytes are available
658  * @nhs is the size of the tunnel header(s) :
659  *  Either an IPv4 header for SIT encap
660  *         an IPv4 header + GRE header for GRE encap
661  */
/* Translate an inbound ICMPv4 error about a tunnelled IPv6 packet into
 * an ICMPv6 error sent to the inner IPv6 source. Returns 0 on success,
 * 1 if the packet is too short or allocation fails.
 * @nhs: bytes of outer tunnel header(s) to strip before the inner IPv6
 * header; @data_len: RFC 4884 original-datagram length (0 disables the
 * extension handling).
 */
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
			       unsigned int data_len)
{
	struct in6_addr temp_saddr;
	struct rt6_info *rt;
	struct sk_buff *skb2;
	u32 info = 0;

	/* Need outer header + inner IPv6 header + 8 bytes of payload. */
	if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8))
		return 1;

	/* RFC 4884 (partial) support for ICMP extensions */
	if (data_len < 128 || (data_len & 7) || skb->len < data_len)
		data_len = 0;

	/* A writable copy is needed only when we must shuffle bytes around
	 * for the RFC 4884 padding below; otherwise a clone suffices.
	 */
	skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC);

	if (!skb2)
		return 1;

	skb_dst_drop(skb2);
	skb_pull(skb2, nhs);
	skb_reset_network_header(skb2);

	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
			skb, 0);

	if (rt && rt->dst.dev)
		skb2->dev = rt->dst.dev;

	/* Use the v4-mapped form of the outer IPv4 source as our source. */
	ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr);

	if (data_len) {
		/* RFC 4884 (partial) support :
		 * insert 0 padding at the end, before the extensions
		 */
		__skb_push(skb2, nhs);
		skb_reset_network_header(skb2);
		memmove(skb2->data, skb2->data + nhs, data_len - nhs);
		memset(skb2->data + data_len - nhs, 0, nhs);
		/* RFC 4884 4.5 : Length is measured in 64-bit words,
		 * and stored in reserved[0]
		 */
		info = (data_len/8) << 24;
	}
	if (type == ICMP_TIME_EXCEEDED)
		icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			   info, &temp_saddr, IP6CB(skb2));
	else
		icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
			   info, &temp_saddr, IP6CB(skb2));
	if (rt)
		ip6_rt_put(rt);

	kfree_skb(skb2);

	return 0;
}
EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
721 
/* Answer an Echo Request: build an Echo Reply mirroring the request's
 * payload back to its source, honoring the echo-ignore sysctls and both
 * rate limiters.
 */
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	bool apply_ratelimit = false;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	u32 mark = IP6_REPLY_MARK(net, skb->mark);
	bool acast;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
	    net->ipv6.sysctl.icmpv6_echo_ignore_multicast)
		return;

	saddr = &ipv6_hdr(skb)->daddr;

	acast = ipv6_anycast_destination(skb_dst(skb), saddr);
	if (acast && net->ipv6.sysctl.icmpv6_echo_ignore_anycast)
		return;

	/* Reply from the request's destination address only when it was
	 * unicast to us (or anycast with anycast_src_echo_reply enabled);
	 * otherwise let source address selection pick one.
	 */
	if (!ipv6_unicast_destination(skb) &&
	    !(net->ipv6.sysctl.anycast_src_echo_reply && acast))
		saddr = NULL;

	/* Reply header = request header with the type flipped. */
	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl6, 0, sizeof(fl6));
	if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES)
		fl6.flowlabel = ip6_flowlabel(ipv6_hdr(skb));

	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = ipv6_hdr(skb)->saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_oif = icmp6_iif(skb);
	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
	fl6.flowi6_mark = mark;
	fl6.flowi6_uid = sock_net_uid(net, NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	local_bh_disable();
	sk = icmpv6_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	np = inet6_sk(sk);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	if (ip6_dst_lookup(net, sk, &dst, &fl6))
		goto out;
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
	if (IS_ERR(dst))
		goto out;

	/* Check the ratelimit */
	if ((!(skb->dev->flags & IFF_LOOPBACK) &&
	    !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) ||
	    !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit))
		goto out_dst_release;

	idev = __in6_dev_get(skb->dev);

	/* Quote the whole request (offset 0) into the reply. */
	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	ipcm6_init_sk(&ipc6, np);
	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
	ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
	ipc6.sockc.mark = mark;

	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
			    skb->len + sizeof(struct icmp6hdr),
			    sizeof(struct icmp6hdr), &ipc6, &fl6,
			    (struct rt6_info *)dst, MSG_DONTWAIT)) {
		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
					   skb->len + sizeof(struct icmp6hdr));
	}
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}
821 
/* Deliver a received ICMPv6 error to the upper-layer protocol of the
 * embedded (quoted) packet: skip the inner extension headers, then call
 * the registered err_handler and notify matching raw sockets.
 */
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
	const struct inet6_protocol *ipprot;
	int inner_offset;
	__be16 frag_off;
	u8 nexthdr;
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
						&nexthdr, &frag_off);
		if (inner_offset < 0)
			goto out;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Checkin header including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset+8))
		goto out;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not able f.e. to make source routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */

	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);

	/* Raw sockets bound to this protocol also get the error. */
	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
	return;

out:
	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
}
865 
866 /*
867  *	Handle icmp messages
868  */
869 
/* Main ICMPv6 input routine: validate policy and checksum, update MIB
 * counters, then dispatch on the message type. Always consumes @skb and
 * returns 0.
 */
static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct net_device *dev = icmp6_dev(skb);
	struct inet6_dev *idev = __in6_dev_get(dev);
	const struct in6_addr *saddr, *daddr;
	struct icmp6hdr *hdr;
	u8 type;
	bool success = false;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		/* ICMP through IPsec is only allowed if the last state
		 * carries XFRM_STATE_ICMP.
		 */
		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop_no_count;

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
			goto drop_no_count;

		/* Temporarily point the network header at the embedded
		 * packet to run the reverse policy check, then restore it.
		 */
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop_no_count;

		skb_set_network_header(skb, nh);
	}

	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
		net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n",
				    saddr, daddr);
		goto csum_error;
	}

	if (!pskb_pull(skb, sizeof(*hdr)))
		goto discard_it;

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);

	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
			icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		success = ping_rcv(skb);
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   standard destination cache. Seems, only "advanced"
		   destination cache will allow to solve this problem
		   --ANK (980726)
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = icmp6_hdr(skb);

		/* to notify */
		fallthrough;
	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	/* Types we recognize but intentionally ignore. */
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
				    saddr, daddr);

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	/* until the v6 path can be better sorted assume failure and
	 * preserve the status quo behaviour for the rest of the paths to here
	 */
	if (success)
		consume_skb(skb);
	else
		kfree_skb(skb);

	return 0;

csum_error:
	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
	kfree_skb(skb);
	return 0;
}
1008 
/* Initialize @fl6 for an outgoing ICMPv6 message of @type between
 * @saddr/@daddr on interface @oif, including LSM flow classification.
 */
void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
		      u8 type,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
		      int oif)
{
	memset(fl6, 0, sizeof(*fl6));
	fl6->saddr = *saddr;
	fl6->daddr = *daddr;
	fl6->flowi6_proto	= IPPROTO_ICMPV6;
	fl6->fl6_icmp_type	= type;
	fl6->fl6_icmp_code	= 0;
	fl6->flowi6_oif		= oif;
	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}
1024 
/* Per-netns teardown: destroy every per-CPU ICMPv6 control socket and
 * free the percpu pointer array.
 */
static void __net_exit icmpv6_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv6.icmp_sk, i));
	free_percpu(net->ipv6.icmp_sk);
}
1033 
/* Per-netns setup: create one kernel ICMPv6 control socket per possible
 * CPU. Returns 0 or a negative errno (all partial state is cleaned up).
 */
static int __net_init icmpv6_sk_init(struct net *net)
{
	struct sock *sk;
	int err, i;

	net->ipv6.icmp_sk = alloc_percpu(struct sock *);
	if (!net->ipv6.icmp_sk)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
		if (err < 0) {
			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
			       err);
			goto fail;
		}

		*per_cpu_ptr(net->ipv6.icmp_sk, i) = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
	}
	return 0;

 fail:
	/* icmpv6_sk_exit() tolerates the partially-filled percpu array. */
	icmpv6_sk_exit(net);
	return err;
}
1065 
/* Per-network-namespace setup/teardown of the ICMPv6 control sockets. */
static struct pernet_operations icmpv6_sk_ops = {
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,
};
1070 
icmpv6_init(void)1071 int __init icmpv6_init(void)
1072 {
1073 	int err;
1074 
1075 	err = register_pernet_subsys(&icmpv6_sk_ops);
1076 	if (err < 0)
1077 		return err;
1078 
1079 	err = -EAGAIN;
1080 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
1081 		goto fail;
1082 
1083 	err = inet6_register_icmp_sender(icmp6_send);
1084 	if (err)
1085 		goto sender_reg_err;
1086 	return 0;
1087 
1088 sender_reg_err:
1089 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
1090 fail:
1091 	pr_err("Failed to register ICMP6 protocol\n");
1092 	unregister_pernet_subsys(&icmpv6_sk_ops);
1093 	return err;
1094 }
1095 
/* Tear down everything icmpv6_init() set up: unhook the error sender,
 * drop the per-net control sockets, then remove the protocol handler.
 */
void icmpv6_cleanup(void)
{
	inet6_unregister_icmp_sender(icmp6_send);
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
1102 
1103 
/* Map of ICMPV6_DEST_UNREACH codes to the errno reported to the socket
 * and whether the error is fatal for the connection.  Indexed directly
 * by the ICMPv6 code in icmpv6_err_convert(), so entry order must match
 * the code values.
 */
static const struct icmp6_err {
	int err;	/* errno delivered to the socket */
	int fatal;	/* non-zero: abort the connection */
} tab_unreach[] = {
	{	/* NOROUTE */
		.err	= ENETUNREACH,
		.fatal	= 0,
	},
	{	/* ADM_PROHIBITED */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* Was NOT_NEIGHBOUR, now reserved */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* ADDR_UNREACH	*/
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* PORT_UNREACH	*/
		.err	= ECONNREFUSED,
		.fatal	= 1,
	},
	{	/* POLICY_FAIL */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* REJECT_ROUTE	*/
		.err	= EACCES,
		.fatal	= 1,
	},
};
1137 
/**
 * icmpv6_err_convert - translate an ICMPv6 error into a socket errno
 * @type: ICMPv6 message type
 * @code: ICMPv6 message code
 * @err: out parameter; receives the errno (EPROTO if unrecognized)
 *
 * Return: non-zero when the error is fatal for the connection.
 */
int icmpv6_err_convert(u8 type, u8 code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		/* Codes beyond tab_unreach keep EPROTO but remain fatal. */
		fatal = 1;
		if (code < ARRAY_SIZE(tab_unreach)) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		/* *err stays at the EPROTO default. */
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}
EXPORT_SYMBOL(icmpv6_err_convert);
1170 
1171 #ifdef CONFIG_SYSCTL
/* Sysctl template cloned per-namespace by ipv6_icmp_sysctl_init(); the
 * .data pointers below reference init_net only as placeholders and are
 * rewritten there, so keep the entry order in sync with that function's
 * table[i].data fixups.
 */
static struct ctl_table ipv6_icmp_table_template[] = {
	{
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "echo_ignore_all",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_all,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname	= "echo_ignore_multicast",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_multicast,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname	= "echo_ignore_anycast",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_anycast,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname	= "ratemask",
		.data		= &init_net.ipv6.sysctl.icmpv6_ratemask_ptr,
		.maxlen		= ICMPV6_MSG_MAX + 1,
		.mode		= 0644,
		.proc_handler = proc_do_large_bitmap,
	},
	{ },
};
1210 
ipv6_icmp_sysctl_init(struct net * net)1211 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
1212 {
1213 	struct ctl_table *table;
1214 
1215 	table = kmemdup(ipv6_icmp_table_template,
1216 			sizeof(ipv6_icmp_table_template),
1217 			GFP_KERNEL);
1218 
1219 	if (table) {
1220 		table[0].data = &net->ipv6.sysctl.icmpv6_time;
1221 		table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all;
1222 		table[2].data = &net->ipv6.sysctl.icmpv6_echo_ignore_multicast;
1223 		table[3].data = &net->ipv6.sysctl.icmpv6_echo_ignore_anycast;
1224 		table[4].data = &net->ipv6.sysctl.icmpv6_ratemask_ptr;
1225 	}
1226 	return table;
1227 }
1228 #endif
1229