• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Internet Control Message Protocol (ICMPv6)
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on net/ipv4/icmp.c
10  *
11  *	RFC 1885
12  */
13 
14 /*
15  *	Changes:
16  *
17  *	Andi Kleen		:	exception handling
18  *	Andi Kleen			add rate limits. never reply to a icmp.
19  *					add more length checks and other fixes.
20  *	yoshfuji		:	ensure to sent parameter problem for
21  *					fragments.
22  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
23  *	Randy Dunlap and
24  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
25  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
26  */
27 
28 #define pr_fmt(fmt) "IPv6: " fmt
29 
30 #include <linux/module.h>
31 #include <linux/errno.h>
32 #include <linux/types.h>
33 #include <linux/socket.h>
34 #include <linux/in.h>
35 #include <linux/kernel.h>
36 #include <linux/sockios.h>
37 #include <linux/net.h>
38 #include <linux/skbuff.h>
39 #include <linux/init.h>
40 #include <linux/netfilter.h>
41 #include <linux/slab.h>
42 
43 #ifdef CONFIG_SYSCTL
44 #include <linux/sysctl.h>
45 #endif
46 
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/icmpv6.h>
50 
51 #include <net/ip.h>
52 #include <net/sock.h>
53 
54 #include <net/ipv6.h>
55 #include <net/ip6_checksum.h>
56 #include <net/ping.h>
57 #include <net/protocol.h>
58 #include <net/raw.h>
59 #include <net/rawv6.h>
60 #include <net/seg6.h>
61 #include <net/transp_v6.h>
62 #include <net/ip6_route.h>
63 #include <net/addrconf.h>
64 #include <net/icmp.h>
65 #include <net/xfrm.h>
66 #include <net/inet_common.h>
67 #include <net/dsfield.h>
68 #include <net/l3mdev.h>
69 
70 #include <linux/uaccess.h>
71 
72 /*
73  *	The ICMP socket(s). This is the most convenient way to flow control
74  *	our ICMP output as well as maintain a clean interface throughout
75  *	all layers. All Socketless IP sends will soon be gone.
76  *
77  *	On SMP we have one ICMP socket per-cpu.
78  */
/* Return this CPU's per-netns ICMPv6 control socket (see header comment). */
static struct sock *icmpv6_sk(struct net *net)
{
	return this_cpu_read(*net->ipv6.icmp_sk);
}
83 
/*
 * Error handler registered for IPPROTO_ICMPV6: called when an ICMPv6 error
 * arrives quoting an ICMPv6 packet we sent.  Updates PMTU / redirect state
 * and relays errors about our echo requests to the ping socket layer.
 */
static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
	struct net *net = dev_net(skb->dev);

	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
	else if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));

	/* Only real errors (not informational messages) about an inner
	 * echo request are passed to ping_err().
	 */
	if (!(type & ICMPV6_INFOMSG_MASK))
		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
			ping_err(skb, offset, ntohl(info));

	return 0;
}
103 
104 static int icmpv6_rcv(struct sk_buff *skb);
105 
/* inet6 protocol hooks for IPPROTO_ICMPV6 (receive + error handling). */
static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.err_handler	=	icmpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
111 
112 /* Called with BH disabled */
/* Called with BH disabled */
/*
 * Grab the per-CPU ICMPv6 socket for transmission.  Uses trylock because
 * the lock may already be held on this CPU (recursion through the output
 * path); returns NULL in that case and the caller must drop the message.
 */
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		return NULL;
	}
	return sk;
}
127 
/* Release the per-CPU ICMPv6 socket taken by icmpv6_xmit_lock(). */
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock(&sk->sk_lock.slock);
}
132 
133 /*
134  * Figure out, may we reply to this packet with icmp error.
135  *
136  * We do not reply, if:
137  *	- it was icmp error message.
138  *	- it is truncated, so that it is known, that protocol is ICMPV6
139  *	  (i.e. in the middle of some exthdr)
140  *
141  *	--ANK (980726)
142  */
143 
/*
 * Return true if @skb must NOT be answered with an ICMPv6 error:
 * either it is itself an ICMPv6 error message, or it is truncated
 * inside the extension-header chain (see comment block above).
 */
static bool is_ineligible(const struct sk_buff *skb)
{
	/* Offset of the first extension header past the fixed IPv6 header. */
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	if (len < 0)
		return true;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
	if (ptr < 0)
		return false;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);

		/* Based on RFC 8200, Section 4.5 Fragment Header, return
		 * false if this is a fragment packet with no icmp header info.
		 */
		if (!tp && frag_off != 0)
			return false;
		else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
			return true;
	}
	return false;
}
173 
icmpv6_mask_allow(struct net * net,int type)174 static bool icmpv6_mask_allow(struct net *net, int type)
175 {
176 	if (type > ICMPV6_MSG_MAX)
177 		return true;
178 
179 	/* Limit if icmp type is set in ratemask. */
180 	if (!test_bit(type, net->ipv6.sysctl.icmpv6_ratemask))
181 		return true;
182 
183 	return false;
184 }
185 
icmpv6_global_allow(struct net * net,int type)186 static bool icmpv6_global_allow(struct net *net, int type)
187 {
188 	if (icmpv6_mask_allow(net, type))
189 		return true;
190 
191 	if (icmp_global_allow())
192 		return true;
193 
194 	return false;
195 }
196 
197 /*
198  * Check the ICMP output rate limit
199  */
/*
 * Check the ICMP output rate limit
 */
/*
 * Per-destination rate limiting: looks up the output route for @fl6 and
 * consults the inet_peer token bucket for the destination.  Loopback
 * destinations are always allowed.  Returns true if sending is permitted.
 */
static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
			       struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
	struct dst_entry *dst;
	bool res = false;

	if (icmpv6_mask_allow(net, type))
		return true;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		res = true;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;
		struct inet_peer *peer;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
		res = inet_peer_xrlim_allow(peer, tmo);
		if (peer)
			inet_putpeer(peer);
	}
	/* ip6_route_output() always returns a refcounted dst. */
	dst_release(dst);
	return res;
}
238 
/*
 * Return true if the route toward fl6->daddr carries a preferred source
 * address (RTA_PREFSRC), i.e. the normal source selection would already
 * pick a meaningful saddr for the ICMPv6 error.
 */
static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
				  struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
	struct dst_entry *dst;
	bool res = false;

	dst = ip6_route_output(net, sk, fl6);
	if (!dst->error) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		struct in6_addr prefsrc;

		rt6_get_prefsrc(rt, &prefsrc);
		res = !ipv6_addr_any(&prefsrc);
	}
	dst_release(dst);
	return res;
}
257 
258 /*
259  *	an inline helper for the "simple" if statement below
260  *	checks if parameter problem report is caused by an
261  *	unrecognized IPv6 option that has the Option Type
262  *	highest-order two bits set to 10
263  */
264 
/*
 * Return true when the option byte at @offset has its two high-order
 * bits equal to 10 (ICMPV6_UNK_OPTION must be reported even for
 * multicast destinations; see comment block above).  Also returns true
 * when the byte cannot be read, erring on the side of reporting.
 */
static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (!op)
		return true;
	return (*op & 0xC0) == 0x80;
}
275 
/*
 * Finalize and transmit the ICMPv6 message queued on @sk's write queue:
 * copy the prepared header @thdr into place, compute the ICMPv6 checksum
 * over all queued fragments (single- and multi-skb cases differ), and
 * push the pending frames out.  @len is the ICMPv6 length used in the
 * pseudo-header checksum.
 */
void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		return;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	/* Checksum field must be zero while the sum is computed. */
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Single skb: fold the header into its existing csum. */
		skb->csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      skb->csum);
	} else {
		__wsum tmp_csum = 0;

		/* Multiple skbs: accumulate each fragment's csum first. */
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
}
313 
/* Copy context handed to icmpv6_getfrag() via ip6_append_data(). */
struct icmpv6_msg {
	struct sk_buff	*skb;	/* packet being quoted in the ICMP payload */
	int		offset;	/* offset into skb where copying starts */
	uint8_t		type;	/* ICMPv6 type of the message being built */
};
319 
/*
 * getfrag callback for ip6_append_data(): copies @len bytes of the
 * quoted packet into the output buffer while accumulating the checksum.
 * For error messages (not informational ones), the conntrack entry of
 * the original skb is attached so NAT can translate the inner packet.
 */
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}
333 
#if IS_ENABLED(CONFIG_IPV6_MIP6)
/*
 * Mobile IPv6 (RFC 6275): if the packet carries a Home Address Option,
 * swap the source address with the home address so the ICMPv6 error is
 * sent to the mobile node's home address.  No-op without a HAO.
 */
static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			/* Swap iph->saddr and hao->addr. */
			tmp = iph->saddr;
			iph->saddr = hao->addr;
			hao->addr = tmp;
		}
	}
}
#else
/* Without CONFIG_IPV6_MIP6 there is never a HAO to swap. */
static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
#endif
356 
/*
 * Resolve the output route for an outgoing ICMPv6 error, including the
 * XFRM (IPsec) policy lookup.  If the forward XFRM lookup is denied with
 * -EPERM, retry with a flow decoded in the reverse direction from the
 * offending packet (XFRM_LOOKUP_ICMP), which may be allowed by policy.
 * Returns a referenced dst_entry or an ERR_PTR().
 */
static struct dst_entry *icmpv6_route_lookup(struct net *net,
					     struct sk_buff *skb,
					     struct sock *sk,
					     struct flowi6 *fl6)
{
	struct dst_entry *dst, *dst2;
	struct flowi6 fl2;
	int err;

	err = ip6_dst_lookup(net, sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);

	/*
	 * We won't send icmp if the destination is known
	 * anycast.
	 */
	if (ipv6_anycast_destination(dst, &fl6->daddr)) {
		net_dbg_ratelimited("icmp6_send: acast source\n");
		dst_release(dst);
		return ERR_PTR(-EINVAL);
	}

	/* No need to clone since we're just using its address. */
	dst2 = dst;

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
	if (!IS_ERR(dst)) {
		/* xfrm_lookup returned a (possibly transformed) route. */
		if (dst != dst2)
			return dst;
	} else {
		if (PTR_ERR(dst) == -EPERM)
			dst = NULL;	/* denied by policy: try reverse decode */
		else
			return dst;
	}

	/* Build the reverse flow from the packet that triggered the error. */
	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
	if (err)
		goto relookup_failed;

	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
	if (!IS_ERR(dst2)) {
		dst_release(dst);
		dst = dst2;
	} else {
		err = PTR_ERR(dst2);
		if (err == -EPERM) {
			dst_release(dst);
			return dst2;
		} else
			goto relookup_failed;
	}

relookup_failed:
	/* Fall back to the original (untransformed) route if we kept it. */
	if (dst)
		return dst;
	return ERR_PTR(err);
}
420 
/*
 * Determine the real input device for @skb, resolving loopback and
 * L3-master (VRF) devices back to the underlying interface via the
 * attached route (see comment below).
 */
static struct net_device *icmp6_dev(const struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	/* for local traffic to local address, skb dev is the loopback
	 * device. Check if there is a dst attached to the skb and if so
	 * get the real device index. Same is needed for replies to a link
	 * local address on a device enslaved to an L3 master device
	 */
	if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
		const struct rt6_info *rt6 = skb_rt6_info(skb);

		/* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
		 * and ip6_null_entry could be set to skb if no route is found.
		 */
		if (rt6 && rt6->rt6i_idev)
			dev = rt6->rt6i_idev->dev;
	}

	return dev;
}
442 
/* Interface index of the (resolved) input device for @skb. */
static int icmp6_iif(const struct sk_buff *skb)
{
	return icmp6_dev(skb)->ifindex;
}
447 
448 /*
449  *	Send an ICMP message in response to a packet in error
450  */
/*
 * Send an ICMPv6 error of @type/@code (with @info in the pointer/MTU
 * field) in response to the offending packet @skb.  Enforces the
 * RFC 4443 eligibility rules (no errors about errors, multicast,
 * unspecified/multicast sources) and both global and per-destination
 * rate limits before transmitting via the per-CPU ICMPv6 socket.
 * @force_saddr, if non-NULL, overrides source address selection;
 * @parm carries the inet6 control-block of the offending packet.
 */
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
		const struct in6_addr *force_saddr,
		const struct inet6_skb_parm *parm)
{
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct net *net;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct ipcm6_cookie ipc6;
	int iif = 0;
	int addr_type = 0;
	int len;
	u32 mark;

	/* Sanity check: the IPv6 header must lie fully inside the skb. */
	if ((u8 *)hdr < skb->head ||
	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
		return;

	if (!skb->dev)
		return;
	net = dev_net(skb->dev);
	mark = IP6_REPLY_MARK(net, skb->mark);
	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmp6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
	    ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
		saddr = &hdr->daddr;

	/*
	 *	Dest addr check
	 */

	if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
		/* Multicast/non-host packets may only trigger PKT_TOOBIG,
		 * or PARAMPROB for an unrecognized option (RFC 4443 2.4(e)).
		 */
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 *	Source addr check
	 */

	if (__ipv6_addr_needs_scope_id(addr_type)) {
		iif = icmp6_iif(skb);
	} else {
		/*
		 * The source device is used for looking up which routing table
		 * to use for sending an ICMP error.
		 */
		iif = l3mdev_master_ifindex(skb->dev);
	}

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	/*
	 *	Never answer to a ICMP packet.
	 */
	if (is_ineligible(skb)) {
		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	/* Needed by both icmp_global_allow and icmpv6_xmit_lock */
	local_bh_disable();

	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
	if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
		goto out_bh_enable;

	mip6_addr_swap(skb, parm);

	sk = icmpv6_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = hdr->saddr;
	if (force_saddr)
		saddr = force_saddr;
	if (saddr) {
		fl6.saddr = *saddr;
	} else if (!icmpv6_rt_has_prefsrc(sk, type, &fl6)) {
		/* select a more meaningful saddr from input if */
		struct net_device *in_netdev;

		in_netdev = dev_get_by_index(net, parm->iif);
		if (in_netdev) {
			ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr,
					   inet6_sk(sk)->srcprefs,
					   &fl6.saddr);
			dev_put(in_netdev);
		}
	}
	fl6.flowi6_mark = mark;
	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	fl6.flowi6_uid = sock_net_uid(net, NULL);
	fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	np = inet6_sk(sk);

	/* Per-destination rate limit (token bucket on the inet_peer). */
	if (!icmpv6_xrlim_allow(sk, type, &fl6))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	ipcm6_init_sk(&ipc6, np);
	ipc6.sockc.mark = mark;
	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
	if (IS_ERR(dst))
		goto out;

	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	/* Quote as much of the offending packet as fits in the minimum MTU. */
	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		goto out_dst_release;
	}

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
			    len + sizeof(struct icmp6hdr),
			    sizeof(struct icmp6hdr),
			    &ipc6, &fl6, (struct rt6_info *)dst,
			    MSG_DONTWAIT)) {
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
					   len + sizeof(struct icmp6hdr));
	}
	rcu_read_unlock();
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}
EXPORT_SYMBOL(icmp6_send);
642 
643 /* Slightly more convenient version of icmp6_send.
644  */
/* Slightly more convenient version of icmp6_send.
 */
/*
 * Send a Parameter Problem message pointing at offset @pos, then
 * consume the offending skb.
 */
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
	kfree_skb(skb);
}
650 
651 /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
652  * if sufficient data bytes are available
653  * @nhs is the size of the tunnel header(s) :
654  *  Either an IPv4 header for SIT encap
655  *         an IPv4 header + GRE header for GRE encap
656  */
/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
 * if sufficient data bytes are available
 * @nhs is the size of the tunnel header(s) :
 *  Either an IPv4 header for SIT encap
 *         an IPv4 header + GRE header for GRE encap
 */
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
			       unsigned int data_len)
{
	struct in6_addr temp_saddr;
	struct rt6_info *rt;
	struct sk_buff *skb2;
	u32 info = 0;

	/* Need tunnel header + inner IPv6 header + 8 bytes of payload. */
	if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8))
		return 1;

	/* RFC 4884 (partial) support for ICMP extensions */
	if (data_len < 128 || (data_len & 7) || skb->len < data_len)
		data_len = 0;

	/* skb_copy when we will modify the payload (RFC 4884 padding). */
	skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC);

	if (!skb2)
		return 1;

	skb_dst_drop(skb2);
	skb_pull(skb2, nhs);
	skb_reset_network_header(skb2);

	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
			skb, 0);

	if (rt && rt->dst.dev)
		skb2->dev = rt->dst.dev;

	/* Report the outer IPv4 source as an IPv4-mapped IPv6 address. */
	ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr);

	if (data_len) {
		/* RFC 4884 (partial) support :
		 * insert 0 padding at the end, before the extensions
		 */
		__skb_push(skb2, nhs);
		skb_reset_network_header(skb2);
		memmove(skb2->data, skb2->data + nhs, data_len - nhs);
		memset(skb2->data + data_len - nhs, 0, nhs);
		/* RFC 4884 4.5 : Length is measured in 64-bit words,
		 * and stored in reserved[0]
		 */
		info = (data_len/8) << 24;
	}
	if (type == ICMP_TIME_EXCEEDED)
		icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			   info, &temp_saddr, IP6CB(skb2));
	else
		icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
			   info, &temp_saddr, IP6CB(skb2));
	if (rt)
		ip6_rt_put(rt);

	kfree_skb(skb2);

	return 0;
}
EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
716 
/*
 * Answer an Echo Request (or Extended Echo Request, RFC 8335) carried in
 * @skb.  Applies the echo-related sysctls (ignore multicast/anycast),
 * builds the reply flow, rate-limits it like any other ICMPv6 message,
 * and transmits via the per-CPU ICMPv6 socket.
 */
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	u32 mark = IP6_REPLY_MARK(net, skb->mark);
	bool acast;
	u8 type;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
	    net->ipv6.sysctl.icmpv6_echo_ignore_multicast)
		return;

	saddr = &ipv6_hdr(skb)->daddr;

	acast = ipv6_anycast_destination(skb_dst(skb), saddr);
	if (acast && net->ipv6.sysctl.icmpv6_echo_ignore_anycast)
		return;

	/* For multicast/anycast destinations, let source selection pick
	 * the reply saddr (unless anycast_src_echo_reply allows it).
	 */
	if (!ipv6_unicast_destination(skb) &&
	    !(net->ipv6.sysctl.anycast_src_echo_reply && acast))
		saddr = NULL;

	if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
		type = ICMPV6_EXT_ECHO_REPLY;
	else
		type = ICMPV6_ECHO_REPLY;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = type;

	memset(&fl6, 0, sizeof(fl6));
	if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES)
		fl6.flowlabel = ip6_flowlabel(ipv6_hdr(skb));

	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = ipv6_hdr(skb)->saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_oif = icmp6_iif(skb);
	fl6.fl6_icmp_type = type;
	fl6.flowi6_mark = mark;
	fl6.flowi6_uid = sock_net_uid(net, NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	local_bh_disable();
	sk = icmpv6_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	np = inet6_sk(sk);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	if (ip6_dst_lookup(net, sk, &dst, &fl6))
		goto out;
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
	if (IS_ERR(dst))
		goto out;

	/* Check the ratelimit */
	if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) ||
	    !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6))
		goto out_dst_release;

	idev = __in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = type;

	ipcm6_init_sk(&ipc6, np);
	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
	ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
	ipc6.sockc.mark = mark;

	/* RFC 8335 PROBE: fill in the extended-echo reply fields. */
	if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
		if (!icmp_build_probe(skb, (struct icmphdr *)&tmp_hdr))
			goto out_dst_release;

	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
			    skb->len + sizeof(struct icmp6hdr),
			    sizeof(struct icmp6hdr), &ipc6, &fl6,
			    (struct rt6_info *)dst, MSG_DONTWAIT)) {
		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
					   skb->len + sizeof(struct icmp6hdr));
	}
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}
824 
/*
 * Dispatch a received ICMPv6 error to the upper-layer protocol that sent
 * the quoted packet: skip the inner extension headers, then invoke the
 * matching inet6_protocol err_handler and notify raw sockets.
 */
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	const struct inet6_protocol *ipprot;
	int inner_offset;
	__be16 frag_off;
	u8 nexthdr;
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	seg6_icmp_srh(skb, opt);

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
						&nexthdr, &frag_off);
		if (inner_offset < 0)
			goto out;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Checkin header including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset+8))
		goto out;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not able f.e. to make source routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */

	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, opt, type, code, inner_offset, info);

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
	return;

out:
	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
}
871 
872 /*
873  *	Handle icmp messages
874  */
875 
/*
 * Main receive entry point for IPPROTO_ICMPV6: validate XFRM policy and
 * the checksum, bump statistics, then dispatch on the message type to
 * echo handling, error notification, NDISC, or MLD.  Always consumes
 * @skb (except where a subsystem like igmp6 takes ownership).
 */
static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct net_device *dev = icmp6_dev(skb);
	struct inet6_dev *idev = __in6_dev_get(dev);
	const struct in6_addr *saddr, *daddr;
	struct icmp6hdr *hdr;
	u8 type;
	bool success = false;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		/* Only ICMP-typed XFRM states may carry ICMP this way. */
		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop_no_count;

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
			goto drop_no_count;

		/* Temporarily point the network header at the inner packet
		 * to run the reverse policy check, then restore it.
		 */
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop_no_count;

		skb_set_network_header(skb, nh);
	}

	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
		net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n",
				    saddr, daddr);
		goto csum_error;
	}

	if (!pskb_pull(skb, sizeof(*hdr)))
		goto discard_it;

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);

	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
			icmpv6_echo_reply(skb);
		break;
	case ICMPV6_EXT_ECHO_REQUEST:
		/* RFC 8335 PROBE also requires the IPv4-side enable knob. */
		if (!net->ipv6.sysctl.icmpv6_echo_ignore_all &&
		    READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
			icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		success = ping_rcv(skb);
		break;

	case ICMPV6_EXT_ECHO_REPLY:
		success = ping_rcv(skb);
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   standard destination cache. Seems, only "advanced"
		   destination cache will allow to solve this problem
		   --ANK (980726)
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = icmp6_hdr(skb);

		/* to notify */
		fallthrough;
	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
	case NDISC_REDIRECT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		/* igmp6 takes ownership of the skb. */
		igmp6_event_query(skb);
		return 0;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		return 0;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
				    saddr, daddr);

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */

		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	/* until the v6 path can be better sorted assume failure and
	 * preserve the status quo behaviour for the rest of the paths to here
	 */
	if (success)
		consume_skb(skb);
	else
		kfree_skb(skb);

	return 0;

csum_error:
	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
	kfree_skb(skb);
	return 0;
}
1023 
/*
 * Initialize a flowi6 for an outgoing ICMPv6 message of @type between
 * @saddr and @daddr on interface @oif, and classify it for LSM/security.
 */
void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
		      u8 type,
		      const struct in6_addr *saddr,
		      const struct in6_addr *daddr,
		      int oif)
{
	memset(fl6, 0, sizeof(*fl6));
	fl6->saddr = *saddr;
	fl6->daddr = *daddr;
	fl6->flowi6_proto	= IPPROTO_ICMPV6;
	fl6->fl6_icmp_type	= type;
	fl6->fl6_icmp_code	= 0;
	fl6->flowi6_oif		= oif;
	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
}
1039 
/* Per-netns teardown: destroy every per-CPU control socket. */
static void __net_exit icmpv6_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv6.icmp_sk, i));
	free_percpu(net->ipv6.icmp_sk);
}
1048 
/*
 * Per-netns setup: create one ICMPv6 raw control socket per possible CPU.
 * On partial failure, already-created sockets are torn down via
 * icmpv6_sk_exit().  Returns 0 or a negative errno.
 */
static int __net_init icmpv6_sk_init(struct net *net)
{
	struct sock *sk;
	int err, i;

	net->ipv6.icmp_sk = alloc_percpu(struct sock *);
	if (!net->ipv6.icmp_sk)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		err = inet_ctl_sock_create(&sk, PF_INET6,
					   SOCK_RAW, IPPROTO_ICMPV6, net);
		if (err < 0) {
			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
			       err);
			goto fail;
		}

		*per_cpu_ptr(net->ipv6.icmp_sk, i) = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
	}
	return 0;

 fail:
	/* Destroys the sockets created so far and frees the percpu array. */
	icmpv6_sk_exit(net);
	return err;
}
1080 
/* Per-network-namespace lifecycle hooks for the ICMPv6 control
 * sockets, registered via register_pernet_subsys() in icmpv6_init().
 */
static struct pernet_operations icmpv6_sk_ops = {
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,
};
1085 
icmpv6_init(void)1086 int __init icmpv6_init(void)
1087 {
1088 	int err;
1089 
1090 	err = register_pernet_subsys(&icmpv6_sk_ops);
1091 	if (err < 0)
1092 		return err;
1093 
1094 	err = -EAGAIN;
1095 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
1096 		goto fail;
1097 
1098 	err = inet6_register_icmp_sender(icmp6_send);
1099 	if (err)
1100 		goto sender_reg_err;
1101 	return 0;
1102 
1103 sender_reg_err:
1104 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
1105 fail:
1106 	pr_err("Failed to register ICMP6 protocol\n");
1107 	unregister_pernet_subsys(&icmpv6_sk_ops);
1108 	return err;
1109 }
1110 
/* Module-unload counterpart of icmpv6_init(): unhook the ICMPv6 error
 * sender first so nothing new is emitted, then drop the per-netns
 * control sockets and finally the IPPROTO_ICMPV6 input handler.
 */
void icmpv6_cleanup(void)
{
	inet6_unregister_icmp_sender(icmp6_send);
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
1117 
1118 
/* Translation table for ICMPV6_DEST_UNREACH codes, indexed by the
 * ICMPv6 code field (see icmpv6_err_convert() below):
 *   .err   — errno reported to the affected socket
 *   .fatal — non-zero if the error should abort the connection
 */
static const struct icmp6_err {
	int err;	/* errno delivered to the socket */
	int fatal;	/* non-zero: treat as a hard error */
} tab_unreach[] = {
	{	/* NOROUTE */
		.err	= ENETUNREACH,
		.fatal	= 0,
	},
	{	/* ADM_PROHIBITED */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* Was NOT_NEIGHBOUR, now reserved */
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* ADDR_UNREACH	*/
		.err	= EHOSTUNREACH,
		.fatal	= 0,
	},
	{	/* PORT_UNREACH	*/
		.err	= ECONNREFUSED,
		.fatal	= 1,
	},
	{	/* POLICY_FAIL */
		.err	= EACCES,
		.fatal	= 1,
	},
	{	/* REJECT_ROUTE	*/
		.err	= EACCES,
		.fatal	= 1,
	},
};
1152 
/**
 * icmpv6_err_convert - map a received ICMPv6 error onto an errno
 * @type: ICMPv6 message type
 * @code: ICMPv6 message code
 * @err: output, errno to report to the socket (EPROTO for
 *	 unrecognized types)
 *
 * Return: 1 when the error is fatal for the connection, 0 when it is
 * transient.  Unknown destination-unreachable codes are fatal.
 */
int icmpv6_err_convert(u8 type, u8 code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		if (code < ARRAY_SIZE(tab_unreach)) {
			*err  = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		} else {
			/* reserved/unknown code: keep EPROTO, treat as fatal */
			fatal = 1;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		*err = EPROTO;
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}
EXPORT_SYMBOL(icmpv6_err_convert);
1185 
1186 #ifdef CONFIG_SYSCTL
/* Template for the per-netns net.ipv6.icmp.* sysctls.  The .data
 * pointers reference init_net here; ipv6_icmp_sysctl_init() below
 * kmemdup()s this table and repoints each entry at the namespace's
 * own fields.  Keep the entry order in sync with that function.
 */
static struct ctl_table ipv6_icmp_table_template[] = {
	{
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "echo_ignore_all",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_all,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler = proc_dou8vec_minmax,
	},
	{
		.procname	= "echo_ignore_multicast",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_multicast,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler = proc_dou8vec_minmax,
	},
	{
		.procname	= "echo_ignore_anycast",
		.data		= &init_net.ipv6.sysctl.icmpv6_echo_ignore_anycast,
		.maxlen		= sizeof(u8),
		.mode		= 0644,
		.proc_handler = proc_dou8vec_minmax,
	},
	{
		.procname	= "ratemask",
		.data		= &init_net.ipv6.sysctl.icmpv6_ratemask_ptr,
		.maxlen		= ICMPV6_MSG_MAX + 1,
		.mode		= 0644,
		.proc_handler = proc_do_large_bitmap,
	},
	{ },	/* sentinel */
};
1225 
ipv6_icmp_sysctl_init(struct net * net)1226 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
1227 {
1228 	struct ctl_table *table;
1229 
1230 	table = kmemdup(ipv6_icmp_table_template,
1231 			sizeof(ipv6_icmp_table_template),
1232 			GFP_KERNEL);
1233 
1234 	if (table) {
1235 		table[0].data = &net->ipv6.sysctl.icmpv6_time;
1236 		table[1].data = &net->ipv6.sysctl.icmpv6_echo_ignore_all;
1237 		table[2].data = &net->ipv6.sysctl.icmpv6_echo_ignore_multicast;
1238 		table[3].data = &net->ipv6.sysctl.icmpv6_echo_ignore_anycast;
1239 		table[4].data = &net->ipv6.sysctl.icmpv6_ratemask_ptr;
1240 	}
1241 	return table;
1242 }
1243 #endif
1244