1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPv6 Address [auto]configuration
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 */
10
11 /*
12 * Changes:
13 *
14 * Janos Farkas : delete timer on ifdown
15 * <chexum@bankinf.banki.hu>
16 * Andi Kleen : kill double kfree on module
17 * unload.
18 * Maciej W. Rozycki : FDDI support
19 * sekiya@USAGI : Don't send too many RS
20 * packets.
21 * yoshfuji@USAGI : Fixed interval between DAD
22 * packets.
23 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
24 * address validation timer.
25 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
26 * support.
27 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
28 * address on a same interface.
29 * YOSHIFUJI Hideaki @USAGI : ARCnet support
30 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
31 * seq_file.
32 * YOSHIFUJI Hideaki @USAGI : improved source address
33 * selection; consider scope,
34 * status etc.
35 */
36
37 #define pr_fmt(fmt) "IPv6: " fmt
38
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65
66 #include <net/ip_tunnels.h>
67 #include <net/net_namespace.h>
68 #include <net/sock.h>
69 #include <net/snmp.h>
70
71 #include <net/6lowpan.h>
72 #include <net/firewire.h>
73 #include <net/ipv6.h>
74 #include <net/protocol.h>
75 #include <net/ndisc.h>
76 #include <net/ip6_route.h>
77 #include <net/addrconf.h>
78 #include <net/tcp.h>
79 #include <net/ip.h>
80 #include <net/netlink.h>
81 #include <net/pkt_sched.h>
82 #include <net/l3mdev.h>
83 #include <linux/if_tunnel.h>
84 #include <linux/rtnetlink.h>
85 #include <linux/netconf.h>
86 #include <linux/random.h>
87 #include <linux/uaccess.h>
88 #include <linux/unaligned.h>
89
90 #include <linux/proc_fs.h>
91 #include <linux/seq_file.h>
92 #include <linux/export.h>
93 #include <linux/ioam6.h>
94
95 #define IPV6_MAX_STRLEN \
96 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
97
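/* Convert a jiffies-based creation/update stamp into hundredths of a
 * second since boot, the unit used when reporting cstamp/tstamp to
 * userspace.  E.g. with HZ == 1000, a delta of 2500 jiffies becomes 250.
 */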
98 static inline u32 cstamp_delta(unsigned long cstamp)
99 {
100 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
101 }
102
103 static inline s32 rfc3315_s14_backoff_init(s32 irt)
104 {
105 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
106 u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
107 do_div(tmp, 1000000);
108 return (s32)tmp;
109 }
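/* Worked example: with irt = 4 * HZ the result is uniformly distributed
 * in roughly [3.6 * HZ, 4.4 * HZ].  The 900000..1100000 / 1000000
 * fixed-point form keeps the jitter computation in integer arithmetic,
 * since kernel code cannot use floating point.
 */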
110
111 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
112 {
113 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
114 u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
115 do_div(tmp, 1000000);
116 if ((s32)tmp > mrt) {
117 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
118 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
119 do_div(tmp, 1000000);
120 }
121 return (s32)tmp;
122 }
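/* Worked example: starting from rt = 4 * HZ with a large mrt, successive
 * calls roughly double the timeout (4, 8, 16, ... seconds, each with
 * +/-10% jitter); once doubling would exceed mrt, the value is instead
 * re-randomized to within +/-10% of mrt itself.
 */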
123
124 #ifdef CONFIG_SYSCTL
125 static int addrconf_sysctl_register(struct inet6_dev *idev);
126 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
127 #else
128 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
129 {
130 return 0;
131 }
132
133 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
134 {
135 }
136 #endif
137
138 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
139
140 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
141 static int ipv6_count_addresses(const struct inet6_dev *idev);
142 static int ipv6_generate_stable_address(struct in6_addr *addr,
143 u8 dad_count,
144 const struct inet6_dev *idev);
145
146 #define IN6_ADDR_HSIZE_SHIFT 8
147 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
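/* 256-bucket hash table (net->ipv6.inet6_addr_lst) holding every IPv6
 * address in a namespace, indexed by inet6_addr_hash() below.
 */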
148
149 static void addrconf_verify(struct net *net);
150 static void addrconf_verify_rtnl(struct net *net);
151
152 static struct workqueue_struct *addrconf_wq;
153
154 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
155 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
156
157 static void addrconf_type_change(struct net_device *dev,
158 unsigned long event);
159 static int addrconf_ifdown(struct net_device *dev, bool unregister);
160
161 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
162 int plen,
163 const struct net_device *dev,
164 u32 flags, u32 noflags,
165 bool no_gw);
166
167 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
168 static void addrconf_dad_work(struct work_struct *w);
169 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
170 bool send_na);
171 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
172 static void addrconf_rs_timer(struct timer_list *t);
173 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
174 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175
176 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
177 struct prefix_info *pinfo);
178
179 static struct ipv6_devconf ipv6_devconf __read_mostly = {
180 .forwarding = 0,
181 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
182 .mtu6 = IPV6_MIN_MTU,
183 .accept_ra = 1,
184 .accept_redirects = 1,
185 .autoconf = 1,
186 .force_mld_version = 0,
187 .mldv1_unsolicited_report_interval = 10 * HZ,
188 .mldv2_unsolicited_report_interval = HZ,
189 .dad_transmits = 1,
190 .rtr_solicits = MAX_RTR_SOLICITATIONS,
191 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
192 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
193 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
194 .use_tempaddr = 0,
195 .temp_valid_lft = TEMP_VALID_LIFETIME,
196 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
197 .regen_min_advance = REGEN_MIN_ADVANCE,
198 .regen_max_retry = REGEN_MAX_RETRY,
199 .max_desync_factor = MAX_DESYNC_FACTOR,
200 .max_addresses = IPV6_MAX_ADDRESSES,
201 .accept_ra_defrtr = 1,
202 .ra_defrtr_metric = IP6_RT_PRIO_USER,
203 .accept_ra_from_local = 0,
204 .accept_ra_min_hop_limit= 1,
205 .accept_ra_min_lft = 0,
206 .accept_ra_pinfo = 1,
207 #ifdef CONFIG_IPV6_ROUTER_PREF
208 .accept_ra_rtr_pref = 1,
209 .rtr_probe_interval = 60 * HZ,
210 #ifdef CONFIG_IPV6_ROUTE_INFO
211 .accept_ra_rt_info_min_plen = 0,
212 .accept_ra_rt_info_max_plen = 0,
213 #endif
214 #endif
215 .accept_ra_rt_table = 0,
216 .proxy_ndp = 0,
217 .accept_source_route = 0, /* we do not accept RH0 by default. */
218 .disable_ipv6 = 0,
219 .accept_dad = 0,
220 .suppress_frag_ndisc = 1,
221 .accept_ra_mtu = 1,
222 .stable_secret = {
223 .initialized = false,
224 },
225 .use_oif_addrs_only = 0,
226 .ignore_routes_with_linkdown = 0,
227 .keep_addr_on_down = 0,
228 .seg6_enabled = 0,
229 #ifdef CONFIG_IPV6_SEG6_HMAC
230 .seg6_require_hmac = 0,
231 #endif
232 .enhanced_dad = 1,
233 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
234 .disable_policy = 0,
235 .rpl_seg_enabled = 0,
236 .ioam6_enabled = 0,
237 .ioam6_id = IOAM6_DEFAULT_IF_ID,
238 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
239 .ndisc_evict_nocarrier = 1,
240 .ra_honor_pio_life = 0,
241 .ra_honor_pio_pflag = 0,
242 };
243
244 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
245 .forwarding = 0,
246 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
247 .mtu6 = IPV6_MIN_MTU,
248 .accept_ra = 1,
249 .accept_redirects = 1,
250 .autoconf = 1,
251 .force_mld_version = 0,
252 .mldv1_unsolicited_report_interval = 10 * HZ,
253 .mldv2_unsolicited_report_interval = HZ,
254 .dad_transmits = 1,
255 .rtr_solicits = MAX_RTR_SOLICITATIONS,
256 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
257 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
258 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
259 .use_tempaddr = 0,
260 .temp_valid_lft = TEMP_VALID_LIFETIME,
261 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
262 .regen_min_advance = REGEN_MIN_ADVANCE,
263 .regen_max_retry = REGEN_MAX_RETRY,
264 .max_desync_factor = MAX_DESYNC_FACTOR,
265 .max_addresses = IPV6_MAX_ADDRESSES,
266 .accept_ra_defrtr = 1,
267 .ra_defrtr_metric = IP6_RT_PRIO_USER,
268 .accept_ra_from_local = 0,
269 .accept_ra_min_hop_limit= 1,
270 .accept_ra_min_lft = 0,
271 .accept_ra_pinfo = 1,
272 #ifdef CONFIG_IPV6_ROUTER_PREF
273 .accept_ra_rtr_pref = 1,
274 .rtr_probe_interval = 60 * HZ,
275 #ifdef CONFIG_IPV6_ROUTE_INFO
276 .accept_ra_rt_info_min_plen = 0,
277 .accept_ra_rt_info_max_plen = 0,
278 #endif
279 #endif
280 .accept_ra_rt_table = 0,
281 .proxy_ndp = 0,
282 .accept_source_route = 0, /* we do not accept RH0 by default. */
283 .disable_ipv6 = 0,
284 .accept_dad = 1,
285 .suppress_frag_ndisc = 1,
286 .accept_ra_mtu = 1,
287 .stable_secret = {
288 .initialized = false,
289 },
290 .use_oif_addrs_only = 0,
291 .ignore_routes_with_linkdown = 0,
292 .keep_addr_on_down = 0,
293 .seg6_enabled = 0,
294 #ifdef CONFIG_IPV6_SEG6_HMAC
295 .seg6_require_hmac = 0,
296 #endif
297 .enhanced_dad = 1,
298 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
299 .disable_policy = 0,
300 .rpl_seg_enabled = 0,
301 .ioam6_enabled = 0,
302 .ioam6_id = IOAM6_DEFAULT_IF_ID,
303 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
304 .ndisc_evict_nocarrier = 1,
305 .ra_honor_pio_life = 0,
306 .ra_honor_pio_pflag = 0,
307 };
308
309 /* Check if link is ready: is it up and is a valid qdisc available */
310 static inline bool addrconf_link_ready(const struct net_device *dev)
311 {
312 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
313 }
314
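/* The rs_timer/dad_work helpers below keep exactly one idev/ifp
 * reference per pending timer or work item: a reference is taken when
 * the item is armed from an idle state, and dropped when it is
 * cancelled or when mod_delayed_work() reports it was already queued.
 */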
315 static void addrconf_del_rs_timer(struct inet6_dev *idev)
316 {
317 if (del_timer(&idev->rs_timer))
318 __in6_dev_put(idev);
319 }
320
321 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
322 {
323 if (cancel_delayed_work(&ifp->dad_work))
324 __in6_ifa_put(ifp);
325 }
326
327 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
328 unsigned long when)
329 {
330 if (!mod_timer(&idev->rs_timer, jiffies + when))
331 in6_dev_hold(idev);
332 }
333
334 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
335 unsigned long delay)
336 {
337 in6_ifa_hold(ifp);
338 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
339 in6_ifa_put(ifp);
340 }
341
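/* Allocate the per-device SNMP MIBs (IPv6, ICMPv6 and ICMPv6 message
 * counters).  On failure, everything allocated so far is freed and
 * -ENOMEM is returned.
 */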
342 static int snmp6_alloc_dev(struct inet6_dev *idev)
343 {
344 int i;
345
346 idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
347 if (!idev->stats.ipv6)
348 goto err_ip;
349
350 for_each_possible_cpu(i) {
351 struct ipstats_mib *addrconf_stats;
352 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
353 u64_stats_init(&addrconf_stats->syncp);
354 }
355
356
357 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
358 GFP_KERNEL);
359 if (!idev->stats.icmpv6dev)
360 goto err_icmp;
361 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
362 GFP_KERNEL_ACCOUNT);
363 if (!idev->stats.icmpv6msgdev)
364 goto err_icmpmsg;
365
366 return 0;
367
368 err_icmpmsg:
369 kfree(idev->stats.icmpv6dev);
370 err_icmp:
371 free_percpu(idev->stats.ipv6);
372 err_ip:
373 return -ENOMEM;
374 }
375
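/* Create and attach the inet6_dev for a network device: copy the
 * namespace default devconf, take a reference on the device, allocate
 * neighbour parameters and statistics, register sysctl/procfs entries
 * (skipped for blackhole_netdev) and join the all-nodes multicast
 * groups.  Returns an ERR_PTR() on failure.
 */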
376 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
377 {
378 struct inet6_dev *ndev;
379 int err = -ENOMEM;
380
381 ASSERT_RTNL();
382
383 if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
384 return ERR_PTR(-EINVAL);
385
386 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
387 if (!ndev)
388 return ERR_PTR(err);
389
390 rwlock_init(&ndev->lock);
391 ndev->dev = dev;
392 INIT_LIST_HEAD(&ndev->addr_list);
393 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
394 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
395
396 if (ndev->cnf.stable_secret.initialized)
397 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
398
399 ndev->cnf.mtu6 = dev->mtu;
400 ndev->ra_mtu = 0;
401 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
402 if (!ndev->nd_parms) {
403 kfree(ndev);
404 return ERR_PTR(err);
405 }
406 if (ndev->cnf.forwarding)
407 dev_disable_lro(dev);
408 /* We refer to the device */
409 netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
410
411 if (snmp6_alloc_dev(ndev) < 0) {
412 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
413 __func__);
414 neigh_parms_release(&nd_tbl, ndev->nd_parms);
415 netdev_put(dev, &ndev->dev_tracker);
416 kfree(ndev);
417 return ERR_PTR(err);
418 }
419
420 if (dev != blackhole_netdev) {
421 if (snmp6_register_dev(ndev) < 0) {
422 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
423 __func__, dev->name);
424 goto err_release;
425 }
426 }
427 /* One reference from device. */
428 refcount_set(&ndev->refcnt, 1);
429
430 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
431 ndev->cnf.accept_dad = -1;
432
433 #if IS_ENABLED(CONFIG_IPV6_SIT)
434 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
435 pr_info("%s: Disabled Multicast RS\n", dev->name);
436 ndev->cnf.rtr_solicits = 0;
437 }
438 #endif
439
440 INIT_LIST_HEAD(&ndev->tempaddr_list);
441 ndev->desync_factor = U32_MAX;
442 if ((dev->flags&IFF_LOOPBACK) ||
443 dev->type == ARPHRD_TUNNEL ||
444 dev->type == ARPHRD_TUNNEL6 ||
445 dev->type == ARPHRD_SIT ||
446 dev->type == ARPHRD_NONE) {
447 ndev->cnf.use_tempaddr = -1;
448 }
449
450 ndev->token = in6addr_any;
451
452 if (netif_running(dev) && addrconf_link_ready(dev))
453 ndev->if_flags |= IF_READY;
454
455 ipv6_mc_init_dev(ndev);
456 ndev->tstamp = jiffies;
457 if (dev != blackhole_netdev) {
458 err = addrconf_sysctl_register(ndev);
459 if (err) {
460 ipv6_mc_destroy_dev(ndev);
461 snmp6_unregister_dev(ndev);
462 goto err_release;
463 }
464 }
465 /* protected by rtnl_lock */
466 rcu_assign_pointer(dev->ip6_ptr, ndev);
467
468 if (dev != blackhole_netdev) {
469 /* Join interface-local all-node multicast group */
470 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
471
472 /* Join all-node multicast group */
473 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
474
475 /* Join all-router multicast group if forwarding is set */
476 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
477 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
478 }
479 return ndev;
480
481 err_release:
482 neigh_parms_release(&nd_tbl, ndev->nd_parms);
483 ndev->dead = 1;
484 in6_dev_finish_destroy(ndev);
485 return ERR_PTR(err);
486 }
487
488 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
489 {
490 struct inet6_dev *idev;
491
492 ASSERT_RTNL();
493
494 idev = __in6_dev_get(dev);
495 if (!idev) {
496 idev = ipv6_add_dev(dev);
497 if (IS_ERR(idev))
498 return idev;
499 }
500
501 if (dev->flags&IFF_UP)
502 ipv6_mc_up(idev);
503 return idev;
504 }
505
506 static int inet6_netconf_msgsize_devconf(int type)
507 {
508 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
509 + nla_total_size(4); /* NETCONFA_IFINDEX */
510 bool all = false;
511
512 if (type == NETCONFA_ALL)
513 all = true;
514
515 if (all || type == NETCONFA_FORWARDING)
516 size += nla_total_size(4);
517 #ifdef CONFIG_IPV6_MROUTE
518 if (all || type == NETCONFA_MC_FORWARDING)
519 size += nla_total_size(4);
520 #endif
521 if (all || type == NETCONFA_PROXY_NEIGH)
522 size += nla_total_size(4);
523
524 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
525 size += nla_total_size(4);
526
527 return size;
528 }
529
530 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
531 struct ipv6_devconf *devconf, u32 portid,
532 u32 seq, int event, unsigned int flags,
533 int type)
534 {
535 struct nlmsghdr *nlh;
536 struct netconfmsg *ncm;
537 bool all = false;
538
539 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
540 flags);
541 if (!nlh)
542 return -EMSGSIZE;
543
544 if (type == NETCONFA_ALL)
545 all = true;
546
547 ncm = nlmsg_data(nlh);
548 ncm->ncm_family = AF_INET6;
549
550 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
551 goto nla_put_failure;
552
553 if (!devconf)
554 goto out;
555
556 if ((all || type == NETCONFA_FORWARDING) &&
557 nla_put_s32(skb, NETCONFA_FORWARDING,
558 READ_ONCE(devconf->forwarding)) < 0)
559 goto nla_put_failure;
560 #ifdef CONFIG_IPV6_MROUTE
561 if ((all || type == NETCONFA_MC_FORWARDING) &&
562 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
563 atomic_read(&devconf->mc_forwarding)) < 0)
564 goto nla_put_failure;
565 #endif
566 if ((all || type == NETCONFA_PROXY_NEIGH) &&
567 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
568 READ_ONCE(devconf->proxy_ndp)) < 0)
569 goto nla_put_failure;
570
571 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
572 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
573 READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
574 goto nla_put_failure;
575
576 out:
577 nlmsg_end(skb, nlh);
578 return 0;
579
580 nla_put_failure:
581 nlmsg_cancel(skb, nlh);
582 return -EMSGSIZE;
583 }
584
585 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
586 int ifindex, struct ipv6_devconf *devconf)
587 {
588 struct sk_buff *skb;
589 int err = -ENOBUFS;
590
591 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
592 if (!skb)
593 goto errout;
594
595 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
596 event, 0, type);
597 if (err < 0) {
598 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
599 WARN_ON(err == -EMSGSIZE);
600 kfree_skb(skb);
601 goto errout;
602 }
603 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
604 return;
605 errout:
606 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
607 }
608
609 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
610 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
611 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
612 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
613 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
614 };
615
616 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
617 const struct nlmsghdr *nlh,
618 struct nlattr **tb,
619 struct netlink_ext_ack *extack)
620 {
621 int i, err;
622
623 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
624 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
625 return -EINVAL;
626 }
627
628 if (!netlink_strict_get_check(skb))
629 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
630 tb, NETCONFA_MAX,
631 devconf_ipv6_policy, extack);
632
633 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
634 tb, NETCONFA_MAX,
635 devconf_ipv6_policy, extack);
636 if (err)
637 return err;
638
639 for (i = 0; i <= NETCONFA_MAX; i++) {
640 if (!tb[i])
641 continue;
642
643 switch (i) {
644 case NETCONFA_IFINDEX:
645 break;
646 default:
647 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
648 return -EINVAL;
649 }
650 }
651
652 return 0;
653 }
654
655 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
656 struct nlmsghdr *nlh,
657 struct netlink_ext_ack *extack)
658 {
659 struct net *net = sock_net(in_skb->sk);
660 struct nlattr *tb[NETCONFA_MAX+1];
661 struct inet6_dev *in6_dev = NULL;
662 struct net_device *dev = NULL;
663 struct sk_buff *skb;
664 struct ipv6_devconf *devconf;
665 int ifindex;
666 int err;
667
668 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
669 if (err < 0)
670 return err;
671
672 if (!tb[NETCONFA_IFINDEX])
673 return -EINVAL;
674
675 err = -EINVAL;
676 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
677 switch (ifindex) {
678 case NETCONFA_IFINDEX_ALL:
679 devconf = net->ipv6.devconf_all;
680 break;
681 case NETCONFA_IFINDEX_DEFAULT:
682 devconf = net->ipv6.devconf_dflt;
683 break;
684 default:
685 dev = dev_get_by_index(net, ifindex);
686 if (!dev)
687 return -EINVAL;
688 in6_dev = in6_dev_get(dev);
689 if (!in6_dev)
690 goto errout;
691 devconf = &in6_dev->cnf;
692 break;
693 }
694
695 err = -ENOBUFS;
696 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
697 if (!skb)
698 goto errout;
699
700 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
701 NETLINK_CB(in_skb).portid,
702 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
703 NETCONFA_ALL);
704 if (err < 0) {
705 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
706 WARN_ON(err == -EMSGSIZE);
707 kfree_skb(skb);
708 goto errout;
709 }
710 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
711 errout:
712 if (in6_dev)
713 in6_dev_put(in6_dev);
714 dev_put(dev);
715 return err;
716 }
717
718 /* Combine dev_addr_genid and dev_base_seq to detect changes.
719 */
720 static u32 inet6_base_seq(const struct net *net)
721 {
722 u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
723 READ_ONCE(net->dev_base_seq);
724
725 /* Must not return 0 (see nl_dump_check_consistent()).
726 * Chose a value far away from 0.
727 */
728 if (!res)
729 res = 0x80000000;
730 return res;
731 }
732
733 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
734 struct netlink_callback *cb)
735 {
736 const struct nlmsghdr *nlh = cb->nlh;
737 struct net *net = sock_net(skb->sk);
738 struct {
739 unsigned long ifindex;
740 unsigned int all_default;
741 } *ctx = (void *)cb->ctx;
742 struct net_device *dev;
743 struct inet6_dev *idev;
744 int err = 0;
745
746 if (cb->strict_check) {
747 struct netlink_ext_ack *extack = cb->extack;
748 struct netconfmsg *ncm;
749
750 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
751 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
752 return -EINVAL;
753 }
754
755 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
756 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
757 return -EINVAL;
758 }
759 }
760
761 rcu_read_lock();
762 for_each_netdev_dump(net, dev, ctx->ifindex) {
763 idev = __in6_dev_get(dev);
764 if (!idev)
765 continue;
766 err = inet6_netconf_fill_devconf(skb, dev->ifindex,
767 &idev->cnf,
768 NETLINK_CB(cb->skb).portid,
769 nlh->nlmsg_seq,
770 RTM_NEWNETCONF,
771 NLM_F_MULTI,
772 NETCONFA_ALL);
773 if (err < 0)
774 goto done;
775 }
776 if (ctx->all_default == 0) {
777 err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
778 net->ipv6.devconf_all,
779 NETLINK_CB(cb->skb).portid,
780 nlh->nlmsg_seq,
781 RTM_NEWNETCONF, NLM_F_MULTI,
782 NETCONFA_ALL);
783 if (err < 0)
784 goto done;
785 ctx->all_default++;
786 }
787 if (ctx->all_default == 1) {
788 err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
789 net->ipv6.devconf_dflt,
790 NETLINK_CB(cb->skb).portid,
791 nlh->nlmsg_seq,
792 RTM_NEWNETCONF, NLM_F_MULTI,
793 NETCONFA_ALL);
794 if (err < 0)
795 goto done;
796 ctx->all_default++;
797 }
798 done:
799 rcu_read_unlock();
800 return err;
801 }
802
803 #ifdef CONFIG_SYSCTL
804 static void dev_forward_change(struct inet6_dev *idev)
805 {
806 struct net_device *dev;
807 struct inet6_ifaddr *ifa;
808 LIST_HEAD(tmp_addr_list);
809
810 if (!idev)
811 return;
812 dev = idev->dev;
813 if (idev->cnf.forwarding)
814 dev_disable_lro(dev);
815 if (dev->flags & IFF_MULTICAST) {
816 if (idev->cnf.forwarding) {
817 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
818 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
819 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
820 } else {
821 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
822 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
823 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
824 }
825 }
826
827 read_lock_bh(&idev->lock);
828 list_for_each_entry(ifa, &idev->addr_list, if_list) {
829 if (ifa->flags&IFA_F_TENTATIVE)
830 continue;
831 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
832 }
833 read_unlock_bh(&idev->lock);
834
835 while (!list_empty(&tmp_addr_list)) {
836 ifa = list_first_entry(&tmp_addr_list,
837 struct inet6_ifaddr, if_list_aux);
838 list_del(&ifa->if_list_aux);
839 if (idev->cnf.forwarding)
840 addrconf_join_anycast(ifa);
841 else
842 addrconf_leave_anycast(ifa);
843 }
844
845 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
846 NETCONFA_FORWARDING,
847 dev->ifindex, &idev->cnf);
848 }
849
850
851 static void addrconf_forward_change(struct net *net, __s32 newf)
852 {
853 struct net_device *dev;
854 struct inet6_dev *idev;
855
856 for_each_netdev(net, dev) {
857 idev = __in6_dev_get(dev);
858 if (idev) {
859 int changed = (!idev->cnf.forwarding) ^ (!newf);
860
861 WRITE_ONCE(idev->cnf.forwarding, newf);
862 if (changed)
863 dev_forward_change(idev);
864 }
865 }
866 }
867
868 static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
869 {
870 struct net *net;
871 int old;
872
873 if (!rtnl_trylock())
874 return restart_syscall();
875
876 net = (struct net *)table->extra2;
877 old = *p;
878 WRITE_ONCE(*p, newf);
879
880 if (p == &net->ipv6.devconf_dflt->forwarding) {
881 if ((!newf) ^ (!old))
882 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
883 NETCONFA_FORWARDING,
884 NETCONFA_IFINDEX_DEFAULT,
885 net->ipv6.devconf_dflt);
886 rtnl_unlock();
887 return 0;
888 }
889
890 if (p == &net->ipv6.devconf_all->forwarding) {
891 int old_dflt = net->ipv6.devconf_dflt->forwarding;
892
893 WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
894 if ((!newf) ^ (!old_dflt))
895 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
896 NETCONFA_FORWARDING,
897 NETCONFA_IFINDEX_DEFAULT,
898 net->ipv6.devconf_dflt);
899
900 addrconf_forward_change(net, newf);
901 if ((!newf) ^ (!old))
902 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
903 NETCONFA_FORWARDING,
904 NETCONFA_IFINDEX_ALL,
905 net->ipv6.devconf_all);
906 } else if ((!newf) ^ (!old))
907 dev_forward_change((struct inet6_dev *)table->extra1);
908 rtnl_unlock();
909
910 if (newf)
911 rt6_purge_dflt_routers(net);
912 return 1;
913 }
914
915 static void addrconf_linkdown_change(struct net *net, __s32 newf)
916 {
917 struct net_device *dev;
918 struct inet6_dev *idev;
919
920 for_each_netdev(net, dev) {
921 idev = __in6_dev_get(dev);
922 if (idev) {
923 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
924
925 WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
926 if (changed)
927 inet6_netconf_notify_devconf(dev_net(dev),
928 RTM_NEWNETCONF,
929 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
930 dev->ifindex,
931 &idev->cnf);
932 }
933 }
934 }
935
936 static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
937 {
938 struct net *net;
939 int old;
940
941 if (!rtnl_trylock())
942 return restart_syscall();
943
944 net = (struct net *)table->extra2;
945 old = *p;
946 WRITE_ONCE(*p, newf);
947
948 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
949 if ((!newf) ^ (!old))
950 inet6_netconf_notify_devconf(net,
951 RTM_NEWNETCONF,
952 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
953 NETCONFA_IFINDEX_DEFAULT,
954 net->ipv6.devconf_dflt);
955 rtnl_unlock();
956 return 0;
957 }
958
959 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
960 WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
961 addrconf_linkdown_change(net, newf);
962 if ((!newf) ^ (!old))
963 inet6_netconf_notify_devconf(net,
964 RTM_NEWNETCONF,
965 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
966 NETCONFA_IFINDEX_ALL,
967 net->ipv6.devconf_all);
968 }
969 rtnl_unlock();
970
971 return 1;
972 }
973
974 #endif
975
976 /* Nobody refers to this ifaddr, destroy it */
977 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
978 {
979 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
980
981 #ifdef NET_REFCNT_DEBUG
982 pr_debug("%s\n", __func__);
983 #endif
984
985 in6_dev_put(ifp->idev);
986
987 if (cancel_delayed_work(&ifp->dad_work))
988 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
989 ifp);
990
991 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
992 pr_warn("Freeing alive inet6 address %p\n", ifp);
993 return;
994 }
995
996 kfree_rcu(ifp, rcu);
997 }
998
999 static void
1000 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
1001 {
1002 struct list_head *p;
1003 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
1004
1005 /*
1006 * Each device address list is sorted in order of scope -
1007 * global before linklocal.
1008 */
1009 list_for_each(p, &idev->addr_list) {
1010 struct inet6_ifaddr *ifa
1011 = list_entry(p, struct inet6_ifaddr, if_list);
1012 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1013 break;
1014 }
1015
1016 list_add_tail_rcu(&ifp->if_list, p);
1017 }
1018
1019 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1020 {
1021 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1022
1023 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1024 }
1025
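/* Hash-bucket lookup used when inserting addresses; called with
 * net->ipv6.addrconf_hash_lock held (see ipv6_add_addr_hash() below).
 */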
1026 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1027 struct net_device *dev, unsigned int hash)
1028 {
1029 struct inet6_ifaddr *ifp;
1030
1031 hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1032 if (ipv6_addr_equal(&ifp->addr, addr)) {
1033 if (!dev || ifp->idev->dev == dev)
1034 return true;
1035 }
1036 }
1037 return false;
1038 }
1039
1040 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1041 {
1042 struct net *net = dev_net(dev);
1043 unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1044 int err = 0;
1045
1046 spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1047
1048 /* Ignore adding duplicate addresses on an interface */
1049 if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1050 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1051 err = -EEXIST;
1052 } else {
1053 hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1054 }
1055
1056 spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1057
1058 return err;
1059 }
1060
1061 /* On success it returns ifp with increased reference count */
1062
1063 static struct inet6_ifaddr *
1064 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1065 bool can_block, struct netlink_ext_ack *extack)
1066 {
1067 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1068 int addr_type = ipv6_addr_type(cfg->pfx);
1069 struct net *net = dev_net(idev->dev);
1070 struct inet6_ifaddr *ifa = NULL;
1071 struct fib6_info *f6i = NULL;
1072 int err = 0;
1073
1074 if (addr_type == IPV6_ADDR_ANY) {
1075 NL_SET_ERR_MSG_MOD(extack, "Invalid address");
1076 return ERR_PTR(-EADDRNOTAVAIL);
1077 } else if (addr_type & IPV6_ADDR_MULTICAST &&
1078 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) {
1079 NL_SET_ERR_MSG_MOD(extack, "Cannot assign multicast address without \"IFA_F_MCAUTOJOIN\" flag");
1080 return ERR_PTR(-EADDRNOTAVAIL);
1081 } else if (!(idev->dev->flags & IFF_LOOPBACK) &&
1082 !netif_is_l3_master(idev->dev) &&
1083 addr_type & IPV6_ADDR_LOOPBACK) {
1084 NL_SET_ERR_MSG_MOD(extack, "Cannot assign loopback address on this device");
1085 return ERR_PTR(-EADDRNOTAVAIL);
1086 }
1087
1088 if (idev->dead) {
1089 NL_SET_ERR_MSG_MOD(extack, "device is going away");
1090 err = -ENODEV;
1091 goto out;
1092 }
1093
1094 if (idev->cnf.disable_ipv6) {
1095 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
1096 err = -EACCES;
1097 goto out;
1098 }
1099
1100 /* validator notifier needs to be blocking;
1101 * do not call in atomic context
1102 */
1103 if (can_block) {
1104 struct in6_validator_info i6vi = {
1105 .i6vi_addr = *cfg->pfx,
1106 .i6vi_dev = idev,
1107 .extack = extack,
1108 };
1109
1110 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1111 err = notifier_to_errno(err);
1112 if (err < 0)
1113 goto out;
1114 }
1115
1116 ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1117 if (!ifa) {
1118 err = -ENOBUFS;
1119 goto out;
1120 }
1121
1122 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags, extack);
1123 if (IS_ERR(f6i)) {
1124 err = PTR_ERR(f6i);
1125 f6i = NULL;
1126 goto out;
1127 }
1128
1129 neigh_parms_data_state_setall(idev->nd_parms);
1130
1131 ifa->addr = *cfg->pfx;
1132 if (cfg->peer_pfx)
1133 ifa->peer_addr = *cfg->peer_pfx;
1134
1135 spin_lock_init(&ifa->lock);
1136 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1137 INIT_HLIST_NODE(&ifa->addr_lst);
1138 ifa->scope = cfg->scope;
1139 ifa->prefix_len = cfg->plen;
1140 ifa->rt_priority = cfg->rt_priority;
1141 ifa->flags = cfg->ifa_flags;
1142 ifa->ifa_proto = cfg->ifa_proto;
1143 /* No need to add the TENTATIVE flag for addresses with NODAD */
1144 if (!(cfg->ifa_flags & IFA_F_NODAD))
1145 ifa->flags |= IFA_F_TENTATIVE;
1146 ifa->valid_lft = cfg->valid_lft;
1147 ifa->prefered_lft = cfg->preferred_lft;
1148 ifa->cstamp = ifa->tstamp = jiffies;
1149 ifa->tokenized = false;
1150
1151 ifa->rt = f6i;
1152
1153 ifa->idev = idev;
1154 in6_dev_hold(idev);
1155
1156 /* For caller */
1157 refcount_set(&ifa->refcnt, 1);
1158
1159 rcu_read_lock();
1160
1161 err = ipv6_add_addr_hash(idev->dev, ifa);
1162 if (err < 0) {
1163 rcu_read_unlock();
1164 goto out;
1165 }
1166
1167 write_lock_bh(&idev->lock);
1168
1169 /* Add to inet6_dev unicast addr list. */
1170 ipv6_link_dev_addr(idev, ifa);
1171
1172 if (ifa->flags&IFA_F_TEMPORARY) {
1173 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1174 in6_ifa_hold(ifa);
1175 }
1176
1177 in6_ifa_hold(ifa);
1178 write_unlock_bh(&idev->lock);
1179
1180 rcu_read_unlock();
1181
1182 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1183 out:
1184 if (unlikely(err < 0)) {
1185 fib6_info_release(f6i);
1186
1187 if (ifa) {
1188 if (ifa->idev)
1189 in6_dev_put(ifa->idev);
1190 kfree(ifa);
1191 }
1192 ifa = ERR_PTR(err);
1193 }
1194
1195 return ifa;
1196 }
1197
1198 enum cleanup_prefix_rt_t {
1199 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1200 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1201 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1202 };
1203
1204 /*
1205 * Check, whether the prefix for ifp would still need a prefix route
1206 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1207 * constants.
1208 *
1209 * 1) we don't purge prefix if address was not permanent.
1210 * prefix is managed by its own lifetime.
1211 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1212 * 3) if there are no addresses, delete prefix.
1213 * 4) if there are still other permanent address(es),
1214 * corresponding prefix is still permanent.
1215 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1216 * don't purge the prefix, assume user space is managing it.
1217 * 6) otherwise, update prefix lifetime to the
1218 * longest valid lifetime among the corresponding
1219 * addresses on the device.
1220 * Note: subsequent RA will update lifetime.
1221 **/
1222 static enum cleanup_prefix_rt_t
1223 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1224 {
1225 struct inet6_ifaddr *ifa;
1226 struct inet6_dev *idev = ifp->idev;
1227 unsigned long lifetime;
1228 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1229
1230 *expires = jiffies;
1231
1232 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1233 if (ifa == ifp)
1234 continue;
1235 if (ifa->prefix_len != ifp->prefix_len ||
1236 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1237 ifp->prefix_len))
1238 continue;
1239 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1240 return CLEANUP_PREFIX_RT_NOP;
1241
1242 action = CLEANUP_PREFIX_RT_EXPIRE;
1243
1244 spin_lock(&ifa->lock);
1245
1246 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1247 /*
1248 * Note: Because this address is
1249 * not permanent, lifetime <
1250 * LONG_MAX / HZ here.
1251 */
1252 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1253 *expires = ifa->tstamp + lifetime * HZ;
1254 spin_unlock(&ifa->lock);
1255 }
1256
1257 return action;
1258 }
1259
1260 static void
1261 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1262 bool del_rt, bool del_peer)
1263 {
1264 struct fib6_table *table;
1265 struct fib6_info *f6i;
1266
1267 f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1268 ifp->prefix_len,
1269 ifp->idev->dev, 0, RTF_DEFAULT, true);
1270 if (f6i) {
1271 if (del_rt)
1272 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1273 else {
1274 if (!(f6i->fib6_flags & RTF_EXPIRES)) {
1275 table = f6i->fib6_table;
1276 spin_lock_bh(&table->tb6_lock);
1277
1278 fib6_set_expires(f6i, expires);
1279 fib6_add_gc_list(f6i);
1280
1281 spin_unlock_bh(&table->tb6_lock);
1282 }
1283 fib6_info_release(f6i);
1284 }
1285 }
1286 }
1287
1288
1289 /* This function wants to get referenced ifp and releases it before return */
1290
1291 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1292 {
1293 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1294 struct net *net = dev_net(ifp->idev->dev);
1295 unsigned long expires;
1296 int state;
1297
1298 ASSERT_RTNL();
1299
1300 spin_lock_bh(&ifp->lock);
1301 state = ifp->state;
1302 ifp->state = INET6_IFADDR_STATE_DEAD;
1303 spin_unlock_bh(&ifp->lock);
1304
1305 if (state == INET6_IFADDR_STATE_DEAD)
1306 goto out;
1307
1308 spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1309 hlist_del_init_rcu(&ifp->addr_lst);
1310 spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1311
1312 write_lock_bh(&ifp->idev->lock);
1313
1314 if (ifp->flags&IFA_F_TEMPORARY) {
1315 list_del(&ifp->tmp_list);
1316 if (ifp->ifpub) {
1317 in6_ifa_put(ifp->ifpub);
1318 ifp->ifpub = NULL;
1319 }
1320 __in6_ifa_put(ifp);
1321 }
1322
1323 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1324 action = check_cleanup_prefix_route(ifp, &expires);
1325
1326 list_del_rcu(&ifp->if_list);
1327 __in6_ifa_put(ifp);
1328
1329 write_unlock_bh(&ifp->idev->lock);
1330
1331 addrconf_del_dad_work(ifp);
1332
1333 ipv6_ifa_notify(RTM_DELADDR, ifp);
1334
1335 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1336
1337 if (action != CLEANUP_PREFIX_RT_NOP) {
1338 cleanup_prefix_route(ifp, expires,
1339 action == CLEANUP_PREFIX_RT_DEL, false);
1340 }
1341
1342 /* clean up prefsrc entries */
1343 rt6_remove_prefsrc(ifp);
1344 out:
1345 in6_ifa_put(ifp);
1346 }
1347
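/* Roughly REGEN_ADVANCE from RFC 8981: how far ahead of a temporary
 * address being deprecated its replacement should be created, i.e.
 * regen_min_advance plus the worst-case time for regen_max_retry rounds
 * of DAD with dad_transmits probes each.
 */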
1348 static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
1349 {
1350 return READ_ONCE(idev->cnf.regen_min_advance) +
1351 READ_ONCE(idev->cnf.regen_max_retry) *
1352 READ_ONCE(idev->cnf.dad_transmits) *
1353 max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1354 }
1355
1356 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1357 {
1358 struct inet6_dev *idev = ifp->idev;
1359 unsigned long tmp_tstamp, age;
1360 unsigned long regen_advance;
1361 unsigned long now = jiffies;
1362 u32 if_public_preferred_lft;
1363 s32 cnf_temp_preferred_lft;
1364 struct inet6_ifaddr *ift;
1365 struct ifa6_config cfg;
1366 long max_desync_factor;
1367 struct in6_addr addr;
1368 int ret = 0;
1369
1370 write_lock_bh(&idev->lock);
1371
1372 retry:
1373 in6_dev_hold(idev);
1374 if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
1375 write_unlock_bh(&idev->lock);
1376 pr_info("%s: use_tempaddr is disabled\n", __func__);
1377 in6_dev_put(idev);
1378 ret = -1;
1379 goto out;
1380 }
1381 spin_lock_bh(&ifp->lock);
1382 if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
1383 WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
1384 spin_unlock_bh(&ifp->lock);
1385 write_unlock_bh(&idev->lock);
1386 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1387 __func__);
1388 in6_dev_put(idev);
1389 ret = -1;
1390 goto out;
1391 }
1392 in6_ifa_hold(ifp);
1393 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1394 ipv6_gen_rnd_iid(&addr);
1395
1396 age = (now - ifp->tstamp) / HZ;
1397
1398 regen_advance = ipv6_get_regen_advance(idev);
1399
1400 /* recalculate max_desync_factor each time and update
1401 * idev->desync_factor if it's larger
1402 */
1403 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1404 max_desync_factor = min_t(long,
1405 READ_ONCE(idev->cnf.max_desync_factor),
1406 cnf_temp_preferred_lft - regen_advance);
1407
1408 if (unlikely(idev->desync_factor > max_desync_factor)) {
1409 if (max_desync_factor > 0) {
1410 get_random_bytes(&idev->desync_factor,
1411 sizeof(idev->desync_factor));
1412 idev->desync_factor %= max_desync_factor;
1413 } else {
1414 idev->desync_factor = 0;
1415 }
1416 }
1417
1418 if_public_preferred_lft = ifp->prefered_lft;
1419
1420 memset(&cfg, 0, sizeof(cfg));
1421 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1422 READ_ONCE(idev->cnf.temp_valid_lft) + age);
1423 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1424 cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
1425 cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
1426
1427 cfg.plen = ifp->prefix_len;
1428 tmp_tstamp = ifp->tstamp;
1429 spin_unlock_bh(&ifp->lock);
1430
1431 write_unlock_bh(&idev->lock);
1432
1433 /* From RFC 4941:
1434 *
1435 * A temporary address is created only if this calculated Preferred
1436 * Lifetime is greater than REGEN_ADVANCE time units. In
1437 * particular, an implementation must not create a temporary address
1438 * with a zero Preferred Lifetime.
1439 *
1440 * ...
1441 *
1442 * When creating a temporary address, the lifetime values MUST be
1443 * derived from the corresponding prefix as follows:
1444 *
1445 * ...
1446 *
1447 * * Its Preferred Lifetime is the lower of the Preferred Lifetime
1448 * of the public address or TEMP_PREFERRED_LIFETIME -
1449 * DESYNC_FACTOR.
1450 *
1451 * To comply with the RFC's requirements, clamp the preferred lifetime
1452 * to a minimum of regen_advance, unless that would exceed valid_lft or
1453 * ifp->prefered_lft.
1454 *
1455 * Use age calculation as in addrconf_verify to avoid unnecessary
1456 * temporary addresses being generated.
1457 */
1458 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1459 if (cfg.preferred_lft <= regen_advance + age) {
1460 cfg.preferred_lft = regen_advance + age + 1;
1461 if (cfg.preferred_lft > cfg.valid_lft ||
1462 cfg.preferred_lft > if_public_preferred_lft) {
1463 in6_ifa_put(ifp);
1464 in6_dev_put(idev);
1465 ret = -1;
1466 goto out;
1467 }
1468 }
1469
1470 cfg.ifa_flags = IFA_F_TEMPORARY;
1471 /* set in addrconf_prefix_rcv() */
1472 if (ifp->flags & IFA_F_OPTIMISTIC)
1473 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1474
1475 cfg.pfx = &addr;
1476 cfg.scope = ipv6_addr_scope(cfg.pfx);
1477
1478 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1479 if (IS_ERR(ift)) {
1480 in6_ifa_put(ifp);
1481 in6_dev_put(idev);
1482 pr_info("%s: retry temporary address regeneration\n", __func__);
1483 write_lock_bh(&idev->lock);
1484 goto retry;
1485 }
1486
1487 spin_lock_bh(&ift->lock);
1488 ift->ifpub = ifp;
1489 ift->cstamp = now;
1490 ift->tstamp = tmp_tstamp;
1491 spin_unlock_bh(&ift->lock);
1492
1493 addrconf_dad_start(ift);
1494 in6_ifa_put(ift);
1495 in6_dev_put(idev);
1496 out:
1497 return ret;
1498 }
1499
1500 /*
1501 * Choose an appropriate source address (RFC3484)
1502 */
1503 enum {
1504 IPV6_SADDR_RULE_INIT = 0,
1505 IPV6_SADDR_RULE_LOCAL,
1506 IPV6_SADDR_RULE_SCOPE,
1507 IPV6_SADDR_RULE_PREFERRED,
1508 #ifdef CONFIG_IPV6_MIP6
1509 IPV6_SADDR_RULE_HOA,
1510 #endif
1511 IPV6_SADDR_RULE_OIF,
1512 IPV6_SADDR_RULE_LABEL,
1513 IPV6_SADDR_RULE_PRIVACY,
1514 IPV6_SADDR_RULE_ORCHID,
1515 IPV6_SADDR_RULE_PREFIX,
1516 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1517 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1518 #endif
1519 IPV6_SADDR_RULE_MAX
1520 };
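/* The enum order above defines rule precedence: candidates are compared
 * rule by rule in ipv6_get_saddr_eval(), and the first rule on which two
 * addresses differ decides between them (roughly the RFC 3484/RFC 6724
 * source address selection procedure).
 */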
1521
1522 struct ipv6_saddr_score {
1523 int rule;
1524 int addr_type;
1525 struct inet6_ifaddr *ifa;
1526 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1527 int scopedist;
1528 int matchlen;
1529 };
1530
1531 struct ipv6_saddr_dst {
1532 const struct in6_addr *addr;
1533 int ifindex;
1534 int scope;
1535 int label;
1536 unsigned int prefs;
1537 };
1538
1539 static inline int ipv6_saddr_preferred(int type)
1540 {
1541 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1542 return 1;
1543 return 0;
1544 }
1545
1546 static bool ipv6_use_optimistic_addr(const struct net *net,
1547 const struct inet6_dev *idev)
1548 {
1549 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1550 if (!idev)
1551 return false;
1552 if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1553 !READ_ONCE(idev->cnf.optimistic_dad))
1554 return false;
1555 if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
1556 !READ_ONCE(idev->cnf.use_optimistic))
1557 return false;
1558
1559 return true;
1560 #else
1561 return false;
1562 #endif
1563 }
1564
1565 static bool ipv6_allow_optimistic_dad(const struct net *net,
1566 const struct inet6_dev *idev)
1567 {
1568 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1569 if (!idev)
1570 return false;
1571 if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1572 !READ_ONCE(idev->cnf.optimistic_dad))
1573 return false;
1574
1575 return true;
1576 #else
1577 return false;
1578 #endif
1579 }
1580
1581 static int ipv6_get_saddr_eval(struct net *net,
1582 struct ipv6_saddr_score *score,
1583 struct ipv6_saddr_dst *dst,
1584 int i)
1585 {
1586 int ret;
1587
1588 if (i <= score->rule) {
1589 switch (i) {
1590 case IPV6_SADDR_RULE_SCOPE:
1591 ret = score->scopedist;
1592 break;
1593 case IPV6_SADDR_RULE_PREFIX:
1594 ret = score->matchlen;
1595 break;
1596 default:
1597 ret = !!test_bit(i, score->scorebits);
1598 }
1599 goto out;
1600 }
1601
1602 switch (i) {
1603 case IPV6_SADDR_RULE_INIT:
1604 /* Rule 0: remember if hiscore is not ready yet */
1605 ret = !!score->ifa;
1606 break;
1607 case IPV6_SADDR_RULE_LOCAL:
1608 /* Rule 1: Prefer same address */
1609 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1610 break;
1611 case IPV6_SADDR_RULE_SCOPE:
1612 /* Rule 2: Prefer appropriate scope
1613 *
1614 * ret
1615 * ^
1616 * -1 | d 15
1617 * ---+--+-+---> scope
1618 * |
1619 * | d is scope of the destination.
1620 * B-d | \
1621 * | \ <- smaller scope is better if
1622 * B-15 | \ if scope is enough for destination.
1623 * | ret = B - scope (-1 <= scope >= d <= 15).
1624 * d-C-1 | /
1625 * |/ <- greater is better
1626 * -C / if scope is not enough for destination.
1627 * /| ret = scope - C (-1 <= d < scope <= 15).
1628 *
1629 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1630 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1631 * Assume B = 0 and we get C > 29.
1632 */
1633 ret = __ipv6_addr_src_scope(score->addr_type);
1634 if (ret >= dst->scope)
1635 ret = -ret;
1636 else
1637 ret -= 128; /* 30 is enough */
1638 score->scopedist = ret;
1639 break;
1640 case IPV6_SADDR_RULE_PREFERRED:
1641 {
1642 /* Rule 3: Avoid deprecated and optimistic addresses */
1643 u8 avoid = IFA_F_DEPRECATED;
1644
1645 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1646 avoid |= IFA_F_OPTIMISTIC;
1647 ret = ipv6_saddr_preferred(score->addr_type) ||
1648 !(score->ifa->flags & avoid);
1649 break;
1650 }
1651 #ifdef CONFIG_IPV6_MIP6
1652 case IPV6_SADDR_RULE_HOA:
1653 {
1654 /* Rule 4: Prefer home address */
1655 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1656 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1657 break;
1658 }
1659 #endif
1660 case IPV6_SADDR_RULE_OIF:
1661 /* Rule 5: Prefer outgoing interface */
1662 ret = (!dst->ifindex ||
1663 dst->ifindex == score->ifa->idev->dev->ifindex);
1664 break;
1665 case IPV6_SADDR_RULE_LABEL:
1666 /* Rule 6: Prefer matching label */
1667 ret = ipv6_addr_label(net,
1668 &score->ifa->addr, score->addr_type,
1669 score->ifa->idev->dev->ifindex) == dst->label;
1670 break;
1671 case IPV6_SADDR_RULE_PRIVACY:
1672 {
1673 /* Rule 7: Prefer public address
1674 * Note: prefer temporary address if use_tempaddr >= 2
1675 */
1676 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1677 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1678 READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
1679 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1680 break;
1681 }
1682 case IPV6_SADDR_RULE_ORCHID:
1683 /* Rule 8-: Prefer ORCHID vs ORCHID or
1684 * non-ORCHID vs non-ORCHID
1685 */
1686 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1687 ipv6_addr_orchid(dst->addr));
1688 break;
1689 case IPV6_SADDR_RULE_PREFIX:
1690 /* Rule 8: Use longest matching prefix */
1691 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1692 if (ret > score->ifa->prefix_len)
1693 ret = score->ifa->prefix_len;
1694 score->matchlen = ret;
1695 break;
1696 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1697 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1698 /* Optimistic addresses still have lower precedence than other
1699 * preferred addresses.
1700 */
1701 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1702 break;
1703 #endif
1704 default:
1705 ret = 0;
1706 }
1707
1708 if (ret)
1709 __set_bit(i, score->scorebits);
1710 score->rule = i;
1711 out:
1712 return ret;
1713 }
1714
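/* Score every usable address on one interface against the current best
 * candidate.  scores[] has exactly two slots: the running best
 * (hiscore_idx) and a scratch entry for the address being evaluated;
 * whenever the candidate wins a rule comparison the slots are swapped,
 * so the returned index always points at the best score seen so far.
 */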
1715 static int __ipv6_dev_get_saddr(struct net *net,
1716 struct ipv6_saddr_dst *dst,
1717 struct inet6_dev *idev,
1718 struct ipv6_saddr_score *scores,
1719 int hiscore_idx)
1720 {
1721 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1722
1723 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1724 int i;
1725
1726 /*
1727 * - Tentative Address (RFC2462 section 5.4)
1728 * - A tentative address is not considered
1729 * "assigned to an interface" in the traditional
1730 * sense, unless it is also flagged as optimistic.
1731 * - Candidate Source Address (section 4)
1732 * - In any case, anycast addresses, multicast
1733 * addresses, and the unspecified address MUST
1734 * NOT be included in a candidate set.
1735 */
1736 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1737 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1738 continue;
1739
1740 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1741
1742 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1743 score->addr_type & IPV6_ADDR_MULTICAST)) {
1744 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1745 idev->dev->name);
1746 continue;
1747 }
1748
1749 score->rule = -1;
1750 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1751
1752 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1753 int minihiscore, miniscore;
1754
1755 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1756 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1757
1758 if (minihiscore > miniscore) {
1759 if (i == IPV6_SADDR_RULE_SCOPE &&
1760 score->scopedist > 0) {
1761 /*
1762 * special case:
1763 * each remaining entry
1764 * has too small (not enough)
1765 * scope, because ifa entries
1766 * are sorted by their scope
1767 * values.
1768 */
1769 goto out;
1770 }
1771 break;
1772 } else if (minihiscore < miniscore) {
1773 swap(hiscore, score);
1774 hiscore_idx = 1 - hiscore_idx;
1775
1776 /* restore our iterator */
1777 score->ifa = hiscore->ifa;
1778
1779 break;
1780 }
1781 }
1782 }
1783 out:
1784 return hiscore_idx;
1785 }
1786
1787 static int ipv6_get_saddr_master(struct net *net,
1788 const struct net_device *dst_dev,
1789 const struct net_device *master,
1790 struct ipv6_saddr_dst *dst,
1791 struct ipv6_saddr_score *scores,
1792 int hiscore_idx)
1793 {
1794 struct inet6_dev *idev;
1795
1796 idev = __in6_dev_get(dst_dev);
1797 if (idev)
1798 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1799 scores, hiscore_idx);
1800
1801 idev = __in6_dev_get(master);
1802 if (idev)
1803 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1804 scores, hiscore_idx);
1805
1806 return hiscore_idx;
1807 }
1808
1809 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1810 const struct in6_addr *daddr, unsigned int prefs,
1811 struct in6_addr *saddr)
1812 {
1813 struct ipv6_saddr_score scores[2], *hiscore;
1814 struct ipv6_saddr_dst dst;
1815 struct inet6_dev *idev;
1816 struct net_device *dev;
1817 int dst_type;
1818 bool use_oif_addr = false;
1819 int hiscore_idx = 0;
1820 int ret = 0;
1821
1822 dst_type = __ipv6_addr_type(daddr);
1823 dst.addr = daddr;
1824 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1825 dst.scope = __ipv6_addr_src_scope(dst_type);
1826 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1827 dst.prefs = prefs;
1828
1829 scores[hiscore_idx].rule = -1;
1830 scores[hiscore_idx].ifa = NULL;
1831
1832 rcu_read_lock();
1833
1834 /* Candidate Source Address (section 4)
1835 * - multicast and link-local destination address,
1836 * the set of candidate source address MUST only
1837 * include addresses assigned to interfaces
1838 * belonging to the same link as the outgoing
1839 * interface.
1840 * (- For site-local destination addresses, the
1841 * set of candidate source addresses MUST only
1842 * include addresses assigned to interfaces
1843 * belonging to the same site as the outgoing
1844 * interface.)
1845 * - "It is RECOMMENDED that the candidate source addresses
1846 * be the set of unicast addresses assigned to the
1847 * interface that will be used to send to the destination
1848 * (the 'outgoing' interface)." (RFC 6724)
1849 */
1850 if (dst_dev) {
1851 idev = __in6_dev_get(dst_dev);
1852 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1853 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1854 (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
1855 use_oif_addr = true;
1856 }
1857 }
1858
1859 if (use_oif_addr) {
1860 if (idev)
1861 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1862 } else {
1863 const struct net_device *master;
1864 int master_idx = 0;
1865
1866 /* if dst_dev exists and is enslaved to an L3 device, then
1867 * prefer addresses from dst_dev and then the master over
1868 * any other enslaved devices in the L3 domain.
1869 */
1870 master = l3mdev_master_dev_rcu(dst_dev);
1871 if (master) {
1872 master_idx = master->ifindex;
1873
1874 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1875 master, &dst,
1876 scores, hiscore_idx);
1877
1878 if (scores[hiscore_idx].ifa &&
1879 scores[hiscore_idx].scopedist >= 0)
1880 goto out;
1881 }
1882
1883 for_each_netdev_rcu(net, dev) {
1884 /* only consider addresses on devices in the
1885 * same L3 domain
1886 */
1887 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1888 continue;
1889 idev = __in6_dev_get(dev);
1890 if (!idev)
1891 continue;
1892 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1893 }
1894 }
1895
1896 out:
1897 hiscore = &scores[hiscore_idx];
1898 if (!hiscore->ifa)
1899 ret = -EADDRNOTAVAIL;
1900 else
1901 *saddr = hiscore->ifa->addr;
1902
1903 rcu_read_unlock();
1904 return ret;
1905 }
1906 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1907
1908 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1909 u32 banned_flags)
1910 {
1911 struct inet6_ifaddr *ifp;
1912 int err = -EADDRNOTAVAIL;
1913
1914 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1915 if (ifp->scope > IFA_LINK)
1916 break;
1917 if (ifp->scope == IFA_LINK &&
1918 !(ifp->flags & banned_flags)) {
1919 *addr = ifp->addr;
1920 err = 0;
1921 break;
1922 }
1923 }
1924 return err;
1925 }
1926
1927 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1928 u32 banned_flags)
1929 {
1930 struct inet6_dev *idev;
1931 int err = -EADDRNOTAVAIL;
1932
1933 rcu_read_lock();
1934 idev = __in6_dev_get(dev);
1935 if (idev) {
1936 read_lock_bh(&idev->lock);
1937 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1938 read_unlock_bh(&idev->lock);
1939 }
1940 rcu_read_unlock();
1941 return err;
1942 }
1943
1944 static int ipv6_count_addresses(const struct inet6_dev *idev)
1945 {
1946 const struct inet6_ifaddr *ifp;
1947 int cnt = 0;
1948
1949 rcu_read_lock();
1950 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1951 cnt++;
1952 rcu_read_unlock();
1953 return cnt;
1954 }
1955
1956 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1957 const struct net_device *dev, int strict)
1958 {
1959 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1960 strict, IFA_F_TENTATIVE);
1961 }
1962 EXPORT_SYMBOL(ipv6_chk_addr);
1963
1964 /* device argument is used to find the L3 domain of interest. If
1965 * skip_dev_check is set, then the ifp device is not checked against
1966 * the passed-in dev argument. So the two cases for address checks are:
1967 * 1. does the address exist in the L3 domain that dev is part of
1968 * (skip_dev_check = true), or
1969 *
1970 * 2. does the address exist on the specific device
1971 * (skip_dev_check = false)
1972 */
1973 static struct net_device *
1974 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1975 const struct net_device *dev, bool skip_dev_check,
1976 int strict, u32 banned_flags)
1977 {
1978 unsigned int hash = inet6_addr_hash(net, addr);
1979 struct net_device *l3mdev, *ndev;
1980 struct inet6_ifaddr *ifp;
1981 u32 ifp_flags;
1982
1983 rcu_read_lock();
1984
1985 l3mdev = l3mdev_master_dev_rcu(dev);
1986 if (skip_dev_check)
1987 dev = NULL;
1988
1989 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1990 ndev = ifp->idev->dev;
1991
1992 if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1993 continue;
1994
1995 /* Decouple optimistic from tentative for evaluation here.
1996 * Ban optimistic addresses explicitly, when required.
1997 */
1998 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1999 ? (ifp->flags&~IFA_F_TENTATIVE)
2000 : ifp->flags;
2001 if (ipv6_addr_equal(&ifp->addr, addr) &&
2002 !(ifp_flags&banned_flags) &&
2003 (!dev || ndev == dev ||
2004 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
2005 rcu_read_unlock();
2006 return ndev;
2007 }
2008 }
2009
2010 rcu_read_unlock();
2011 return NULL;
2012 }
2013
2014 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
2015 const struct net_device *dev, bool skip_dev_check,
2016 int strict, u32 banned_flags)
2017 {
2018 return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
2019 strict, banned_flags) ? 1 : 0;
2020 }
2021 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
2022
2023
2024 /* Compares an address/prefix_len with addresses on device @dev.
2025 * If one is found it returns true.
2026 */
2027 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
2028 const unsigned int prefix_len, struct net_device *dev)
2029 {
2030 const struct inet6_ifaddr *ifa;
2031 const struct inet6_dev *idev;
2032 bool ret = false;
2033
2034 rcu_read_lock();
2035 idev = __in6_dev_get(dev);
2036 if (idev) {
2037 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2038 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
2039 if (ret)
2040 break;
2041 }
2042 }
2043 rcu_read_unlock();
2044
2045 return ret;
2046 }
2047 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
2048
2049 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
2050 {
2051 const struct inet6_ifaddr *ifa;
2052 const struct inet6_dev *idev;
2053 int onlink;
2054
2055 onlink = 0;
2056 rcu_read_lock();
2057 idev = __in6_dev_get(dev);
2058 if (idev) {
2059 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2060 onlink = ipv6_prefix_equal(addr, &ifa->addr,
2061 ifa->prefix_len);
2062 if (onlink)
2063 break;
2064 }
2065 }
2066 rcu_read_unlock();
2067 return onlink;
2068 }
2069 EXPORT_SYMBOL(ipv6_chk_prefix);
2070
2071 /**
2072 * ipv6_dev_find - find the first device with a given source address.
2073 * @net: the net namespace
2074 * @addr: the source address
2075 * @dev: used to find the L3 domain of interest
2076 *
2077 * The caller should be protected by RCU, or RTNL.
2078 */
2079 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2080 struct net_device *dev)
2081 {
2082 return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2083 IFA_F_TENTATIVE);
2084 }
2085 EXPORT_SYMBOL(ipv6_dev_find);
2086
2087 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2088 struct net_device *dev, int strict)
2089 {
2090 unsigned int hash = inet6_addr_hash(net, addr);
2091 struct inet6_ifaddr *ifp, *result = NULL;
2092
2093 rcu_read_lock();
2094 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2095 if (ipv6_addr_equal(&ifp->addr, addr)) {
2096 if (!dev || ifp->idev->dev == dev ||
2097 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2098 if (in6_ifa_hold_safe(ifp)) {
2099 result = ifp;
2100 break;
2101 }
2102 }
2103 }
2104 }
2105 rcu_read_unlock();
2106
2107 return result;
2108 }
2109
2110 /* Gets referenced address, destroys ifaddr */
2111
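/* DAD failed or is being stopped for this address: a temporary address is
 * deleted and a replacement is regenerated from its public address; a
 * permanent address (or any address when DAD is stopped without a failure)
 * is kept but marked tentative, losing IFA_F_OPTIMISTIC on failure; any
 * other address is simply removed.
 */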
2112 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2113 {
2114 if (dad_failed)
2115 ifp->flags |= IFA_F_DADFAILED;
2116
2117 if (ifp->flags&IFA_F_TEMPORARY) {
2118 struct inet6_ifaddr *ifpub;
2119 spin_lock_bh(&ifp->lock);
2120 ifpub = ifp->ifpub;
2121 if (ifpub) {
2122 in6_ifa_hold(ifpub);
2123 spin_unlock_bh(&ifp->lock);
2124 ipv6_create_tempaddr(ifpub, true);
2125 in6_ifa_put(ifpub);
2126 } else {
2127 spin_unlock_bh(&ifp->lock);
2128 }
2129 ipv6_del_addr(ifp);
2130 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2131 spin_lock_bh(&ifp->lock);
2132 addrconf_del_dad_work(ifp);
2133 ifp->flags |= IFA_F_TENTATIVE;
2134 if (dad_failed)
2135 ifp->flags &= ~IFA_F_OPTIMISTIC;
2136 spin_unlock_bh(&ifp->lock);
2137 if (dad_failed)
2138 ipv6_ifa_notify(0, ifp);
2139 in6_ifa_put(ifp);
2140 } else {
2141 ipv6_del_addr(ifp);
2142 }
2143 }
2144
2145 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2146 {
2147 int err = -ENOENT;
2148
2149 spin_lock_bh(&ifp->lock);
2150 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2151 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2152 err = 0;
2153 }
2154 spin_unlock_bh(&ifp->lock);
2155
2156 return err;
2157 }
2158
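/* A duplicate was detected on the link: end DAD for this address and, for
 * stable-privacy addresses, try to generate and install a replacement IID
 * (up to idgen_retries attempts) before moving the original address to
 * ERRDAD and scheduling its teardown via the DAD work queue.
 */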
2159 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2160 {
2161 struct inet6_dev *idev = ifp->idev;
2162 struct net *net = dev_net(idev->dev);
2163 int max_addresses;
2164
2165 if (addrconf_dad_end(ifp)) {
2166 in6_ifa_put(ifp);
2167 return;
2168 }
2169
2170 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2171 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2172
2173 spin_lock_bh(&ifp->lock);
2174
2175 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2176 struct in6_addr new_addr;
2177 struct inet6_ifaddr *ifp2;
2178 int retries = ifp->stable_privacy_retry + 1;
2179 struct ifa6_config cfg = {
2180 .pfx = &new_addr,
2181 .plen = ifp->prefix_len,
2182 .ifa_flags = ifp->flags,
2183 .valid_lft = ifp->valid_lft,
2184 .preferred_lft = ifp->prefered_lft,
2185 .scope = ifp->scope,
2186 };
2187
2188 if (retries > net->ipv6.sysctl.idgen_retries) {
2189 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2190 ifp->idev->dev->name);
2191 goto errdad;
2192 }
2193
2194 new_addr = ifp->addr;
2195 if (ipv6_generate_stable_address(&new_addr, retries,
2196 idev))
2197 goto errdad;
2198
2199 spin_unlock_bh(&ifp->lock);
2200
2201 max_addresses = READ_ONCE(idev->cnf.max_addresses);
2202 if (max_addresses &&
2203 ipv6_count_addresses(idev) >= max_addresses)
2204 goto lock_errdad;
2205
2206 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2207 ifp->idev->dev->name);
2208
2209 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2210 if (IS_ERR(ifp2))
2211 goto lock_errdad;
2212
2213 spin_lock_bh(&ifp2->lock);
2214 ifp2->stable_privacy_retry = retries;
2215 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2216 spin_unlock_bh(&ifp2->lock);
2217
2218 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2219 in6_ifa_put(ifp2);
2220 lock_errdad:
2221 spin_lock_bh(&ifp->lock);
2222 }
2223
2224 errdad:
2225 /* transition from _POSTDAD to _ERRDAD */
2226 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2227 spin_unlock_bh(&ifp->lock);
2228
2229 addrconf_mod_dad_work(ifp, 0);
2230 in6_ifa_put(ifp);
2231 }
2232
2233 /* Join to solicited addr multicast group. */
2234 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2235 {
2236 struct in6_addr maddr;
2237
2238 if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
2239 return;
2240
2241 addrconf_addr_solict_mult(addr, &maddr);
2242 ipv6_dev_mc_inc(dev, &maddr);
2243 }
2244
2245 /* caller must hold RTNL */
2246 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2247 {
2248 struct in6_addr maddr;
2249
2250 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2251 return;
2252
2253 addrconf_addr_solict_mult(addr, &maddr);
2254 __ipv6_dev_mc_dec(idev, &maddr);
2255 }
2256
2257 /* caller must hold RTNL */
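/* Join the subnet-router anycast address, i.e. the subnet prefix with an
 * all-zero interface identifier (RFC 4291).  Skipped for prefixes of /127
 * or longer, per RFC 6164.
 */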
2258 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2259 {
2260 struct in6_addr addr;
2261
2262 if (ifp->prefix_len >= 127) /* RFC 6164 */
2263 return;
2264 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2265 if (ipv6_addr_any(&addr))
2266 return;
2267 __ipv6_dev_ac_inc(ifp->idev, &addr);
2268 }
2269
2270 /* caller must hold RTNL */
2271 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2272 {
2273 struct in6_addr addr;
2274
2275 if (ifp->prefix_len >= 127) /* RFC 6164 */
2276 return;
2277 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2278 if (ipv6_addr_any(&addr))
2279 return;
2280 __ipv6_dev_ac_dec(ifp->idev, &addr);
2281 }
2282
2283 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2284 {
2285 switch (dev->addr_len) {
2286 case ETH_ALEN:
2287 memcpy(eui, dev->dev_addr, 3);
2288 eui[3] = 0xFF;
2289 eui[4] = 0xFE;
2290 memcpy(eui + 5, dev->dev_addr + 3, 3);
2291 break;
2292 case EUI64_ADDR_LEN:
2293 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2294 eui[0] ^= 2;
2295 break;
2296 default:
2297 return -1;
2298 }
2299
2300 return 0;
2301 }
2302
2303 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2304 {
2305 const union fwnet_hwaddr *ha;
2306
2307 if (dev->addr_len != FWNET_ALEN)
2308 return -1;
2309
2310 ha = (const union fwnet_hwaddr *)dev->dev_addr;
2311
2312 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2313 eui[0] ^= 2;
2314 return 0;
2315 }
2316
2317 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2318 {
2319 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2320 if (dev->addr_len != ARCNET_ALEN)
2321 return -1;
2322 memset(eui, 0, 7);
2323 eui[7] = *(u8 *)dev->dev_addr;
2324 return 0;
2325 }
2326
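/* IPoIB hardware addresses are 20 bytes; the low 8 bytes carry the port
 * GUID, which becomes the interface identifier with the universal/local
 * bit forced on.
 */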
2327 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2328 {
2329 if (dev->addr_len != INFINIBAND_ALEN)
2330 return -1;
2331 memcpy(eui, dev->dev_addr + 12, 8);
2332 eui[0] |= 2;
2333 return 0;
2334 }
2335
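/* Build an ISATAP interface identifier (RFC 5214): 00-00-5E-FE or
 * 02-00-5E-FE followed by the embedded IPv4 address, with the u/l bit set
 * only when the IPv4 address is globally unique.
 */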
2336 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2337 {
2338 if (addr == 0)
2339 return -1;
2340 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2341 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2342 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2343 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2344 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2345 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2346 eui[1] = 0;
2347 eui[2] = 0x5E;
2348 eui[3] = 0xFE;
2349 memcpy(eui + 4, &addr, 4);
2350 return 0;
2351 }
2352
2353 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2354 {
2355 if (dev->priv_flags & IFF_ISATAP)
2356 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2357 return -1;
2358 }
2359
2360 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2361 {
2362 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2363 }
2364
2365 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2366 {
2367 memcpy(eui, dev->perm_addr, 3);
2368 memcpy(eui + 5, dev->perm_addr + 3, 3);
2369 eui[3] = 0xFF;
2370 eui[4] = 0xFE;
2371 eui[0] ^= 2;
2372 return 0;
2373 }
2374
2375 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2376 {
2377 switch (dev->type) {
2378 case ARPHRD_ETHER:
2379 case ARPHRD_FDDI:
2380 return addrconf_ifid_eui48(eui, dev);
2381 case ARPHRD_ARCNET:
2382 return addrconf_ifid_arcnet(eui, dev);
2383 case ARPHRD_INFINIBAND:
2384 return addrconf_ifid_infiniband(eui, dev);
2385 case ARPHRD_SIT:
2386 return addrconf_ifid_sit(eui, dev);
2387 case ARPHRD_IPGRE:
2388 case ARPHRD_TUNNEL:
2389 return addrconf_ifid_gre(eui, dev);
2390 case ARPHRD_6LOWPAN:
2391 return addrconf_ifid_6lowpan(eui, dev);
2392 case ARPHRD_IEEE1394:
2393 return addrconf_ifid_ieee1394(eui, dev);
2394 case ARPHRD_TUNNEL6:
2395 case ARPHRD_IP6GRE:
2396 case ARPHRD_RAWIP:
2397 return addrconf_ifid_ip6tnl(eui, dev);
2398 }
2399 return -1;
2400 }
2401
2402 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2403 {
2404 int err = -1;
2405 struct inet6_ifaddr *ifp;
2406
2407 read_lock_bh(&idev->lock);
2408 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2409 if (ifp->scope > IFA_LINK)
2410 break;
2411 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2412 memcpy(eui, ifp->addr.s6_addr+8, 8);
2413 err = 0;
2414 break;
2415 }
2416 }
2417 read_unlock_bh(&idev->lock);
2418 return err;
2419 }
2420
2421 /* Generation of a randomized Interface Identifier
2422 * draft-ietf-6man-rfc4941bis, Section 3.3.1
2423 */
2424
2425 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2426 {
2427 regen:
2428 get_random_bytes(&addr->s6_addr[8], 8);
2429
2430 /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2431 * check if generated address is not inappropriate:
2432 *
2433 * - Reserved IPv6 Interface Identifiers
2434 * - XXX: already assigned to an address on the device
2435 */
2436
2437 /* Subnet-router anycast: 0000:0000:0000:0000 */
2438 if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2439 goto regen;
2440
2441 /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2442 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213
2443 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2444 */
2445 if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2446 (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2447 goto regen;
2448
2449 /* Reserved subnet anycast addresses */
2450 if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2451 ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2452 goto regen;
2453 }
2454
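/* Select the routing table for RA-learned routes: accept_ra_rt_table == 0
 * keeps the caller's default table, a positive value selects that table
 * directly, and a negative value gives each interface its own table at
 * (ifindex + |value|).
 */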
2455 u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
2456 {
2457 struct inet6_dev *idev = in6_dev_get(dev);
2458 int sysctl;
2459 u32 table;
2460
2461 if (!idev)
2462 return default_table;
2463 sysctl = idev->cnf.accept_ra_rt_table;
2464 if (sysctl == 0) {
2465 table = default_table;
2466 } else if (sysctl > 0) {
2467 table = (u32) sysctl;
2468 } else {
2469 table = (unsigned) dev->ifindex + (-sysctl);
2470 }
2471 in6_dev_put(idev);
2472 return table;
2473 }
2474
2475 /*
2476 * Add prefix route.
2477 */
2478
2479 static void
2480 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2481 struct net_device *dev, unsigned long expires,
2482 u32 flags, gfp_t gfp_flags)
2483 {
2484 struct fib6_config cfg = {
2485 .fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
2486 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2487 .fc_ifindex = dev->ifindex,
2488 .fc_expires = expires,
2489 .fc_dst_len = plen,
2490 .fc_flags = RTF_UP | flags,
2491 .fc_nlinfo.nl_net = dev_net(dev),
2492 .fc_protocol = RTPROT_KERNEL,
2493 .fc_type = RTN_UNICAST,
2494 };
2495
2496 cfg.fc_dst = *pfx;
2497
2498 /* Prevent useless cloning on PtP SIT.
2499 This is done here on the assumption that the whole
2500 class of non-broadcast devices does not need cloning.
2501 */
2502 #if IS_ENABLED(CONFIG_IPV6_SIT)
2503 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2504 cfg.fc_flags |= RTF_NONEXTHOP;
2505 #endif
2506
2507 ip6_route_add(&cfg, gfp_flags, NULL);
2508 }
2509
2510
2511 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2512 int plen,
2513 const struct net_device *dev,
2514 u32 flags, u32 noflags,
2515 bool no_gw)
2516 {
2517 struct fib6_node *fn;
2518 struct fib6_info *rt = NULL;
2519 struct fib6_table *table;
2520 u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
2521
2522 table = fib6_get_table(dev_net(dev), tb_id);
2523 if (!table)
2524 return NULL;
2525
2526 rcu_read_lock();
2527 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2528 if (!fn)
2529 goto out;
2530
2531 for_each_fib6_node_rt_rcu(fn) {
2532 /* prefix routes only use builtin fib6_nh */
2533 if (rt->nh)
2534 continue;
2535
2536 if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2537 continue;
2538 if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2539 continue;
2540 if ((rt->fib6_flags & flags) != flags)
2541 continue;
2542 if ((rt->fib6_flags & noflags) != 0)
2543 continue;
2544 if (!fib6_info_hold_safe(rt))
2545 continue;
2546 break;
2547 }
2548 out:
2549 rcu_read_unlock();
2550 return rt;
2551 }
2552
2553
2554 /* Create "default" multicast route to the interface */
2555
2556 static void addrconf_add_mroute(struct net_device *dev)
2557 {
2558 struct fib6_config cfg = {
2559 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2560 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2561 .fc_ifindex = dev->ifindex,
2562 .fc_dst_len = 8,
2563 .fc_flags = RTF_UP,
2564 .fc_type = RTN_MULTICAST,
2565 .fc_nlinfo.nl_net = dev_net(dev),
2566 .fc_protocol = RTPROT_KERNEL,
2567 };
2568
2569 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2570
2571 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2572 }
2573
2574 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2575 {
2576 struct inet6_dev *idev;
2577
2578 ASSERT_RTNL();
2579
2580 idev = ipv6_find_idev(dev);
2581 if (IS_ERR(idev))
2582 return idev;
2583
2584 if (idev->cnf.disable_ipv6)
2585 return ERR_PTR(-EACCES);
2586
2587 /* Add default multicast route */
2588 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2589 addrconf_add_mroute(dev);
2590
2591 return idev;
2592 }
2593
2594 static void delete_tempaddrs(struct inet6_dev *idev,
2595 struct inet6_ifaddr *ifp)
2596 {
2597 struct inet6_ifaddr *ift, *tmp;
2598
2599 write_lock_bh(&idev->lock);
2600 list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) {
2601 if (ift->ifpub != ifp)
2602 continue;
2603
2604 in6_ifa_hold(ift);
2605 write_unlock_bh(&idev->lock);
2606 ipv6_del_addr(ift);
2607 write_lock_bh(&idev->lock);
2608 }
2609 write_unlock_bh(&idev->lock);
2610 }
2611
2612 static void manage_tempaddrs(struct inet6_dev *idev,
2613 struct inet6_ifaddr *ifp,
2614 __u32 valid_lft, __u32 prefered_lft,
2615 bool create, unsigned long now)
2616 {
2617 u32 flags;
2618 struct inet6_ifaddr *ift;
2619
2620 read_lock_bh(&idev->lock);
2621 /* update all temporary addresses in the list */
2622 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2623 int age, max_valid, max_prefered;
2624
2625 if (ifp != ift->ifpub)
2626 continue;
2627
2628 /* RFC 4941 section 3.3:
2629 * If a received option will extend the lifetime of a public
2630 * address, the lifetimes of temporary addresses should
2631 * be extended, subject to the overall constraint that no
2632 * temporary addresses should ever remain "valid" or "preferred"
2633 * for a time longer than (TEMP_VALID_LIFETIME) or
2634 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2635 */
2636 age = (now - ift->cstamp) / HZ;
2637 max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
2638 if (max_valid < 0)
2639 max_valid = 0;
2640
2641 max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
2642 idev->desync_factor - age;
2643 if (max_prefered < 0)
2644 max_prefered = 0;
2645
2646 if (valid_lft > max_valid)
2647 valid_lft = max_valid;
2648
2649 if (prefered_lft > max_prefered)
2650 prefered_lft = max_prefered;
2651
2652 spin_lock(&ift->lock);
2653 flags = ift->flags;
2654 ift->valid_lft = valid_lft;
2655 ift->prefered_lft = prefered_lft;
2656 ift->tstamp = now;
2657 if (prefered_lft > 0)
2658 ift->flags &= ~IFA_F_DEPRECATED;
2659
2660 spin_unlock(&ift->lock);
2661 if (!(flags&IFA_F_TENTATIVE))
2662 ipv6_ifa_notify(0, ift);
2663 }
2664
2665 /* Also create a temporary address if it's enabled but no temporary
2666 * address currently exists.
2667 * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
2668 * as part of cleanup (i.e. deleting the mngtmpaddr).
2669 * We don't want that to result in creating a new temporary IP address.
2670 */
2671 if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
2672 create = true;
2673
2674 if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
2675 /* When a new public address is created as described
2676 * in [ADDRCONF], also create a new temporary address.
2677 */
2678 read_unlock_bh(&idev->lock);
2679 ipv6_create_tempaddr(ifp, false);
2680 } else {
2681 read_unlock_bh(&idev->lock);
2682 }
2683 }
2684
2685 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2686 {
2687 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2688 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2689 }
2690
2691 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2692 const struct prefix_info *pinfo,
2693 struct inet6_dev *in6_dev,
2694 const struct in6_addr *addr, int addr_type,
2695 u32 addr_flags, bool sllao, bool tokenized,
2696 __u32 valid_lft, u32 prefered_lft)
2697 {
2698 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2699 int create = 0, update_lft = 0;
2700
2701 if (!ifp && valid_lft) {
2702 int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
2703 struct ifa6_config cfg = {
2704 .pfx = addr,
2705 .plen = pinfo->prefix_len,
2706 .ifa_flags = addr_flags,
2707 .valid_lft = valid_lft,
2708 .preferred_lft = prefered_lft,
2709 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2710 .ifa_proto = IFAPROT_KERNEL_RA
2711 };
2712
2713 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2714 if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
2715 READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
2716 !net->ipv6.devconf_all->forwarding && sllao)
2717 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2718 #endif
2719
2720 /* Do not allow creating too many autoconfigured
2721 * addresses; this would be too easy a way to crash the kernel.
2722 */
2723 if (!max_addresses ||
2724 ipv6_count_addresses(in6_dev) < max_addresses)
2725 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2726
2727 if (IS_ERR_OR_NULL(ifp))
2728 return -1;
2729
2730 create = 1;
2731 spin_lock_bh(&ifp->lock);
2732 ifp->flags |= IFA_F_MANAGETEMPADDR;
2733 ifp->cstamp = jiffies;
2734 ifp->tokenized = tokenized;
2735 spin_unlock_bh(&ifp->lock);
2736 addrconf_dad_start(ifp);
2737 }
2738
2739 if (ifp) {
2740 u32 flags;
2741 unsigned long now;
2742 u32 stored_lft;
2743
2744 /* update lifetime (RFC2462 5.5.3 e) */
2745 spin_lock_bh(&ifp->lock);
2746 now = jiffies;
2747 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2748 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2749 else
2750 stored_lft = 0;
2751
2752 /* RFC4862 Section 5.5.3e:
2753 * "Note that the preferred lifetime of the
2754 * corresponding address is always reset to
2755 * the Preferred Lifetime in the received
2756 * Prefix Information option, regardless of
2757 * whether the valid lifetime is also reset or
2758 * ignored."
2759 *
2760 * So we should always update prefered_lft here.
2761 */
2762 update_lft = !create && stored_lft;
2763
2764 if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
2765 const u32 minimum_lft = min_t(u32,
2766 stored_lft, MIN_VALID_LIFETIME);
2767 valid_lft = max(valid_lft, minimum_lft);
2768 }
2769
2770 if (update_lft) {
2771 ifp->valid_lft = valid_lft;
2772 ifp->prefered_lft = prefered_lft;
2773 WRITE_ONCE(ifp->tstamp, now);
2774 flags = ifp->flags;
2775 ifp->flags &= ~IFA_F_DEPRECATED;
2776 spin_unlock_bh(&ifp->lock);
2777
2778 if (!(flags&IFA_F_TENTATIVE))
2779 ipv6_ifa_notify(0, ifp);
2780 } else
2781 spin_unlock_bh(&ifp->lock);
2782
2783 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2784 create, now);
2785
2786 in6_ifa_put(ifp);
2787 addrconf_verify(net);
2788 }
2789
2790 return 0;
2791 }
2792 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2793
2794 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2795 {
2796 struct prefix_info *pinfo;
2797 struct fib6_table *table;
2798 __u32 valid_lft;
2799 __u32 prefered_lft;
2800 int addr_type, err;
2801 u32 addr_flags = 0;
2802 struct inet6_dev *in6_dev;
2803 struct net *net = dev_net(dev);
2804 bool ignore_autoconf = false;
2805
2806 pinfo = (struct prefix_info *) opt;
2807
2808 if (len < sizeof(struct prefix_info)) {
2809 netdev_dbg(dev, "addrconf: prefix option too short\n");
2810 return;
2811 }
2812
2813 /*
2814 * Validation checks ([ADDRCONF], page 19)
2815 */
2816
2817 addr_type = ipv6_addr_type(&pinfo->prefix);
2818
2819 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2820 return;
2821
2822 valid_lft = ntohl(pinfo->valid);
2823 prefered_lft = ntohl(pinfo->prefered);
2824
2825 if (prefered_lft > valid_lft) {
2826 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2827 return;
2828 }
2829
2830 in6_dev = in6_dev_get(dev);
2831
2832 if (!in6_dev) {
2833 net_dbg_ratelimited("addrconf: device %s not configured\n",
2834 dev->name);
2835 return;
2836 }
2837
2838 if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
2839 goto put;
2840
2841 /*
2842 * Two things going on here:
2843 * 1) Add routes for on-link prefixes
2844 * 2) Configure prefixes with the auto flag set
2845 */
2846
2847 if (pinfo->onlink) {
2848 struct fib6_info *rt;
2849 unsigned long rt_expires;
2850
2851 /* Avoid arithmetic overflow. Really, we could
2852 * save rt_expires in seconds, likely valid_lft,
2853 * but that would require division in fib gc, which is
2854 * not good.
2855 */
2856 if (HZ > USER_HZ)
2857 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2858 else
2859 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2860
2861 if (addrconf_finite_timeout(rt_expires))
2862 rt_expires *= HZ;
2863
2864 rt = addrconf_get_prefix_route(&pinfo->prefix,
2865 pinfo->prefix_len,
2866 dev,
2867 RTF_ADDRCONF | RTF_PREFIX_RT,
2868 RTF_DEFAULT, true);
2869
2870 if (rt) {
2871 /* Autoconf prefix route */
2872 if (valid_lft == 0) {
2873 ip6_del_rt(net, rt, false);
2874 rt = NULL;
2875 } else {
2876 table = rt->fib6_table;
2877 spin_lock_bh(&table->tb6_lock);
2878
2879 if (addrconf_finite_timeout(rt_expires)) {
2880 /* not infinity */
2881 fib6_set_expires(rt, jiffies + rt_expires);
2882 fib6_add_gc_list(rt);
2883 } else {
2884 fib6_clean_expires(rt);
2885 fib6_remove_gc_list(rt);
2886 }
2887
2888 spin_unlock_bh(&table->tb6_lock);
2889 }
2890 } else if (valid_lft) {
2891 clock_t expires = 0;
2892 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2893 if (addrconf_finite_timeout(rt_expires)) {
2894 /* not infinity */
2895 flags |= RTF_EXPIRES;
2896 expires = jiffies_to_clock_t(rt_expires);
2897 }
2898 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2899 0, dev, expires, flags,
2900 GFP_ATOMIC);
2901 }
2902 fib6_info_release(rt);
2903 }
2904
2905 /* Try to figure out our local address for this prefix */
2906
2907 ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
2908 if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
2909 struct in6_addr addr;
2910 bool tokenized = false, dev_addr_generated = false;
2911
2912 if (pinfo->prefix_len == 64) {
2913 memcpy(&addr, &pinfo->prefix, 8);
2914
2915 if (!ipv6_addr_any(&in6_dev->token)) {
2916 read_lock_bh(&in6_dev->lock);
2917 memcpy(addr.s6_addr + 8,
2918 in6_dev->token.s6_addr + 8, 8);
2919 read_unlock_bh(&in6_dev->lock);
2920 tokenized = true;
2921 } else if (is_addr_mode_generate_stable(in6_dev) &&
2922 !ipv6_generate_stable_address(&addr, 0,
2923 in6_dev)) {
2924 addr_flags |= IFA_F_STABLE_PRIVACY;
2925 goto ok;
2926 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2927 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2928 goto put;
2929 } else {
2930 dev_addr_generated = true;
2931 }
2932 goto ok;
2933 }
2934 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2935 pinfo->prefix_len);
2936 goto put;
2937
2938 ok:
2939 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2940 &addr, addr_type,
2941 addr_flags, sllao,
2942 tokenized, valid_lft,
2943 prefered_lft);
2944 if (err)
2945 goto put;
2946
2947 /* Ignore the error case here because the previous prefix add addr
2948 * was successful, which will be notified.
2949 */
2950 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2951 addr_type, addr_flags, sllao,
2952 tokenized, valid_lft,
2953 prefered_lft,
2954 dev_addr_generated);
2955 }
2956 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2957 put:
2958 in6_dev_put(in6_dev);
2959 }
2960
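/* Create a new SIT tunnel whose IPv4 destination is taken from the low
 * 32 bits of the v4-compatible IPv6 address, then bring the resulting
 * interface up.
 */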
2961 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2962 struct in6_ifreq *ireq)
2963 {
2964 struct ip_tunnel_parm_kern p = { };
2965 int err;
2966
2967 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2968 return -EADDRNOTAVAIL;
2969
2970 p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2971 p.iph.version = 4;
2972 p.iph.ihl = 5;
2973 p.iph.protocol = IPPROTO_IPV6;
2974 p.iph.ttl = 64;
2975
2976 if (!dev->netdev_ops->ndo_tunnel_ctl)
2977 return -EOPNOTSUPP;
2978 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2979 if (err)
2980 return err;
2981
2982 dev = __dev_get_by_name(net, p.name);
2983 if (!dev)
2984 return -ENOBUFS;
2985 return dev_open(dev, NULL);
2986 }
2987
2988 /*
2989 * Set destination address.
2990 * Special case for SIT interfaces where we create a new "virtual"
2991 * device.
2992 */
2993 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2994 {
2995 struct net_device *dev;
2996 struct in6_ifreq ireq;
2997 int err = -ENODEV;
2998
2999 if (!IS_ENABLED(CONFIG_IPV6_SIT))
3000 return -ENODEV;
3001 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3002 return -EFAULT;
3003
3004 rtnl_lock();
3005 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
3006 if (dev && dev->type == ARPHRD_SIT)
3007 err = addrconf_set_sit_dstaddr(net, dev, &ireq);
3008 rtnl_unlock();
3009 return err;
3010 }
3011
3012 static int ipv6_mc_config(struct sock *sk, bool join,
3013 const struct in6_addr *addr, int ifindex)
3014 {
3015 int ret;
3016
3017 ASSERT_RTNL();
3018
3019 lock_sock(sk);
3020 if (join)
3021 ret = ipv6_sock_mc_join(sk, ifindex, addr);
3022 else
3023 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
3024 release_sock(sk);
3025
3026 return ret;
3027 }
3028
3029 /*
3030 * Manual configuration of address on an interface
3031 */
3032 static int inet6_addr_add(struct net *net, int ifindex,
3033 struct ifa6_config *cfg,
3034 struct netlink_ext_ack *extack)
3035 {
3036 struct inet6_ifaddr *ifp;
3037 struct inet6_dev *idev;
3038 struct net_device *dev;
3039 unsigned long timeout;
3040 clock_t expires;
3041 u32 flags;
3042
3043 ASSERT_RTNL();
3044
3045 if (cfg->plen > 128) {
3046 NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
3047 return -EINVAL;
3048 }
3049
3050 /* check the lifetime */
3051 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft) {
3052 NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid");
3053 return -EINVAL;
3054 }
3055
3056 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) {
3057 NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64");
3058 return -EINVAL;
3059 }
3060
3061 dev = __dev_get_by_index(net, ifindex);
3062 if (!dev)
3063 return -ENODEV;
3064
3065 idev = addrconf_add_dev(dev);
3066 if (IS_ERR(idev)) {
3067 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
3068 return PTR_ERR(idev);
3069 }
3070
3071 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
3072 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3073 true, cfg->pfx, ifindex);
3074
3075 if (ret < 0) {
3076 NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed");
3077 return ret;
3078 }
3079 }
3080
3081 cfg->scope = ipv6_addr_scope(cfg->pfx);
3082
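/* Convert the requested lifetimes: an infinite valid lifetime makes the
 * address permanent, a finite one arms RTF_EXPIRES on the prefix route,
 * and a preferred lifetime of zero marks the address deprecated.
 */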
3083 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
3084 if (addrconf_finite_timeout(timeout)) {
3085 expires = jiffies_to_clock_t(timeout * HZ);
3086 cfg->valid_lft = timeout;
3087 flags = RTF_EXPIRES;
3088 } else {
3089 expires = 0;
3090 flags = 0;
3091 cfg->ifa_flags |= IFA_F_PERMANENT;
3092 }
3093
3094 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
3095 if (addrconf_finite_timeout(timeout)) {
3096 if (timeout == 0)
3097 cfg->ifa_flags |= IFA_F_DEPRECATED;
3098 cfg->preferred_lft = timeout;
3099 }
3100
3101 ifp = ipv6_add_addr(idev, cfg, true, extack);
3102 if (!IS_ERR(ifp)) {
3103 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
3104 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3105 ifp->rt_priority, dev, expires,
3106 flags, GFP_KERNEL);
3107 }
3108
3109 /* Send a netlink notification if DAD is enabled and
3110 * optimistic flag is not set
3111 */
3112 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
3113 ipv6_ifa_notify(0, ifp);
3114 /*
3115 * Note that section 3.1 of RFC 4429 indicates
3116 * that the Optimistic flag should not be set for
3117 * manually configured addresses
3118 */
3119 addrconf_dad_start(ifp);
3120 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
3121 manage_tempaddrs(idev, ifp, cfg->valid_lft,
3122 cfg->preferred_lft, true, jiffies);
3123 in6_ifa_put(ifp);
3124 addrconf_verify_rtnl(net);
3125 return 0;
3126 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
3127 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
3128 cfg->pfx, ifindex);
3129 }
3130
3131 return PTR_ERR(ifp);
3132 }
3133
3134 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3135 const struct in6_addr *pfx, unsigned int plen,
3136 struct netlink_ext_ack *extack)
3137 {
3138 struct inet6_ifaddr *ifp;
3139 struct inet6_dev *idev;
3140 struct net_device *dev;
3141
3142 if (plen > 128) {
3143 NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
3144 return -EINVAL;
3145 }
3146
3147 dev = __dev_get_by_index(net, ifindex);
3148 if (!dev) {
3149 NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
3150 return -ENODEV;
3151 }
3152
3153 idev = __in6_dev_get(dev);
3154 if (!idev) {
3155 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
3156 return -ENXIO;
3157 }
3158
3159 read_lock_bh(&idev->lock);
3160 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3161 if (ifp->prefix_len == plen &&
3162 ipv6_addr_equal(pfx, &ifp->addr)) {
3163 in6_ifa_hold(ifp);
3164 read_unlock_bh(&idev->lock);
3165
3166 ipv6_del_addr(ifp);
3167
3168 if (!(ifp->flags & IFA_F_TEMPORARY) &&
3169 (ifp->flags & IFA_F_MANAGETEMPADDR))
3170 delete_tempaddrs(idev, ifp);
3171
3172 addrconf_verify_rtnl(net);
3173 if (ipv6_addr_is_multicast(pfx)) {
3174 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3175 false, pfx, dev->ifindex);
3176 }
3177 return 0;
3178 }
3179 }
3180 read_unlock_bh(&idev->lock);
3181
3182 NL_SET_ERR_MSG_MOD(extack, "address not found");
3183 return -EADDRNOTAVAIL;
3184 }
3185
3186
3187 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3188 {
3189 struct ifa6_config cfg = {
3190 .ifa_flags = IFA_F_PERMANENT,
3191 .preferred_lft = INFINITY_LIFE_TIME,
3192 .valid_lft = INFINITY_LIFE_TIME,
3193 };
3194 struct in6_ifreq ireq;
3195 int err;
3196
3197 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3198 return -EPERM;
3199
3200 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3201 return -EFAULT;
3202
3203 cfg.pfx = &ireq.ifr6_addr;
3204 cfg.plen = ireq.ifr6_prefixlen;
3205
3206 rtnl_lock();
3207 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3208 rtnl_unlock();
3209 return err;
3210 }
3211
3212 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3213 {
3214 struct in6_ifreq ireq;
3215 int err;
3216
3217 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3218 return -EPERM;
3219
3220 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3221 return -EFAULT;
3222
3223 rtnl_lock();
3224 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3225 ireq.ifr6_prefixlen, NULL);
3226 rtnl_unlock();
3227 return err;
3228 }
3229
3230 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3231 int plen, int scope, u8 proto)
3232 {
3233 struct inet6_ifaddr *ifp;
3234 struct ifa6_config cfg = {
3235 .pfx = addr,
3236 .plen = plen,
3237 .ifa_flags = IFA_F_PERMANENT,
3238 .valid_lft = INFINITY_LIFE_TIME,
3239 .preferred_lft = INFINITY_LIFE_TIME,
3240 .scope = scope,
3241 .ifa_proto = proto
3242 };
3243
3244 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3245 if (!IS_ERR(ifp)) {
3246 spin_lock_bh(&ifp->lock);
3247 ifp->flags &= ~IFA_F_TENTATIVE;
3248 spin_unlock_bh(&ifp->lock);
3249 rt_genid_bump_ipv6(dev_net(idev->dev));
3250 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3251 in6_ifa_put(ifp);
3252 }
3253 }
3254
3255 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
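/* Derive IPv6 addresses from the device's IPv4 configuration: plain SIT
 * tunnels get an IPv4-compatible ::a.b.c.d/96 address, other tunnel types
 * a link-local fe80::/64 address built from the IPv4 address.  If the
 * device has no local IPv4 address, the IPv4 addresses of all UP devices
 * are used instead.
 */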
3256 static void add_v4_addrs(struct inet6_dev *idev)
3257 {
3258 struct in6_addr addr;
3259 struct net_device *dev;
3260 struct net *net = dev_net(idev->dev);
3261 int scope, plen;
3262 u32 pflags = 0;
3263
3264 ASSERT_RTNL();
3265
3266 memset(&addr, 0, sizeof(struct in6_addr));
3267 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3268
3269 if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
3270 scope = IPV6_ADDR_COMPATv4;
3271 plen = 96;
3272 pflags |= RTF_NONEXTHOP;
3273 } else {
3274 if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3275 return;
3276
3277 addr.s6_addr32[0] = htonl(0xfe800000);
3278 scope = IFA_LINK;
3279 plen = 64;
3280 }
3281
3282 if (addr.s6_addr32[3]) {
3283 add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3284 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3285 GFP_KERNEL);
3286 return;
3287 }
3288
3289 for_each_netdev(net, dev) {
3290 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3291 if (in_dev && (dev->flags & IFF_UP)) {
3292 struct in_ifaddr *ifa;
3293 int flag = scope;
3294
3295 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3296 addr.s6_addr32[3] = ifa->ifa_local;
3297
3298 if (ifa->ifa_scope == RT_SCOPE_LINK)
3299 continue;
3300 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3301 if (idev->dev->flags&IFF_POINTOPOINT)
3302 continue;
3303 flag |= IFA_HOST;
3304 }
3305
3306 add_addr(idev, &addr, plen, flag,
3307 IFAPROT_UNSPEC);
3308 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3309 0, pflags, GFP_KERNEL);
3310 }
3311 }
3312 }
3313 }
3314 #endif
3315
3316 static void init_loopback(struct net_device *dev)
3317 {
3318 struct inet6_dev *idev;
3319
3320 /* ::1 */
3321
3322 ASSERT_RTNL();
3323
3324 idev = ipv6_find_idev(dev);
3325 if (IS_ERR(idev)) {
3326 pr_debug("%s: add_dev failed\n", __func__);
3327 return;
3328 }
3329
3330 add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3331 }
3332
3333 void addrconf_add_linklocal(struct inet6_dev *idev,
3334 const struct in6_addr *addr, u32 flags)
3335 {
3336 struct ifa6_config cfg = {
3337 .pfx = addr,
3338 .plen = 64,
3339 .ifa_flags = flags | IFA_F_PERMANENT,
3340 .valid_lft = INFINITY_LIFE_TIME,
3341 .preferred_lft = INFINITY_LIFE_TIME,
3342 .scope = IFA_LINK,
3343 .ifa_proto = IFAPROT_KERNEL_LL
3344 };
3345 struct inet6_ifaddr *ifp;
3346
3347 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3348 if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
3349 READ_ONCE(idev->cnf.optimistic_dad)) &&
3350 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3351 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3352 #endif
3353
3354 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3355 if (!IS_ERR(ifp)) {
3356 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3357 0, 0, GFP_ATOMIC);
3358 addrconf_dad_start(ifp);
3359 in6_ifa_put(ifp);
3360 }
3361 }
3362 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3363
3364 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3365 {
3366 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3367 return true;
3368
3369 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3370 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3371 return true;
3372
3373 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3374 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3375 return true;
3376
3377 return false;
3378 }
3379
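/* RFC 7217-style stable privacy interface identifiers: the IID is the
 * first 64 bits of SHA-1 over (secret, prefix, hardware address,
 * dad_count).  Reserved IIDs are avoided by retrying with an incremented
 * dad_count, up to idgen_retries attempts.
 */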
3380 static int ipv6_generate_stable_address(struct in6_addr *address,
3381 u8 dad_count,
3382 const struct inet6_dev *idev)
3383 {
3384 static DEFINE_SPINLOCK(lock);
3385 static __u32 digest[SHA1_DIGEST_WORDS];
3386 static __u32 workspace[SHA1_WORKSPACE_WORDS];
3387
3388 static union {
3389 char __data[SHA1_BLOCK_SIZE];
3390 struct {
3391 struct in6_addr secret;
3392 __be32 prefix[2];
3393 unsigned char hwaddr[MAX_ADDR_LEN];
3394 u8 dad_count;
3395 } __packed;
3396 } data;
3397
3398 struct in6_addr secret;
3399 struct in6_addr temp;
3400 struct net *net = dev_net(idev->dev);
3401
3402 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3403
3404 if (idev->cnf.stable_secret.initialized)
3405 secret = idev->cnf.stable_secret.secret;
3406 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3407 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3408 else
3409 return -1;
3410
3411 retry:
3412 spin_lock_bh(&lock);
3413
3414 sha1_init(digest);
3415 memset(&data, 0, sizeof(data));
3416 memset(workspace, 0, sizeof(workspace));
3417 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3418 data.prefix[0] = address->s6_addr32[0];
3419 data.prefix[1] = address->s6_addr32[1];
3420 data.secret = secret;
3421 data.dad_count = dad_count;
3422
3423 sha1_transform(digest, data.__data, workspace);
3424
3425 temp = *address;
3426 temp.s6_addr32[2] = (__force __be32)digest[0];
3427 temp.s6_addr32[3] = (__force __be32)digest[1];
3428
3429 spin_unlock_bh(&lock);
3430
3431 if (ipv6_reserved_interfaceid(temp)) {
3432 dad_count++;
3433 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3434 return -1;
3435 goto retry;
3436 }
3437
3438 *address = temp;
3439 return 0;
3440 }
3441
3442 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3443 {
3444 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3445
3446 if (s->initialized)
3447 return;
3448 s = &idev->cnf.stable_secret;
3449 get_random_bytes(&s->secret, sizeof(s->secret));
3450 s->initialized = true;
3451 }
3452
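/* Generate the link-local (fe80::/64) address according to addr_gen_mode:
 * RANDOM seeds a per-device secret and then behaves like STABLE_PRIVACY,
 * EUI64 derives the IID from the hardware address, and NONE adds nothing.
 * If IID generation fails and prefix_route is set, only the fe80::/64
 * prefix route is installed.
 */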
3453 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3454 {
3455 struct in6_addr addr;
3456
3457 /* no link local addresses on L3 master devices */
3458 if (netif_is_l3_master(idev->dev))
3459 return;
3460
3461 /* no link local addresses on devices flagged as slaves */
3462 if (idev->dev->priv_flags & IFF_NO_ADDRCONF)
3463 return;
3464
3465 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3466
3467 switch (idev->cnf.addr_gen_mode) {
3468 case IN6_ADDR_GEN_MODE_RANDOM:
3469 ipv6_gen_mode_random_init(idev);
3470 fallthrough;
3471 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3472 if (!ipv6_generate_stable_address(&addr, 0, idev))
3473 addrconf_add_linklocal(idev, &addr,
3474 IFA_F_STABLE_PRIVACY);
3475 else if (prefix_route)
3476 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3477 0, 0, GFP_KERNEL);
3478 break;
3479 case IN6_ADDR_GEN_MODE_EUI64:
3480 /* addrconf_add_linklocal also adds a prefix_route and we
3481 * only need to care about prefix routes if ipv6_generate_eui64
3482 * couldn't generate one.
3483 */
3484 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3485 addrconf_add_linklocal(idev, &addr, 0);
3486 else if (prefix_route)
3487 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3488 0, 0, GFP_KERNEL);
3489 break;
3490 case IN6_ADDR_GEN_MODE_NONE:
3491 default:
3492 /* will not add any link local address */
3493 break;
3494 }
3495 }
3496
3497 static void addrconf_dev_config(struct net_device *dev)
3498 {
3499 struct inet6_dev *idev;
3500
3501 ASSERT_RTNL();
3502
3503 if ((dev->type != ARPHRD_ETHER) &&
3504 (dev->type != ARPHRD_FDDI) &&
3505 (dev->type != ARPHRD_ARCNET) &&
3506 (dev->type != ARPHRD_INFINIBAND) &&
3507 (dev->type != ARPHRD_IEEE1394) &&
3508 (dev->type != ARPHRD_TUNNEL6) &&
3509 (dev->type != ARPHRD_6LOWPAN) &&
3510 (dev->type != ARPHRD_TUNNEL) &&
3511 (dev->type != ARPHRD_NONE) &&
3512 (dev->type != ARPHRD_RAWIP)) {
3513 /* Alas, we support only Ethernet autoconfiguration. */
3514 idev = __in6_dev_get(dev);
3515 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3516 dev->flags & IFF_MULTICAST)
3517 ipv6_mc_up(idev);
3518 return;
3519 }
3520
3521 idev = addrconf_add_dev(dev);
3522 if (IS_ERR(idev))
3523 return;
3524
3525 /* this device type has no EUI support */
3526 if (dev->type == ARPHRD_NONE &&
3527 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3528 WRITE_ONCE(idev->cnf.addr_gen_mode,
3529 IN6_ADDR_GEN_MODE_RANDOM);
3530
3531 addrconf_addr_gen(idev, false);
3532 }
3533
3534 #if IS_ENABLED(CONFIG_IPV6_SIT)
3535 static void addrconf_sit_config(struct net_device *dev)
3536 {
3537 struct inet6_dev *idev;
3538
3539 ASSERT_RTNL();
3540
3541 /*
3542 * Configure the tunnel with one of our IPv4
3543 * addresses... we should configure all of
3544 * our v4 addrs in the tunnel
3545 */
3546
3547 idev = ipv6_find_idev(dev);
3548 if (IS_ERR(idev)) {
3549 pr_debug("%s: add_dev failed\n", __func__);
3550 return;
3551 }
3552
3553 if (dev->priv_flags & IFF_ISATAP) {
3554 addrconf_addr_gen(idev, false);
3555 return;
3556 }
3557
3558 add_v4_addrs(idev);
3559
3560 if (dev->flags&IFF_POINTOPOINT)
3561 addrconf_add_mroute(dev);
3562 }
3563 #endif
3564
3565 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3566 static void addrconf_gre_config(struct net_device *dev)
3567 {
3568 struct inet6_dev *idev;
3569
3570 ASSERT_RTNL();
3571
3572 idev = addrconf_add_dev(dev);
3573 if (IS_ERR(idev))
3574 return;
3575
3576 /* Generate the IPv6 link-local address using addrconf_addr_gen(),
3577 * unless we have an IPv4 GRE device not bound to an IP address and
3578 * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
3579 * case). Such devices fall back to add_v4_addrs() instead.
3580 */
3581 if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
3582 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
3583 addrconf_addr_gen(idev, true);
3584 return;
3585 }
3586
3587 add_v4_addrs(idev);
3588 }
3589 #endif
3590
3591 static void addrconf_init_auto_addrs(struct net_device *dev)
3592 {
3593 switch (dev->type) {
3594 #if IS_ENABLED(CONFIG_IPV6_SIT)
3595 case ARPHRD_SIT:
3596 addrconf_sit_config(dev);
3597 break;
3598 #endif
3599 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3600 case ARPHRD_IP6GRE:
3601 case ARPHRD_IPGRE:
3602 addrconf_gre_config(dev);
3603 break;
3604 #endif
3605 case ARPHRD_LOOPBACK:
3606 init_loopback(dev);
3607 break;
3608
3609 default:
3610 addrconf_dev_config(dev);
3611 break;
3612 }
3613 }
3614
3615 static int fixup_permanent_addr(struct net *net,
3616 struct inet6_dev *idev,
3617 struct inet6_ifaddr *ifp)
3618 {
3619 /* !fib6_node means the host route was removed from the
3620 * FIB, for example, if 'lo' device is taken down. In that
3621 * case regenerate the host route.
3622 */
3623 if (!ifp->rt || !ifp->rt->fib6_node) {
3624 struct fib6_info *f6i, *prev;
3625
3626 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3627 GFP_ATOMIC, NULL);
3628 if (IS_ERR(f6i))
3629 return PTR_ERR(f6i);
3630
3631 /* ifp->rt can be accessed outside of rtnl */
3632 spin_lock(&ifp->lock);
3633 prev = ifp->rt;
3634 ifp->rt = f6i;
3635 spin_unlock(&ifp->lock);
3636
3637 fib6_info_release(prev);
3638 }
3639
3640 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3641 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3642 ifp->rt_priority, idev->dev, 0, 0,
3643 GFP_ATOMIC);
3644 }
3645
3646 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3647 addrconf_dad_start(ifp);
3648
3649 return 0;
3650 }
3651
3652 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3653 {
3654 struct inet6_ifaddr *ifp, *tmp;
3655 struct inet6_dev *idev;
3656
3657 idev = __in6_dev_get(dev);
3658 if (!idev)
3659 return;
3660
3661 write_lock_bh(&idev->lock);
3662
3663 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3664 if ((ifp->flags & IFA_F_PERMANENT) &&
3665 fixup_permanent_addr(net, idev, ifp) < 0) {
3666 write_unlock_bh(&idev->lock);
3667 in6_ifa_hold(ifp);
3668 ipv6_del_addr(ifp);
3669 write_lock_bh(&idev->lock);
3670
3671 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3672 idev->dev->name, &ifp->addr);
3673 }
3674 }
3675
3676 write_unlock_bh(&idev->lock);
3677 }
3678
3679 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3680 void *ptr)
3681 {
3682 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3683 struct netdev_notifier_change_info *change_info;
3684 struct netdev_notifier_changeupper_info *info;
3685 struct inet6_dev *idev = __in6_dev_get(dev);
3686 struct net *net = dev_net(dev);
3687 int run_pending = 0;
3688 int err;
3689
3690 switch (event) {
3691 case NETDEV_REGISTER:
3692 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3693 idev = ipv6_add_dev(dev);
3694 if (IS_ERR(idev))
3695 return notifier_from_errno(PTR_ERR(idev));
3696 }
3697 break;
3698
3699 case NETDEV_CHANGEMTU:
3700 /* if the MTU is under IPV6_MIN_MTU, stop IPv6 on this interface. */
3701 if (dev->mtu < IPV6_MIN_MTU) {
3702 addrconf_ifdown(dev, dev != net->loopback_dev);
3703 break;
3704 }
3705
3706 if (idev) {
3707 rt6_mtu_change(dev, dev->mtu);
3708 WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
3709 break;
3710 }
3711
3712 /* allocate new idev */
3713 idev = ipv6_add_dev(dev);
3714 if (IS_ERR(idev))
3715 break;
3716
3717 /* device is still not ready */
3718 if (!(idev->if_flags & IF_READY))
3719 break;
3720
3721 run_pending = 1;
3722 fallthrough;
3723 case NETDEV_UP:
3724 case NETDEV_CHANGE:
3725 if (idev && idev->cnf.disable_ipv6)
3726 break;
3727
3728 if (dev->priv_flags & IFF_NO_ADDRCONF) {
3729 if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
3730 dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
3731 ipv6_mc_up(idev);
3732 break;
3733 }
3734
3735 if (event == NETDEV_UP) {
3736 /* restore routes for permanent addresses */
3737 addrconf_permanent_addr(net, dev);
3738
3739 if (!addrconf_link_ready(dev)) {
3740 /* device is not ready yet. */
3741 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3742 dev->name);
3743 break;
3744 }
3745
3746 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3747 idev = ipv6_add_dev(dev);
3748
3749 if (!IS_ERR_OR_NULL(idev)) {
3750 idev->if_flags |= IF_READY;
3751 run_pending = 1;
3752 }
3753 } else if (event == NETDEV_CHANGE) {
3754 if (!addrconf_link_ready(dev)) {
3755 /* device is still not ready. */
3756 rt6_sync_down_dev(dev, event);
3757 break;
3758 }
3759
3760 if (!IS_ERR_OR_NULL(idev)) {
3761 if (idev->if_flags & IF_READY) {
3762 /* device is already configured -
3763 * but resend MLD reports, we might
3764 * have roamed and need to update
3765 * multicast snooping switches
3766 */
3767 ipv6_mc_up(idev);
3768 change_info = ptr;
3769 if (change_info->flags_changed & IFF_NOARP)
3770 addrconf_dad_run(idev, true);
3771 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3772 break;
3773 }
3774 idev->if_flags |= IF_READY;
3775 }
3776
3777 pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3778 dev->name);
3779
3780 run_pending = 1;
3781 }
3782
3783 addrconf_init_auto_addrs(dev);
3784
3785 if (!IS_ERR_OR_NULL(idev)) {
3786 if (run_pending)
3787 addrconf_dad_run(idev, false);
3788
3789 /* Device has an address by now */
3790 rt6_sync_up(dev, RTNH_F_DEAD);
3791
3792 /*
3793 * If the MTU changed while the interface was down,
3794 * the new MTU must be reflected in the idev as well
3795 * as in the routes when the interface comes back up.
3796 */
3797 if (idev->cnf.mtu6 != dev->mtu &&
3798 dev->mtu >= IPV6_MIN_MTU) {
3799 rt6_mtu_change(dev, dev->mtu);
3800 WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
3801 }
3802 WRITE_ONCE(idev->tstamp, jiffies);
3803 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3804
3805 /*
3806 * If the MTU changed while the interface was down and is now
3807 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3808 */
3809 if (dev->mtu < IPV6_MIN_MTU)
3810 addrconf_ifdown(dev, dev != net->loopback_dev);
3811 }
3812 break;
3813
3814 case NETDEV_DOWN:
3815 case NETDEV_UNREGISTER:
3816 /*
3817 * Remove all addresses from this interface.
3818 */
3819 addrconf_ifdown(dev, event != NETDEV_DOWN);
3820 break;
3821
3822 case NETDEV_CHANGENAME:
3823 if (idev) {
3824 snmp6_unregister_dev(idev);
3825 addrconf_sysctl_unregister(idev);
3826 err = addrconf_sysctl_register(idev);
3827 if (err)
3828 return notifier_from_errno(err);
3829 err = snmp6_register_dev(idev);
3830 if (err) {
3831 addrconf_sysctl_unregister(idev);
3832 return notifier_from_errno(err);
3833 }
3834 }
3835 break;
3836
3837 case NETDEV_PRE_TYPE_CHANGE:
3838 case NETDEV_POST_TYPE_CHANGE:
3839 if (idev)
3840 addrconf_type_change(dev, event);
3841 break;
3842
3843 case NETDEV_CHANGEUPPER:
3844 info = ptr;
3845
3846 /* flush all routes if dev is linked to or unlinked from
3847 * an L3 master device (e.g., VRF)
3848 */
3849 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3850 addrconf_ifdown(dev, false);
3851 }
3852
3853 return NOTIFY_OK;
3854 }
3855
3856 /*
3857 * The addrconf module should be notified of a device going up.
3858 */
3859 static struct notifier_block ipv6_dev_notf = {
3860 .notifier_call = addrconf_notify,
3861 .priority = ADDRCONF_NOTIFY_PRIORITY,
3862 };
3863
3864 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3865 {
3866 struct inet6_dev *idev;
3867 ASSERT_RTNL();
3868
3869 idev = __in6_dev_get(dev);
3870
3871 if (event == NETDEV_POST_TYPE_CHANGE)
3872 ipv6_mc_remap(idev);
3873 else if (event == NETDEV_PRE_TYPE_CHANGE)
3874 ipv6_mc_unmap(idev);
3875 }
3876
3877 static bool addr_is_local(const struct in6_addr *addr)
3878 {
3879 return ipv6_addr_type(addr) &
3880 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3881 }
3882
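/* Tear down IPv6 state on NETDEV_DOWN/NETDEV_UNREGISTER.  On a plain
 * down event, permanent non-link-local addresses may be retained when
 * keep_addr_on_down is enabled; on unregister everything is released,
 * including the snmp6 entry and the neighbour parameters.
 */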
3883 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3884 {
3885 unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3886 struct net *net = dev_net(dev);
3887 struct inet6_dev *idev;
3888 struct inet6_ifaddr *ifa;
3889 LIST_HEAD(tmp_addr_list);
3890 bool keep_addr = false;
3891 bool was_ready;
3892 int state, i;
3893
3894 ASSERT_RTNL();
3895
3896 rt6_disable_ip(dev, event);
3897
3898 idev = __in6_dev_get(dev);
3899 if (!idev)
3900 return -ENODEV;
3901
3902 /*
3903 * Step 1: remove reference to ipv6 device from parent device.
3904 * Do not dev_put!
3905 */
3906 if (unregister) {
3907 WRITE_ONCE(idev->dead, 1);
3908
3909 /* protected by rtnl_lock */
3910 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3911
3912 /* Step 1.5: remove snmp6 entry */
3913 snmp6_unregister_dev(idev);
3914
3915 }
3916
3917 /* combine the user config with event to determine if permanent
3918 * addresses are to be removed from address hash table
3919 */
3920 if (!unregister && !idev->cnf.disable_ipv6) {
3921 /* aggregate the system setting and interface setting */
3922 int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
3923
3924 if (!_keep_addr)
3925 _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
3926
3927 keep_addr = (_keep_addr > 0);
3928 }
3929
3930 /* Step 2: clear hash table */
3931 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3932 struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3933
3934 spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3935 restart:
3936 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3937 if (ifa->idev == idev) {
3938 addrconf_del_dad_work(ifa);
3939 /* combined flag + permanent flag decide if
3940 * address is retained on a down event
3941 */
3942 if (!keep_addr ||
3943 !(ifa->flags & IFA_F_PERMANENT) ||
3944 addr_is_local(&ifa->addr)) {
3945 hlist_del_init_rcu(&ifa->addr_lst);
3946 goto restart;
3947 }
3948 }
3949 }
3950 spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3951 }
3952
3953 write_lock_bh(&idev->lock);
3954
3955 addrconf_del_rs_timer(idev);
3956
3957 /* Step 3: clear flags for stateless addrconf, repeated down
3958 * detection
3959 */
3960 was_ready = idev->if_flags & IF_READY;
3961 if (!unregister)
3962 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3963
3964 /* Step 4: clear tempaddr list */
3965 while (!list_empty(&idev->tempaddr_list)) {
3966 ifa = list_first_entry(&idev->tempaddr_list,
3967 struct inet6_ifaddr, tmp_list);
3968 list_del(&ifa->tmp_list);
3969 write_unlock_bh(&idev->lock);
3970 spin_lock_bh(&ifa->lock);
3971
3972 if (ifa->ifpub) {
3973 in6_ifa_put(ifa->ifpub);
3974 ifa->ifpub = NULL;
3975 }
3976 spin_unlock_bh(&ifa->lock);
3977 in6_ifa_put(ifa);
3978 write_lock_bh(&idev->lock);
3979 }
3980
3981 list_for_each_entry(ifa, &idev->addr_list, if_list)
3982 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3983 write_unlock_bh(&idev->lock);
3984
3985 while (!list_empty(&tmp_addr_list)) {
3986 struct fib6_info *rt = NULL;
3987 bool keep;
3988
3989 ifa = list_first_entry(&tmp_addr_list,
3990 struct inet6_ifaddr, if_list_aux);
3991 list_del(&ifa->if_list_aux);
3992
3993 addrconf_del_dad_work(ifa);
3994
3995 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3996 !addr_is_local(&ifa->addr);
3997
3998 spin_lock_bh(&ifa->lock);
3999
4000 if (keep) {
4001 /* set state to skip the notifier below */
4002 state = INET6_IFADDR_STATE_DEAD;
4003 ifa->state = INET6_IFADDR_STATE_PREDAD;
4004 if (!(ifa->flags & IFA_F_NODAD))
4005 ifa->flags |= IFA_F_TENTATIVE;
4006
4007 rt = ifa->rt;
4008 ifa->rt = NULL;
4009 } else {
4010 state = ifa->state;
4011 ifa->state = INET6_IFADDR_STATE_DEAD;
4012 }
4013
4014 spin_unlock_bh(&ifa->lock);
4015
4016 if (rt)
4017 ip6_del_rt(net, rt, false);
4018
4019 if (state != INET6_IFADDR_STATE_DEAD) {
4020 __ipv6_ifa_notify(RTM_DELADDR, ifa);
4021 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
4022 } else {
4023 if (idev->cnf.forwarding)
4024 addrconf_leave_anycast(ifa);
4025 addrconf_leave_solict(ifa->idev, &ifa->addr);
4026 }
4027
4028 if (!keep) {
4029 write_lock_bh(&idev->lock);
4030 list_del_rcu(&ifa->if_list);
4031 write_unlock_bh(&idev->lock);
4032 in6_ifa_put(ifa);
4033 }
4034 }
4035
4036 /* Step 5: Discard anycast and multicast list */
4037 if (unregister) {
4038 ipv6_ac_destroy_dev(idev);
4039 ipv6_mc_destroy_dev(idev);
4040 } else if (was_ready) {
4041 ipv6_mc_down(idev);
4042 }
4043
4044 WRITE_ONCE(idev->tstamp, jiffies);
4045 idev->ra_mtu = 0;
4046
4047 /* Last: Shoot the device (if unregistered) */
4048 if (unregister) {
4049 addrconf_sysctl_unregister(idev);
4050 neigh_parms_release(&nd_tbl, idev->nd_parms);
4051 neigh_ifdown(&nd_tbl, dev);
4052 in6_dev_put(idev);
4053 }
4054 return 0;
4055 }
4056
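/* Router solicitation timer: as long as no router advertisement has been
 * received, resend an RS with the RFC 3315 section 14 backoff applied to
 * the interval, until cnf.rtr_solicits probes have been sent (a negative
 * rtr_solicits means no limit).
 */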
4057 static void addrconf_rs_timer(struct timer_list *t)
4058 {
4059 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
4060 struct net_device *dev = idev->dev;
4061 struct in6_addr lladdr;
4062 int rtr_solicits;
4063
4064 write_lock(&idev->lock);
4065 if (idev->dead || !(idev->if_flags & IF_READY))
4066 goto out;
4067
4068 if (!ipv6_accept_ra(idev))
4069 goto out;
4070
4071 /* Announcement received after solicitation was sent */
4072 if (idev->if_flags & IF_RA_RCVD)
4073 goto out;
4074
4075 rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
4076
4077 if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
4078 write_unlock(&idev->lock);
4079 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4080 ndisc_send_rs(dev, &lladdr,
4081 &in6addr_linklocal_allrouters);
4082 else
4083 goto put;
4084
4085 write_lock(&idev->lock);
4086 idev->rs_interval = rfc3315_s14_backoff_update(
4087 idev->rs_interval,
4088 READ_ONCE(idev->cnf.rtr_solicit_max_interval));
4089 /* The wait after the last probe can be shorter */
4090 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
4091 READ_ONCE(idev->cnf.rtr_solicits)) ?
4092 READ_ONCE(idev->cnf.rtr_solicit_delay) :
4093 idev->rs_interval);
4094 } else {
4095 /*
4096 * Note: we no longer support the deprecated
4097 * "all on-link" assumption.
4098 */
4099 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
4100 }
4101
4102 out:
4103 write_unlock(&idev->lock);
4104 put:
4105 in6_dev_put(idev);
4106 }
4107
4108 /*
4109 * Duplicate Address Detection
4110 */
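/* Schedule the first DAD probe for an address: optimistic addresses start
 * immediately, others after a random delay below rtr_solicit_delay.  When
 * enhanced DAD is enabled, a random non-zero nonce is generated and
 * carried in the probes.
 */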
4111 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
4112 {
4113 struct inet6_dev *idev = ifp->idev;
4114 unsigned long rand_num;
4115 u64 nonce;
4116
4117 if (ifp->flags & IFA_F_OPTIMISTIC)
4118 rand_num = 0;
4119 else
4120 rand_num = get_random_u32_below(
4121 READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
4122
4123 nonce = 0;
4124 if (READ_ONCE(idev->cnf.enhanced_dad) ||
4125 READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
4126 do
4127 get_random_bytes(&nonce, 6);
4128 while (nonce == 0);
4129 }
4130 ifp->dad_nonce = nonce;
4131 ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
4132 addrconf_mod_dad_work(ifp, rand_num);
4133 }
4134
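/* First step of DAD for an address: join the solicited-node multicast
 * group, then either complete immediately (NOARP/loopback devices, DAD
 * disabled via accept_dad, or IFA_F_NODAD) or kick off the probe
 * schedule.  Optimistic addresses get their route inserted right away so
 * they can be used while DAD is still running.
 */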
4135 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
4136 {
4137 struct inet6_dev *idev = ifp->idev;
4138 struct net_device *dev = idev->dev;
4139 bool bump_id, notify = false;
4140 struct net *net;
4141
4142 addrconf_join_solict(dev, &ifp->addr);
4143
4144 read_lock_bh(&idev->lock);
4145 spin_lock(&ifp->lock);
4146 if (ifp->state == INET6_IFADDR_STATE_DEAD)
4147 goto out;
4148
4149 net = dev_net(dev);
4150 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4151 (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
4152 READ_ONCE(idev->cnf.accept_dad) < 1) ||
4153 !(ifp->flags&IFA_F_TENTATIVE) ||
4154 ifp->flags & IFA_F_NODAD) {
4155 bool send_na = false;
4156
4157 if (ifp->flags & IFA_F_TENTATIVE &&
4158 !(ifp->flags & IFA_F_OPTIMISTIC))
4159 send_na = true;
4160 bump_id = ifp->flags & IFA_F_TENTATIVE;
4161 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4162 spin_unlock(&ifp->lock);
4163 read_unlock_bh(&idev->lock);
4164
4165 addrconf_dad_completed(ifp, bump_id, send_na);
4166 return;
4167 }
4168
4169 if (!(idev->if_flags & IF_READY)) {
4170 spin_unlock(&ifp->lock);
4171 read_unlock_bh(&idev->lock);
4172 /*
4173 * If the device is not ready:
4174 * - keep it tentative if it is a permanent address.
4175 * - otherwise, kill it.
4176 */
4177 in6_ifa_hold(ifp);
4178 addrconf_dad_stop(ifp, 0);
4179 return;
4180 }
4181
4182 /*
4183 * Optimistic nodes can start receiving
4184 * frames right away.
4185 */
4186 if (ifp->flags & IFA_F_OPTIMISTIC) {
4187 ip6_ins_rt(net, ifp->rt);
4188 if (ipv6_use_optimistic_addr(net, idev)) {
4189 /* Because optimistic nodes can use this address,
4190 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4191 */
4192 notify = true;
4193 }
4194 }
4195
4196 addrconf_dad_kick(ifp);
4197 out:
4198 spin_unlock(&ifp->lock);
4199 read_unlock_bh(&idev->lock);
4200 if (notify)
4201 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4202 }
4203
4204 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4205 {
4206 bool begin_dad = false;
4207
4208 spin_lock_bh(&ifp->lock);
4209 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4210 ifp->state = INET6_IFADDR_STATE_PREDAD;
4211 begin_dad = true;
4212 }
4213 spin_unlock_bh(&ifp->lock);
4214
4215 if (begin_dad)
4216 addrconf_mod_dad_work(ifp, 0);
4217 }
4218
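/* DAD work item.  Depending on the address state this either begins DAD,
 * aborts it (possibly disabling IPv6 on the device when the EUI-64 based
 * link-local address failed DAD and accept_dad > 1), or sends the next
 * neighbour solicitation probe until dad_probes reaches zero, at which
 * point DAD has succeeded.
 */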
4219 static void addrconf_dad_work(struct work_struct *w)
4220 {
4221 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4222 struct inet6_ifaddr,
4223 dad_work);
4224 struct inet6_dev *idev = ifp->idev;
4225 bool bump_id, disable_ipv6 = false;
4226 struct in6_addr mcaddr;
4227
4228 enum {
4229 DAD_PROCESS,
4230 DAD_BEGIN,
4231 DAD_ABORT,
4232 } action = DAD_PROCESS;
4233
4234 rtnl_lock();
4235
4236 spin_lock_bh(&ifp->lock);
4237 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4238 action = DAD_BEGIN;
4239 ifp->state = INET6_IFADDR_STATE_DAD;
4240 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4241 action = DAD_ABORT;
4242 ifp->state = INET6_IFADDR_STATE_POSTDAD;
4243
4244 if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 ||
4245 READ_ONCE(idev->cnf.accept_dad) > 1) &&
4246 !idev->cnf.disable_ipv6 &&
4247 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4248 struct in6_addr addr;
4249
4250 addr.s6_addr32[0] = htonl(0xfe800000);
4251 addr.s6_addr32[1] = 0;
4252
4253 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4254 ipv6_addr_equal(&ifp->addr, &addr)) {
4255 /* DAD failed for link-local based on MAC */
4256 WRITE_ONCE(idev->cnf.disable_ipv6, 1);
4257
4258 pr_info("%s: IPv6 being disabled!\n",
4259 ifp->idev->dev->name);
4260 disable_ipv6 = true;
4261 }
4262 }
4263 }
4264 spin_unlock_bh(&ifp->lock);
4265
4266 if (action == DAD_BEGIN) {
4267 addrconf_dad_begin(ifp);
4268 goto out;
4269 } else if (action == DAD_ABORT) {
4270 in6_ifa_hold(ifp);
4271 addrconf_dad_stop(ifp, 1);
4272 if (disable_ipv6)
4273 addrconf_ifdown(idev->dev, false);
4274 goto out;
4275 }
4276
4277 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4278 goto out;
4279
4280 write_lock_bh(&idev->lock);
4281 if (idev->dead || !(idev->if_flags & IF_READY)) {
4282 write_unlock_bh(&idev->lock);
4283 goto out;
4284 }
4285
4286 spin_lock(&ifp->lock);
4287 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4288 spin_unlock(&ifp->lock);
4289 write_unlock_bh(&idev->lock);
4290 goto out;
4291 }
4292
4293 if (ifp->dad_probes == 0) {
4294 bool send_na = false;
4295
4296 /*
4297 * DAD was successful
4298 */
4299
4300 if (ifp->flags & IFA_F_TENTATIVE &&
4301 !(ifp->flags & IFA_F_OPTIMISTIC))
4302 send_na = true;
4303 bump_id = ifp->flags & IFA_F_TENTATIVE;
4304 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4305 spin_unlock(&ifp->lock);
4306 write_unlock_bh(&idev->lock);
4307
4308 addrconf_dad_completed(ifp, bump_id, send_na);
4309
4310 goto out;
4311 }
4312
4313 ifp->dad_probes--;
4314 addrconf_mod_dad_work(ifp,
4315 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4316 HZ/100));
4317 spin_unlock(&ifp->lock);
4318 write_unlock_bh(&idev->lock);
4319
4320 /* send a neighbour solicitation for our addr */
4321 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4322 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4323 ifp->dad_nonce);
4324 out:
4325 in6_ifa_put(ifp);
4326 rtnl_unlock();
4327 }
4328
4329 /* ifp->idev must be at least read locked */
4330 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4331 {
4332 struct inet6_ifaddr *ifpiter;
4333 struct inet6_dev *idev = ifp->idev;
4334
4335 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4336 if (ifpiter->scope > IFA_LINK)
4337 break;
4338 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4339 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4340 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4341 IFA_F_PERMANENT)
4342 return false;
4343 }
4344 return true;
4345 }
4346
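/* Called once DAD has finished for an address: announce it via
 * RTM_NEWADDR, resend the MLD report with the now-valid link-local
 * source, optionally send an unsolicited NA, and start router
 * solicitations if this is the only usable link-local address and the
 * device accepts router advertisements.
 */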
4347 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4348 bool send_na)
4349 {
4350 struct net_device *dev = ifp->idev->dev;
4351 struct in6_addr lladdr;
4352 bool send_rs, send_mld;
4353
4354 addrconf_del_dad_work(ifp);
4355
4356 /*
4357 * Configure the address for reception. Now it is valid.
4358 */
4359
4360 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4361
4362 /* If the added prefix is link-local and we are prepared to process
4363 router advertisements, start sending router solicitations.
4364 */
4365
4366 read_lock_bh(&ifp->idev->lock);
4367 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4368 send_rs = send_mld &&
4369 ipv6_accept_ra(ifp->idev) &&
4370 READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
4371 (dev->flags & IFF_LOOPBACK) == 0 &&
4372 (dev->type != ARPHRD_TUNNEL) &&
4373 !netif_is_team_port(dev);
4374 read_unlock_bh(&ifp->idev->lock);
4375
4376 /* While DAD is in progress, the MLD report's source address is in6addr_any.
4377 * Resend it with the proper link-local address now.
4378 */
4379 if (send_mld)
4380 ipv6_mc_dad_complete(ifp->idev);
4381
4382 /* send unsolicited NA if enabled */
4383 if (send_na &&
4384 (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
4385 READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
4386 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4387 /*router=*/ !!ifp->idev->cnf.forwarding,
4388 /*solicited=*/ false, /*override=*/ true,
4389 /*inc_opt=*/ true);
4390 }
4391
4392 if (send_rs) {
4393 /*
4394 * If a host has already performed a random delay
4395 * [...] as part of DAD [...] there is no need
4396 * to delay again before sending the first RS
4397 */
4398 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4399 return;
4400 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4401
4402 write_lock_bh(&ifp->idev->lock);
4403 spin_lock(&ifp->lock);
4404 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4405 READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
4406 ifp->idev->rs_probes = 1;
4407 ifp->idev->if_flags |= IF_RS_SENT;
4408 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4409 spin_unlock(&ifp->lock);
4410 write_unlock_bh(&ifp->idev->lock);
4411 }
4412
4413 if (bump_id)
4414 rt_genid_bump_ipv6(dev_net(dev));
4415
4416 /* Make sure that a new temporary address will be created
4417 * before this temporary address becomes deprecated.
4418 */
4419 if (ifp->flags & IFA_F_TEMPORARY)
4420 addrconf_verify_rtnl(dev_net(dev));
4421 }
4422
4423 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4424 {
4425 struct inet6_ifaddr *ifp;
4426
4427 read_lock_bh(&idev->lock);
4428 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4429 spin_lock(&ifp->lock);
4430 if ((ifp->flags & IFA_F_TENTATIVE &&
4431 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4432 if (restart)
4433 ifp->state = INET6_IFADDR_STATE_PREDAD;
4434 addrconf_dad_kick(ifp);
4435 }
4436 spin_unlock(&ifp->lock);
4437 }
4438 read_unlock_bh(&idev->lock);
4439 }
4440
4441 #ifdef CONFIG_PROC_FS
4442 struct if6_iter_state {
4443 struct seq_net_private p;
4444 int bucket;
4445 int offset;
4446 };
4447
4448 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4449 {
4450 struct if6_iter_state *state = seq->private;
4451 struct net *net = seq_file_net(seq);
4452 struct inet6_ifaddr *ifa = NULL;
4453 int p = 0;
4454
4455 /* initial bucket if pos is 0 */
4456 if (pos == 0) {
4457 state->bucket = 0;
4458 state->offset = 0;
4459 }
4460
4461 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4462 hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4463 addr_lst) {
4464 /* sync with offset */
4465 if (p < state->offset) {
4466 p++;
4467 continue;
4468 }
4469 return ifa;
4470 }
4471
4472 /* prepare for next bucket */
4473 state->offset = 0;
4474 p = 0;
4475 }
4476 return NULL;
4477 }
4478
4479 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4480 struct inet6_ifaddr *ifa)
4481 {
4482 struct if6_iter_state *state = seq->private;
4483 struct net *net = seq_file_net(seq);
4484
4485 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4486 state->offset++;
4487 return ifa;
4488 }
4489
4490 state->offset = 0;
4491 while (++state->bucket < IN6_ADDR_HSIZE) {
4492 hlist_for_each_entry_rcu(ifa,
4493 &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4494 return ifa;
4495 }
4496 }
4497
4498 return NULL;
4499 }
4500
4501 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4502 __acquires(rcu)
4503 {
4504 rcu_read_lock();
4505 return if6_get_first(seq, *pos);
4506 }
4507
4508 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4509 {
4510 struct inet6_ifaddr *ifa;
4511
4512 ifa = if6_get_next(seq, v);
4513 ++*pos;
4514 return ifa;
4515 }
4516
4517 static void if6_seq_stop(struct seq_file *seq, void *v)
4518 __releases(rcu)
4519 {
4520 rcu_read_unlock();
4521 }
4522
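/* One line of /proc/net/if_inet6 per address: the address as plain hex,
 * then ifindex, prefix length, scope and flags (all hex) and the device
 * name.  For example, the loopback address typically shows up as:
 *
 *   00000000000000000000000000000001 01 80 10 80       lo
 */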
4523 static int if6_seq_show(struct seq_file *seq, void *v)
4524 {
4525 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4526 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4527 &ifp->addr,
4528 ifp->idev->dev->ifindex,
4529 ifp->prefix_len,
4530 ifp->scope,
4531 (u8) ifp->flags,
4532 ifp->idev->dev->name);
4533 return 0;
4534 }
4535
4536 static const struct seq_operations if6_seq_ops = {
4537 .start = if6_seq_start,
4538 .next = if6_seq_next,
4539 .show = if6_seq_show,
4540 .stop = if6_seq_stop,
4541 };
4542
4543 static int __net_init if6_proc_net_init(struct net *net)
4544 {
4545 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4546 sizeof(struct if6_iter_state)))
4547 return -ENOMEM;
4548 return 0;
4549 }
4550
4551 static void __net_exit if6_proc_net_exit(struct net *net)
4552 {
4553 remove_proc_entry("if_inet6", net->proc_net);
4554 }
4555
4556 static struct pernet_operations if6_proc_net_ops = {
4557 .init = if6_proc_net_init,
4558 .exit = if6_proc_net_exit,
4559 };
4560
4561 int __init if6_proc_init(void)
4562 {
4563 return register_pernet_subsys(&if6_proc_net_ops);
4564 }
4565
4566 void if6_proc_exit(void)
4567 {
4568 unregister_pernet_subsys(&if6_proc_net_ops);
4569 }
4570 #endif /* CONFIG_PROC_FS */
4571
4572 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4573 /* Check if address is a home address configured on any interface. */
4574 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4575 {
4576 unsigned int hash = inet6_addr_hash(net, addr);
4577 struct inet6_ifaddr *ifp = NULL;
4578 int ret = 0;
4579
4580 rcu_read_lock();
4581 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4582 if (ipv6_addr_equal(&ifp->addr, addr) &&
4583 (ifp->flags & IFA_F_HOMEADDRESS)) {
4584 ret = 1;
4585 break;
4586 }
4587 }
4588 rcu_read_unlock();
4589 return ret;
4590 }
4591 #endif
4592
4593 /* RFC 6554 describes an algorithm to avoid loops in segment routing by
4594 * checking whether the segment list contains any local interface address.
4595 *
4596 * Quote:
4597 *
4598 * To detect loops in the SRH, a router MUST determine if the SRH
4599 * includes multiple addresses assigned to any interface on that router.
4600 * If such addresses appear more than once and are separated by at least
4601 * one address not assigned to that router.
4602 */
4603 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4604 unsigned char nsegs)
4605 {
4606 const struct in6_addr *addr;
4607 int i, ret = 0, found = 0;
4608 struct inet6_ifaddr *ifp;
4609 bool separated = false;
4610 unsigned int hash;
4611 bool hash_found;
4612
4613 rcu_read_lock();
4614 for (i = 0; i < nsegs; i++) {
4615 addr = &segs[i];
4616 hash = inet6_addr_hash(net, addr);
4617
4618 hash_found = false;
4619 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4620
4621 if (ipv6_addr_equal(&ifp->addr, addr)) {
4622 hash_found = true;
4623 break;
4624 }
4625 }
4626
4627 if (hash_found) {
4628 if (found > 1 && separated) {
4629 ret = 1;
4630 break;
4631 }
4632
4633 separated = false;
4634 found++;
4635 } else {
4636 separated = true;
4637 }
4638 }
4639 rcu_read_unlock();
4640
4641 return ret;
4642 }
4643
4644 /*
4645 * Periodic address status verification
4646 */
4647
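/* Walk the whole address hash table: regenerate temporary addresses that
 * are about to be deprecated, delete addresses whose valid lifetime has
 * expired, mark addresses past their preferred lifetime as deprecated,
 * and reschedule the work for the earliest upcoming event (but no sooner
 * than ADDRCONF_TIMER_FUZZ_MAX from now).
 */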
4648 static void addrconf_verify_rtnl(struct net *net)
4649 {
4650 unsigned long now, next, next_sec, next_sched;
4651 struct inet6_ifaddr *ifp;
4652 int i;
4653
4654 ASSERT_RTNL();
4655
4656 rcu_read_lock_bh();
4657 now = jiffies;
4658 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4659
4660 cancel_delayed_work(&net->ipv6.addr_chk_work);
4661
4662 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4663 restart:
4664 hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4665 unsigned long age;
4666
4667 /* An IFA_F_PERMANENT address can still have a finite preferred
4668 * lifetime (preferred_lft set to a value other than zero or infinity
4669 * while valid_lft is infinity), so only skip it when prefered_lft is infinite.
4670 */
4671 if ((ifp->flags & IFA_F_PERMANENT) &&
4672 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4673 continue;
4674
4675 spin_lock(&ifp->lock);
4676 /* We try to batch several events at once. */
4677 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4678
4679 if ((ifp->flags&IFA_F_TEMPORARY) &&
4680 !(ifp->flags&IFA_F_TENTATIVE) &&
4681 ifp->prefered_lft != INFINITY_LIFE_TIME &&
4682 !ifp->regen_count && ifp->ifpub) {
4683 /* This is a non-regenerated temporary addr. */
4684
4685 unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
4686
4687 if (age + regen_advance >= ifp->prefered_lft) {
4688 struct inet6_ifaddr *ifpub = ifp->ifpub;
4689 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4690 next = ifp->tstamp + ifp->prefered_lft * HZ;
4691
4692 ifp->regen_count++;
4693 in6_ifa_hold(ifp);
4694 in6_ifa_hold(ifpub);
4695 spin_unlock(&ifp->lock);
4696
4697 spin_lock(&ifpub->lock);
4698 ifpub->regen_count = 0;
4699 spin_unlock(&ifpub->lock);
4700 rcu_read_unlock_bh();
4701 ipv6_create_tempaddr(ifpub, true);
4702 in6_ifa_put(ifpub);
4703 in6_ifa_put(ifp);
4704 rcu_read_lock_bh();
4705 goto restart;
4706 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4707 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4708 }
4709
4710 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4711 age >= ifp->valid_lft) {
4712 spin_unlock(&ifp->lock);
4713 in6_ifa_hold(ifp);
4714 rcu_read_unlock_bh();
4715 ipv6_del_addr(ifp);
4716 rcu_read_lock_bh();
4717 goto restart;
4718 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4719 spin_unlock(&ifp->lock);
4720 continue;
4721 } else if (age >= ifp->prefered_lft) {
4722 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4723 int deprecate = 0;
4724
4725 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4726 deprecate = 1;
4727 ifp->flags |= IFA_F_DEPRECATED;
4728 }
4729
4730 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4731 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4732 next = ifp->tstamp + ifp->valid_lft * HZ;
4733
4734 spin_unlock(&ifp->lock);
4735
4736 if (deprecate) {
4737 in6_ifa_hold(ifp);
4738
4739 ipv6_ifa_notify(0, ifp);
4740 in6_ifa_put(ifp);
4741 goto restart;
4742 }
4743 } else {
4744 /* ifp->prefered_lft <= ifp->valid_lft */
4745 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4746 next = ifp->tstamp + ifp->prefered_lft * HZ;
4747 spin_unlock(&ifp->lock);
4748 }
4749 }
4750 }
4751
4752 next_sec = round_jiffies_up(next);
4753 next_sched = next;
4754
4755 /* If rounded timeout is accurate enough, accept it. */
4756 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4757 next_sched = next_sec;
4758
4759 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4760 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4761 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4762
4763 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4764 now, next, next_sec, next_sched);
4765 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4766 rcu_read_unlock_bh();
4767 }
4768
4769 static void addrconf_verify_work(struct work_struct *w)
4770 {
4771 struct net *net = container_of(to_delayed_work(w), struct net,
4772 ipv6.addr_chk_work);
4773
4774 rtnl_lock();
4775 addrconf_verify_rtnl(net);
4776 rtnl_unlock();
4777 }
4778
4779 static void addrconf_verify(struct net *net)
4780 {
4781 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4782 }
4783
4784 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4785 struct in6_addr **peer_pfx)
4786 {
4787 struct in6_addr *pfx = NULL;
4788
4789 *peer_pfx = NULL;
4790
4791 if (addr)
4792 pfx = nla_data(addr);
4793
4794 if (local) {
4795 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4796 *peer_pfx = pfx;
4797 pfx = nla_data(local);
4798 }
4799
4800 return pfx;
4801 }
4802
4803 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4804 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4805 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4806 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4807 [IFA_FLAGS] = { .len = sizeof(u32) },
4808 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4809 [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4810 [IFA_PROTO] = { .type = NLA_U8 },
4811 };
4812
4813 static int
4814 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4815 struct netlink_ext_ack *extack)
4816 {
4817 struct net *net = sock_net(skb->sk);
4818 struct ifaddrmsg *ifm;
4819 struct nlattr *tb[IFA_MAX+1];
4820 struct in6_addr *pfx, *peer_pfx;
4821 u32 ifa_flags;
4822 int err;
4823
4824 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4825 ifa_ipv6_policy, extack);
4826 if (err < 0)
4827 return err;
4828
4829 ifm = nlmsg_data(nlh);
4830 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4831 if (!pfx)
4832 return -EINVAL;
4833
4834 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4835
4836 /* We ignore other flags so far. */
4837 ifa_flags &= IFA_F_MANAGETEMPADDR;
4838
4839 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4840 ifm->ifa_prefixlen, extack);
4841 }
4842
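/* Adjust the prefix route that belongs to an address (or to its peer):
 * if the desired metric differs from the existing route's, the route is
 * deleted and re-added; otherwise only its expiry is updated in place.
 */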
4843 static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
4844 unsigned long expires, u32 flags,
4845 bool modify_peer)
4846 {
4847 struct fib6_table *table;
4848 struct fib6_info *f6i;
4849 u32 prio;
4850
4851 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4852 ifp->prefix_len,
4853 ifp->idev->dev, 0, RTF_DEFAULT, true);
4854 if (!f6i)
4855 return -ENOENT;
4856
4857 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4858 if (f6i->fib6_metric != prio) {
4859 /* delete old one */
4860 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4861
4862 /* add new one */
4863 addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4864 ifp->prefix_len,
4865 ifp->rt_priority, ifp->idev->dev,
4866 expires, flags, GFP_KERNEL);
4867 return 0;
4868 }
4869 if (f6i != net->ipv6.fib6_null_entry) {
4870 table = f6i->fib6_table;
4871 spin_lock_bh(&table->tb6_lock);
4872
4873 if (!(flags & RTF_EXPIRES)) {
4874 fib6_clean_expires(f6i);
4875 fib6_remove_gc_list(f6i);
4876 } else {
4877 fib6_set_expires(f6i, expires);
4878 fib6_add_gc_list(f6i);
4879 }
4880
4881 spin_unlock_bh(&table->tb6_lock);
4882 }
4883 fib6_info_release(f6i);
4884
4885 return 0;
4886 }
4887
4888 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4889 struct ifa6_config *cfg)
4890 {
4891 u32 flags;
4892 clock_t expires;
4893 unsigned long timeout;
4894 bool was_managetempaddr;
4895 bool had_prefixroute;
4896 bool new_peer = false;
4897
4898 ASSERT_RTNL();
4899
4900 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4901 return -EINVAL;
4902
4903 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4904 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4905 return -EINVAL;
4906
4907 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4908 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4909
4910 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4911 if (addrconf_finite_timeout(timeout)) {
4912 expires = jiffies_to_clock_t(timeout * HZ);
4913 cfg->valid_lft = timeout;
4914 flags = RTF_EXPIRES;
4915 } else {
4916 expires = 0;
4917 flags = 0;
4918 cfg->ifa_flags |= IFA_F_PERMANENT;
4919 }
4920
4921 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4922 if (addrconf_finite_timeout(timeout)) {
4923 if (timeout == 0)
4924 cfg->ifa_flags |= IFA_F_DEPRECATED;
4925 cfg->preferred_lft = timeout;
4926 }
4927
4928 if (cfg->peer_pfx &&
4929 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4930 if (!ipv6_addr_any(&ifp->peer_addr))
4931 cleanup_prefix_route(ifp, expires, true, true);
4932 new_peer = true;
4933 }
4934
4935 spin_lock_bh(&ifp->lock);
4936 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4937 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4938 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4939 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4940 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4941 IFA_F_NOPREFIXROUTE);
4942 ifp->flags |= cfg->ifa_flags;
4943 WRITE_ONCE(ifp->tstamp, jiffies);
4944 WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
4945 WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
4946 WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
4947
4948 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4949 WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
4950
4951 if (new_peer)
4952 ifp->peer_addr = *cfg->peer_pfx;
4953
4954 spin_unlock_bh(&ifp->lock);
4955 if (!(ifp->flags&IFA_F_TENTATIVE))
4956 ipv6_ifa_notify(0, ifp);
4957
4958 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4959 int rc = -ENOENT;
4960
4961 if (had_prefixroute)
4962 rc = modify_prefix_route(net, ifp, expires, flags, false);
4963
4964 /* prefix route could have been deleted; if so restore it */
4965 if (rc == -ENOENT) {
4966 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4967 ifp->rt_priority, ifp->idev->dev,
4968 expires, flags, GFP_KERNEL);
4969 }
4970
4971 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4972 rc = modify_prefix_route(net, ifp, expires, flags, true);
4973
4974 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4975 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4976 ifp->rt_priority, ifp->idev->dev,
4977 expires, flags, GFP_KERNEL);
4978 }
4979 } else if (had_prefixroute) {
4980 enum cleanup_prefix_rt_t action;
4981 unsigned long rt_expires;
4982
4983 write_lock_bh(&ifp->idev->lock);
4984 action = check_cleanup_prefix_route(ifp, &rt_expires);
4985 write_unlock_bh(&ifp->idev->lock);
4986
4987 if (action != CLEANUP_PREFIX_RT_NOP) {
4988 cleanup_prefix_route(ifp, rt_expires,
4989 action == CLEANUP_PREFIX_RT_DEL, false);
4990 }
4991 }
4992
4993 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4994 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4995 delete_tempaddrs(ifp->idev, ifp);
4996 else
4997 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4998 cfg->preferred_lft, !was_managetempaddr,
4999 jiffies);
5000 }
5001
5002 addrconf_verify_rtnl(net);
5003
5004 return 0;
5005 }
5006
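/* RTM_NEWADDR handler: parse the IFA_* attributes into an ifa6_config
 * and either create the address or, with NLM_F_REPLACE, modify the
 * existing one.  An "ip -6 addr add 2001:db8::1/64 dev eth0" style
 * request from userspace typically ends up here (illustrative command,
 * not taken from this file).
 */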
5007 static int
5008 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
5009 struct netlink_ext_ack *extack)
5010 {
5011 struct net *net = sock_net(skb->sk);
5012 struct ifaddrmsg *ifm;
5013 struct nlattr *tb[IFA_MAX+1];
5014 struct in6_addr *peer_pfx;
5015 struct inet6_ifaddr *ifa;
5016 struct net_device *dev;
5017 struct inet6_dev *idev;
5018 struct ifa6_config cfg;
5019 int err;
5020
5021 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5022 ifa_ipv6_policy, extack);
5023 if (err < 0)
5024 return err;
5025
5026 memset(&cfg, 0, sizeof(cfg));
5027
5028 ifm = nlmsg_data(nlh);
5029 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
5030 if (!cfg.pfx)
5031 return -EINVAL;
5032
5033 cfg.peer_pfx = peer_pfx;
5034 cfg.plen = ifm->ifa_prefixlen;
5035 if (tb[IFA_RT_PRIORITY])
5036 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
5037
5038 if (tb[IFA_PROTO])
5039 cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
5040
5041 cfg.valid_lft = INFINITY_LIFE_TIME;
5042 cfg.preferred_lft = INFINITY_LIFE_TIME;
5043
5044 if (tb[IFA_CACHEINFO]) {
5045 struct ifa_cacheinfo *ci;
5046
5047 ci = nla_data(tb[IFA_CACHEINFO]);
5048 cfg.valid_lft = ci->ifa_valid;
5049 cfg.preferred_lft = ci->ifa_prefered;
5050 }
5051
5052 dev = __dev_get_by_index(net, ifm->ifa_index);
5053 if (!dev) {
5054 NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
5055 return -ENODEV;
5056 }
5057
5058 if (tb[IFA_FLAGS])
5059 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
5060 else
5061 cfg.ifa_flags = ifm->ifa_flags;
5062
5063 /* We ignore other flags so far. */
5064 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
5065 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
5066 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
5067
5068 idev = ipv6_find_idev(dev);
5069 if (IS_ERR(idev))
5070 return PTR_ERR(idev);
5071
5072 if (!ipv6_allow_optimistic_dad(net, idev))
5073 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
5074
5075 if (cfg.ifa_flags & IFA_F_NODAD &&
5076 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
5077 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
5078 return -EINVAL;
5079 }
5080
5081 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
5082 if (!ifa) {
5083 /*
5084 * It would be best to check for !NLM_F_CREATE here but
5085 * userspace already relies on not having to provide this.
5086 */
5087 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
5088 }
5089
5090 if (nlh->nlmsg_flags & NLM_F_EXCL ||
5091 !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
5092 NL_SET_ERR_MSG_MOD(extack, "address already assigned");
5093 err = -EEXIST;
5094 } else {
5095 err = inet6_addr_modify(net, ifa, &cfg);
5096 }
5097
5098 in6_ifa_put(ifa);
5099
5100 return err;
5101 }
5102
5103 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
5104 u8 scope, int ifindex)
5105 {
5106 struct ifaddrmsg *ifm;
5107
5108 ifm = nlmsg_data(nlh);
5109 ifm->ifa_family = AF_INET6;
5110 ifm->ifa_prefixlen = prefixlen;
5111 ifm->ifa_flags = flags;
5112 ifm->ifa_scope = scope;
5113 ifm->ifa_index = ifindex;
5114 }
5115
5116 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
5117 unsigned long tstamp, u32 preferred, u32 valid)
5118 {
5119 struct ifa_cacheinfo ci;
5120
5121 ci.cstamp = cstamp_delta(cstamp);
5122 ci.tstamp = cstamp_delta(tstamp);
5123 ci.ifa_prefered = preferred;
5124 ci.ifa_valid = valid;
5125
5126 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
5127 }
5128
5129 static inline int rt_scope(int ifa_scope)
5130 {
5131 if (ifa_scope & IFA_HOST)
5132 return RT_SCOPE_HOST;
5133 else if (ifa_scope & IFA_LINK)
5134 return RT_SCOPE_LINK;
5135 else if (ifa_scope & IFA_SITE)
5136 return RT_SCOPE_SITE;
5137 else
5138 return RT_SCOPE_UNIVERSE;
5139 }
5140
5141 static inline int inet6_ifaddr_msgsize(void)
5142 {
5143 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
5144 + nla_total_size(16) /* IFA_LOCAL */
5145 + nla_total_size(16) /* IFA_ADDRESS */
5146 + nla_total_size(sizeof(struct ifa_cacheinfo))
5147 + nla_total_size(4) /* IFA_FLAGS */
5148 + nla_total_size(1) /* IFA_PROTO */
5149 + nla_total_size(4) /* IFA_RT_PRIORITY */;
5150 }
5151
5152 enum addr_type_t {
5153 UNICAST_ADDR,
5154 MULTICAST_ADDR,
5155 ANYCAST_ADDR,
5156 };
5157
5158 struct inet6_fill_args {
5159 u32 portid;
5160 u32 seq;
5161 int event;
5162 unsigned int flags;
5163 int netnsid;
5164 int ifindex;
5165 enum addr_type_t type;
5166 };
5167
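/* Fill one address into a netlink message.  When the address has a peer,
 * IFA_LOCAL carries the local address and IFA_ADDRESS the peer; otherwise
 * only IFA_ADDRESS is emitted.  The advertised preferred/valid lifetimes
 * are reduced by the time elapsed since ifa->tstamp unless the address is
 * permanent with an infinite preferred lifetime.
 */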
5168 static int inet6_fill_ifaddr(struct sk_buff *skb,
5169 const struct inet6_ifaddr *ifa,
5170 struct inet6_fill_args *args)
5171 {
5172 struct nlmsghdr *nlh;
5173 u32 preferred, valid;
5174 u32 flags, priority;
5175 u8 proto;
5176
5177 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5178 sizeof(struct ifaddrmsg), args->flags);
5179 if (!nlh)
5180 return -EMSGSIZE;
5181
5182 flags = READ_ONCE(ifa->flags);
5183 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5184 ifa->idev->dev->ifindex);
5185
5186 if (args->netnsid >= 0 &&
5187 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5188 goto error;
5189
5190 preferred = READ_ONCE(ifa->prefered_lft);
5191 valid = READ_ONCE(ifa->valid_lft);
5192
5193 if (!((flags & IFA_F_PERMANENT) &&
5194 (preferred == INFINITY_LIFE_TIME))) {
5195 if (preferred != INFINITY_LIFE_TIME) {
5196 long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
5197
5198 if (preferred > tval)
5199 preferred -= tval;
5200 else
5201 preferred = 0;
5202 if (valid != INFINITY_LIFE_TIME) {
5203 if (valid > tval)
5204 valid -= tval;
5205 else
5206 valid = 0;
5207 }
5208 }
5209 } else {
5210 preferred = INFINITY_LIFE_TIME;
5211 valid = INFINITY_LIFE_TIME;
5212 }
5213
5214 if (!ipv6_addr_any(&ifa->peer_addr)) {
5215 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5216 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5217 goto error;
5218 } else {
5219 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5220 goto error;
5221 }
5222
5223 priority = READ_ONCE(ifa->rt_priority);
5224 if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
5225 goto error;
5226
5227 if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
5228 preferred, valid) < 0)
5229 goto error;
5230
5231 if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
5232 goto error;
5233
5234 proto = READ_ONCE(ifa->ifa_proto);
5235 if (proto && nla_put_u8(skb, IFA_PROTO, proto))
5236 goto error;
5237
5238 nlmsg_end(skb, nlh);
5239 return 0;
5240
5241 error:
5242 nlmsg_cancel(skb, nlh);
5243 return -EMSGSIZE;
5244 }
5245
5246 static int inet6_fill_ifmcaddr(struct sk_buff *skb,
5247 const struct ifmcaddr6 *ifmca,
5248 struct inet6_fill_args *args)
5249 {
5250 int ifindex = ifmca->idev->dev->ifindex;
5251 u8 scope = RT_SCOPE_UNIVERSE;
5252 struct nlmsghdr *nlh;
5253
5254 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5255 scope = RT_SCOPE_SITE;
5256
5257 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5258 sizeof(struct ifaddrmsg), args->flags);
5259 if (!nlh)
5260 return -EMSGSIZE;
5261
5262 if (args->netnsid >= 0 &&
5263 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5264 nlmsg_cancel(skb, nlh);
5265 return -EMSGSIZE;
5266 }
5267
5268 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5269 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5270 put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
5271 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5272 nlmsg_cancel(skb, nlh);
5273 return -EMSGSIZE;
5274 }
5275
5276 nlmsg_end(skb, nlh);
5277 return 0;
5278 }
5279
5280 static int inet6_fill_ifacaddr(struct sk_buff *skb,
5281 const struct ifacaddr6 *ifaca,
5282 struct inet6_fill_args *args)
5283 {
5284 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5285 int ifindex = dev ? dev->ifindex : 1;
5286 u8 scope = RT_SCOPE_UNIVERSE;
5287 struct nlmsghdr *nlh;
5288
5289 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5290 scope = RT_SCOPE_SITE;
5291
5292 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5293 sizeof(struct ifaddrmsg), args->flags);
5294 if (!nlh)
5295 return -EMSGSIZE;
5296
5297 if (args->netnsid >= 0 &&
5298 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5299 nlmsg_cancel(skb, nlh);
5300 return -EMSGSIZE;
5301 }
5302
5303 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5304 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5305 put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
5306 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5307 nlmsg_cancel(skb, nlh);
5308 return -EMSGSIZE;
5309 }
5310
5311 nlmsg_end(skb, nlh);
5312 return 0;
5313 }
5314
5315 /* called with rcu_read_lock() */
5316 static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
5317 struct netlink_callback *cb, int *s_ip_idx,
5318 struct inet6_fill_args *fillargs)
5319 {
5320 const struct ifmcaddr6 *ifmca;
5321 const struct ifacaddr6 *ifaca;
5322 int ip_idx = 0;
5323 int err = 0;
5324
5325 switch (fillargs->type) {
5326 case UNICAST_ADDR: {
5327 const struct inet6_ifaddr *ifa;
5328 fillargs->event = RTM_NEWADDR;
5329
5330 /* unicast address incl. temp addr */
5331 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
5332 if (ip_idx < *s_ip_idx)
5333 goto next;
5334 err = inet6_fill_ifaddr(skb, ifa, fillargs);
5335 if (err < 0)
5336 break;
5337 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5338 next:
5339 ip_idx++;
5340 }
5341 break;
5342 }
5343 case MULTICAST_ADDR:
5344 fillargs->event = RTM_GETMULTICAST;
5345
5346 /* multicast address */
5347 for (ifmca = rcu_dereference(idev->mc_list);
5348 ifmca;
5349 ifmca = rcu_dereference(ifmca->next), ip_idx++) {
5350 if (ip_idx < *s_ip_idx)
5351 continue;
5352 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5353 if (err < 0)
5354 break;
5355 }
5356 break;
5357 case ANYCAST_ADDR:
5358 fillargs->event = RTM_GETANYCAST;
5359 /* anycast address */
5360 for (ifaca = rcu_dereference(idev->ac_list); ifaca;
5361 ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
5362 if (ip_idx < *s_ip_idx)
5363 continue;
5364 err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5365 if (err < 0)
5366 break;
5367 }
5368 break;
5369 default:
5370 break;
5371 }
5372 *s_ip_idx = err ? ip_idx : 0;
5373 return err;
5374 }
5375
5376 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5377 struct inet6_fill_args *fillargs,
5378 struct net **tgt_net, struct sock *sk,
5379 struct netlink_callback *cb)
5380 {
5381 struct netlink_ext_ack *extack = cb->extack;
5382 struct nlattr *tb[IFA_MAX+1];
5383 struct ifaddrmsg *ifm;
5384 int err, i;
5385
5386 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5387 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5388 return -EINVAL;
5389 }
5390
5391 ifm = nlmsg_data(nlh);
5392 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5393 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5394 return -EINVAL;
5395 }
5396
5397 fillargs->ifindex = ifm->ifa_index;
5398 if (fillargs->ifindex) {
5399 cb->answer_flags |= NLM_F_DUMP_FILTERED;
5400 fillargs->flags |= NLM_F_DUMP_FILTERED;
5401 }
5402
5403 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5404 ifa_ipv6_policy, extack);
5405 if (err < 0)
5406 return err;
5407
5408 for (i = 0; i <= IFA_MAX; ++i) {
5409 if (!tb[i])
5410 continue;
5411
5412 if (i == IFA_TARGET_NETNSID) {
5413 struct net *net;
5414
5415 fillargs->netnsid = nla_get_s32(tb[i]);
5416 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5417 if (IS_ERR(net)) {
5418 fillargs->netnsid = -1;
5419 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5420 return PTR_ERR(net);
5421 }
5422 *tgt_net = net;
5423 } else {
5424 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5425 return -EINVAL;
5426 }
5427 }
5428
5429 return 0;
5430 }
5431
5432 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5433 enum addr_type_t type)
5434 {
5435 struct net *tgt_net = sock_net(skb->sk);
5436 const struct nlmsghdr *nlh = cb->nlh;
5437 struct inet6_fill_args fillargs = {
5438 .portid = NETLINK_CB(cb->skb).portid,
5439 .seq = cb->nlh->nlmsg_seq,
5440 .flags = NLM_F_MULTI,
5441 .netnsid = -1,
5442 .type = type,
5443 };
5444 struct {
5445 unsigned long ifindex;
5446 int ip_idx;
5447 } *ctx = (void *)cb->ctx;
5448 struct net_device *dev;
5449 struct inet6_dev *idev;
5450 int err = 0;
5451
5452 rcu_read_lock();
5453 if (cb->strict_check) {
5454 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5455 skb->sk, cb);
5456 if (err < 0)
5457 goto done;
5458
5459 err = 0;
5460 if (fillargs.ifindex) {
5461 dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
5462 if (!dev) {
5463 err = -ENODEV;
5464 goto done;
5465 }
5466 idev = __in6_dev_get(dev);
5467 if (idev)
5468 err = in6_dump_addrs(idev, skb, cb,
5469 &ctx->ip_idx,
5470 &fillargs);
5471 goto done;
5472 }
5473 }
5474
5475 cb->seq = inet6_base_seq(tgt_net);
5476 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
5477 idev = __in6_dev_get(dev);
5478 if (!idev)
5479 continue;
5480 err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
5481 &fillargs);
5482 if (err < 0)
5483 goto done;
5484 }
5485 done:
5486 rcu_read_unlock();
5487 if (fillargs.netnsid >= 0)
5488 put_net(tgt_net);
5489
5490 return err;
5491 }
5492
5493 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5494 {
5495 enum addr_type_t type = UNICAST_ADDR;
5496
5497 return inet6_dump_addr(skb, cb, type);
5498 }
5499
5500 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5501 {
5502 enum addr_type_t type = MULTICAST_ADDR;
5503
5504 return inet6_dump_addr(skb, cb, type);
5505 }
5506
5507
5508 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5509 {
5510 enum addr_type_t type = ANYCAST_ADDR;
5511
5512 return inet6_dump_addr(skb, cb, type);
5513 }
5514
5515 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5516 const struct nlmsghdr *nlh,
5517 struct nlattr **tb,
5518 struct netlink_ext_ack *extack)
5519 {
5520 struct ifaddrmsg *ifm;
5521 int i, err;
5522
5523 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5524 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5525 return -EINVAL;
5526 }
5527
5528 if (!netlink_strict_get_check(skb))
5529 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5530 ifa_ipv6_policy, extack);
5531
5532 ifm = nlmsg_data(nlh);
5533 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5534 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5535 return -EINVAL;
5536 }
5537
5538 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5539 ifa_ipv6_policy, extack);
5540 if (err)
5541 return err;
5542
5543 for (i = 0; i <= IFA_MAX; i++) {
5544 if (!tb[i])
5545 continue;
5546
5547 switch (i) {
5548 case IFA_TARGET_NETNSID:
5549 case IFA_ADDRESS:
5550 case IFA_LOCAL:
5551 break;
5552 default:
5553 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5554 return -EINVAL;
5555 }
5556 }
5557
5558 return 0;
5559 }
5560
5561 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5562 struct netlink_ext_ack *extack)
5563 {
5564 struct net *tgt_net = sock_net(in_skb->sk);
5565 struct inet6_fill_args fillargs = {
5566 .portid = NETLINK_CB(in_skb).portid,
5567 .seq = nlh->nlmsg_seq,
5568 .event = RTM_NEWADDR,
5569 .flags = 0,
5570 .netnsid = -1,
5571 };
5572 struct ifaddrmsg *ifm;
5573 struct nlattr *tb[IFA_MAX+1];
5574 struct in6_addr *addr = NULL, *peer;
5575 struct net_device *dev = NULL;
5576 struct inet6_ifaddr *ifa;
5577 struct sk_buff *skb;
5578 int err;
5579
5580 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5581 if (err < 0)
5582 return err;
5583
5584 if (tb[IFA_TARGET_NETNSID]) {
5585 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5586
5587 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5588 fillargs.netnsid);
5589 if (IS_ERR(tgt_net))
5590 return PTR_ERR(tgt_net);
5591 }
5592
5593 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5594 if (!addr) {
5595 err = -EINVAL;
5596 goto errout;
5597 }
5598 ifm = nlmsg_data(nlh);
5599 if (ifm->ifa_index)
5600 dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5601
5602 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5603 if (!ifa) {
5604 err = -EADDRNOTAVAIL;
5605 goto errout;
5606 }
5607
5608 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5609 if (!skb) {
5610 err = -ENOBUFS;
5611 goto errout_ifa;
5612 }
5613
5614 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5615 if (err < 0) {
5616 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5617 WARN_ON(err == -EMSGSIZE);
5618 kfree_skb(skb);
5619 goto errout_ifa;
5620 }
5621 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5622 errout_ifa:
5623 in6_ifa_put(ifa);
5624 errout:
5625 dev_put(dev);
5626 if (fillargs.netnsid >= 0)
5627 put_net(tgt_net);
5628
5629 return err;
5630 }
5631
5632 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5633 {
5634 struct sk_buff *skb;
5635 struct net *net = dev_net(ifa->idev->dev);
5636 struct inet6_fill_args fillargs = {
5637 .portid = 0,
5638 .seq = 0,
5639 .event = event,
5640 .flags = 0,
5641 .netnsid = -1,
5642 };
5643 int err = -ENOBUFS;
5644
5645 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5646 if (!skb)
5647 goto errout;
5648
5649 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5650 if (err < 0) {
5651 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5652 WARN_ON(err == -EMSGSIZE);
5653 kfree_skb(skb);
5654 goto errout;
5655 }
5656 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5657 return;
5658 errout:
5659 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5660 }
5661
5662 static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
5663 __s32 *array, int bytes)
5664 {
5665 BUG_ON(bytes < (DEVCONF_MAX * 4));
5666
5667 memset(array, 0, bytes);
5668 array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
5669 array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
5670 array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
5671 array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
5672 array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
5673 array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
5674 array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
5675 array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
5676 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5677 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
5678 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5679 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
5680 array[DEVCONF_RTR_SOLICIT_DELAY] =
5681 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
5682 array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
5683 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5684 jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
5685 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5686 jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
5687 array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
5688 array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
5689 array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
5690 array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
5691 array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
5692 array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
5693 array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
5694 array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
5695 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
5696 READ_ONCE(cnf->accept_ra_min_hop_limit);
5697 array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
5698 #ifdef CONFIG_IPV6_ROUTER_PREF
5699 array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
5700 array[DEVCONF_RTR_PROBE_INTERVAL] =
5701 jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
5702 #ifdef CONFIG_IPV6_ROUTE_INFO
5703 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
5704 READ_ONCE(cnf->accept_ra_rt_info_min_plen);
5705 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
5706 READ_ONCE(cnf->accept_ra_rt_info_max_plen);
5707 #endif
5708 #endif
5709 array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
5710 array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
5711 READ_ONCE(cnf->accept_source_route);
5712 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5713 array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
5714 array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
5715 #endif
5716 #ifdef CONFIG_IPV6_MROUTE
5717 array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5718 #endif
5719 array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
5720 array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
5721 array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
5722 array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
5723 array[DEVCONF_SUPPRESS_FRAG_NDISC] =
5724 READ_ONCE(cnf->suppress_frag_ndisc);
5725 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
5726 READ_ONCE(cnf->accept_ra_from_local);
5727 array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
5728 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
5729 READ_ONCE(cnf->ignore_routes_with_linkdown);
5730 /* we omit DEVCONF_STABLE_SECRET for now */
5731 array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
5732 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
5733 READ_ONCE(cnf->drop_unicast_in_l2_multicast);
5734 array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
5735 array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
5736 array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
5737 #ifdef CONFIG_IPV6_SEG6_HMAC
5738 array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
5739 #endif
5740 array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
5741 array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
5742 array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
5743 array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
5744 array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
5745 array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
5746 array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
5747 array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
5748 array[DEVCONF_NDISC_EVICT_NOCARRIER] =
5749 READ_ONCE(cnf->ndisc_evict_nocarrier);
5750 array[DEVCONF_ACCEPT_UNTRACKED_NA] =
5751 READ_ONCE(cnf->accept_untracked_na);
5752 array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
5753 }
5754
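/* Worst-case payload size of the AF_INET6 attribute nest; must be kept
 * in sync with the attributes emitted by inet6_fill_ifla6_attrs().
 */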
5755 static inline size_t inet6_ifla6_size(void)
5756 {
5757 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5758 + nla_total_size(sizeof(struct ifla_cacheinfo))
5759 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5760 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5761 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5762 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5763 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5764 + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5765 + 0;
5766 }
5767
5768 static inline size_t inet6_if_nlmsg_size(void)
5769 {
5770 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5771 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5772 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5773 + nla_total_size(4) /* IFLA_MTU */
5774 + nla_total_size(4) /* IFLA_LINK */
5775 + nla_total_size(1) /* IFLA_OPERSTATE */
5776 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5777 }
5778
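/* The two helpers below copy MIB counters into the u64 array backing a
 * netlink stats attribute.  Slot 0 carries the number of counters so
 * userspace can detect layout mismatches, and any trailing space in the
 * attribute is zero-padded.  The 64-bit variant sums the per-CPU
 * counters.
 */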
5779 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5780 int bytes)
5781 {
5782 int i;
5783 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5784 BUG_ON(pad < 0);
5785
5786 /* Use put_unaligned() because stats may not be aligned for u64. */
5787 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5788 for (i = 1; i < ICMP6_MIB_MAX; i++)
5789 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5790
5791 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5792 }
5793
5794 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5795 int bytes, size_t syncpoff)
5796 {
5797 int i, c;
5798 u64 buff[IPSTATS_MIB_MAX];
5799 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5800
5801 BUG_ON(pad < 0);
5802
5803 memset(buff, 0, sizeof(buff));
5804 buff[0] = IPSTATS_MIB_MAX;
5805
5806 for_each_possible_cpu(c) {
5807 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5808 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5809 }
5810
5811 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5812 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5813 }
5814
5815 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5816 int bytes)
5817 {
5818 switch (attrtype) {
5819 case IFLA_INET6_STATS:
5820 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5821 offsetof(struct ipstats_mib, syncp));
5822 break;
5823 case IFLA_INET6_ICMP6STATS:
5824 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5825 break;
5826 }
5827 }
5828
5829 static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb,
5830 struct inet6_dev *idev)
5831 {
5832 struct nlattr *nla;
5833
5834 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5835 if (!nla)
5836 goto nla_put_failure;
5837 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5838
5839 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5840 if (!nla)
5841 goto nla_put_failure;
5842 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5843
5844 return 0;
5845
5846 nla_put_failure:
5847 return -EMSGSIZE;
5848 }
5849
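/* Fill the AF_INET6 specific attributes (flags, cache info, devconf
 * array, optional SNMP stats, token, address generation mode and the
 * RA-supplied MTU).  Used both for the IFLA_PROTINFO nest built by
 * inet6_fill_ifinfo() and for the per-family link attributes filled
 * via inet6_fill_link_af().
 */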
5850 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5851 u32 ext_filter_mask)
5852 {
5853 struct ifla_cacheinfo ci;
5854 struct nlattr *nla;
5855 u32 ra_mtu;
5856
5857 if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
5858 goto nla_put_failure;
5859 ci.max_reasm_len = IPV6_MAXPLEN;
5860 ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
5861 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5862 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5863 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5864 goto nla_put_failure;
5865 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5866 if (!nla)
5867 goto nla_put_failure;
5868 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5869
5870 /* XXX - MC not implemented */
5871
5872 if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
5873 if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0)
5874 goto nla_put_failure;
5875 }
5876
5877 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5878 if (!nla)
5879 goto nla_put_failure;
5880 read_lock_bh(&idev->lock);
5881 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5882 read_unlock_bh(&idev->lock);
5883
5884 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
5885 READ_ONCE(idev->cnf.addr_gen_mode)))
5886 goto nla_put_failure;
5887
5888 ra_mtu = READ_ONCE(idev->ra_mtu);
5889 if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
5890 goto nla_put_failure;
5891
5892 return 0;
5893
5894 nla_put_failure:
5895 return -EMSGSIZE;
5896 }
5897
5898 static size_t inet6_get_link_af_size(const struct net_device *dev,
5899 u32 ext_filter_mask)
5900 {
5901 if (!__in6_dev_get(dev))
5902 return 0;
5903
5904 return inet6_ifla6_size();
5905 }
5906
5907 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5908 u32 ext_filter_mask)
5909 {
5910 struct inet6_dev *idev = __in6_dev_get(dev);
5911
5912 if (!idev)
5913 return -ENODATA;
5914
5915 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5916 return -EMSGSIZE;
5917
5918 return 0;
5919 }
5920
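/* Handle IFLA_INET6_TOKEN: install a new 64-bit interface identifier
 * ("token") used for tokenised autoconfigured addresses (set from
 * userspace e.g. via iproute2's "ip token").  Loopback and IFF_NOARP
 * devices, and devices with router advertisements or router
 * solicitations disabled, are rejected.  An all-zero token clears the
 * setting; otherwise a fresh router solicitation is scheduled.  Either
 * way the lifetimes of existing tokenised addresses are zeroed so that
 * addrconf_verify_rtnl() expires them promptly.
 */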
5921 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5922 struct netlink_ext_ack *extack)
5923 {
5924 struct inet6_ifaddr *ifp;
5925 struct net_device *dev = idev->dev;
5926 bool clear_token, update_rs = false;
5927 struct in6_addr ll_addr;
5928
5929 ASSERT_RTNL();
5930
5931 if (!token)
5932 return -EINVAL;
5933
5934 if (dev->flags & IFF_LOOPBACK) {
5935 NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5936 return -EINVAL;
5937 }
5938
5939 if (dev->flags & IFF_NOARP) {
5940 NL_SET_ERR_MSG_MOD(extack,
5941 "Device does not do neighbour discovery");
5942 return -EINVAL;
5943 }
5944
5945 if (!ipv6_accept_ra(idev)) {
5946 NL_SET_ERR_MSG_MOD(extack,
5947 "Router advertisement is disabled on device");
5948 return -EINVAL;
5949 }
5950
5951 if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
5952 NL_SET_ERR_MSG(extack,
5953 "Router solicitation is disabled on device");
5954 return -EINVAL;
5955 }
5956
5957 write_lock_bh(&idev->lock);
5958
5959 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5960 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5961
5962 write_unlock_bh(&idev->lock);
5963
5964 clear_token = ipv6_addr_any(token);
5965 if (clear_token)
5966 goto update_lft;
5967
5968 if (!idev->dead && (idev->if_flags & IF_READY) &&
5969 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5970 IFA_F_OPTIMISTIC)) {
5971 /* If we're not ready, then normal ifup will take care
5972 * of this. Otherwise, we need to request our rs here.
5973 */
5974 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5975 update_rs = true;
5976 }
5977
5978 update_lft:
5979 write_lock_bh(&idev->lock);
5980
5981 if (update_rs) {
5982 idev->if_flags |= IF_RS_SENT;
5983 idev->rs_interval = rfc3315_s14_backoff_init(
5984 READ_ONCE(idev->cnf.rtr_solicit_interval));
5985 idev->rs_probes = 1;
5986 addrconf_mod_rs_timer(idev, idev->rs_interval);
5987 }
5988
5989 /* Well, that's kinda nasty ... */
5990 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5991 spin_lock(&ifp->lock);
5992 if (ifp->tokenized) {
5993 ifp->valid_lft = 0;
5994 ifp->prefered_lft = 0;
5995 }
5996 spin_unlock(&ifp->lock);
5997 }
5998
5999 write_unlock_bh(&idev->lock);
6000 inet6_ifinfo_notify(RTM_NEWLINK, idev);
6001 addrconf_verify_rtnl(dev_net(dev));
6002 return 0;
6003 }
6004
6005 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
6006 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
6007 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
6008 [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT,
6009 .reject_message =
6010 "IFLA_INET6_RA_MTU can not be set" },
6011 };
6012
6013 static int check_addr_gen_mode(int mode)
6014 {
6015 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
6016 mode != IN6_ADDR_GEN_MODE_NONE &&
6017 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
6018 mode != IN6_ADDR_GEN_MODE_RANDOM)
6019 return -EINVAL;
6020 return 1;
6021 }
6022
6023 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
6024 int mode)
6025 {
6026 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
6027 !idev->cnf.stable_secret.initialized &&
6028 !net->ipv6.devconf_dflt->stable_secret.initialized)
6029 return -EINVAL;
6030 return 1;
6031 }
6032
6033 static int inet6_validate_link_af(const struct net_device *dev,
6034 const struct nlattr *nla,
6035 struct netlink_ext_ack *extack)
6036 {
6037 struct nlattr *tb[IFLA_INET6_MAX + 1];
6038 struct inet6_dev *idev = NULL;
6039 int err;
6040
6041 if (dev) {
6042 idev = __in6_dev_get(dev);
6043 if (!idev)
6044 return -EAFNOSUPPORT;
6045 }
6046
6047 err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
6048 inet6_af_policy, extack);
6049 if (err)
6050 return err;
6051
6052 if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
6053 return -EINVAL;
6054
6055 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6056 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6057
6058 if (check_addr_gen_mode(mode) < 0)
6059 return -EINVAL;
6060 if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
6061 return -EINVAL;
6062 }
6063
6064 return 0;
6065 }
6066
6067 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
6068 struct netlink_ext_ack *extack)
6069 {
6070 struct inet6_dev *idev = __in6_dev_get(dev);
6071 struct nlattr *tb[IFLA_INET6_MAX + 1];
6072 int err;
6073
6074 if (!idev)
6075 return -EAFNOSUPPORT;
6076
6077 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
6078 return -EINVAL;
6079
6080 if (tb[IFLA_INET6_TOKEN]) {
6081 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
6082 extack);
6083 if (err)
6084 return err;
6085 }
6086
6087 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6088 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6089
6090 WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
6091 }
6092
6093 return 0;
6094 }
6095
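/* Build an RTM_NEWLINK message for one inet6 device, with the AF_INET6
 * details nested inside IFLA_PROTINFO.
 */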
6096 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
6097 u32 portid, u32 seq, int event, unsigned int flags)
6098 {
6099 struct net_device *dev = idev->dev;
6100 struct ifinfomsg *hdr;
6101 struct nlmsghdr *nlh;
6102 int ifindex, iflink;
6103 void *protoinfo;
6104
6105 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
6106 if (!nlh)
6107 return -EMSGSIZE;
6108
6109 hdr = nlmsg_data(nlh);
6110 hdr->ifi_family = AF_INET6;
6111 hdr->__ifi_pad = 0;
6112 hdr->ifi_type = dev->type;
6113 ifindex = READ_ONCE(dev->ifindex);
6114 hdr->ifi_index = ifindex;
6115 hdr->ifi_flags = dev_get_flags(dev);
6116 hdr->ifi_change = 0;
6117
6118 iflink = dev_get_iflink(dev);
6119 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
6120 (dev->addr_len &&
6121 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
6122 nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
6123 (ifindex != iflink &&
6124 nla_put_u32(skb, IFLA_LINK, iflink)) ||
6125 nla_put_u8(skb, IFLA_OPERSTATE,
6126 netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
6127 goto nla_put_failure;
6128 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
6129 if (!protoinfo)
6130 goto nla_put_failure;
6131
6132 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
6133 goto nla_put_failure;
6134
6135 nla_nest_end(skb, protoinfo);
6136 nlmsg_end(skb, nlh);
6137 return 0;
6138
6139 nla_put_failure:
6140 nlmsg_cancel(skb, nlh);
6141 return -EMSGSIZE;
6142 }
6143
6144 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
6145 struct netlink_ext_ack *extack)
6146 {
6147 struct ifinfomsg *ifm;
6148
6149 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
6150 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
6151 return -EINVAL;
6152 }
6153
6154 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
6155 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
6156 return -EINVAL;
6157 }
6158
6159 ifm = nlmsg_data(nlh);
6160 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
6161 ifm->ifi_change || ifm->ifi_index) {
6162 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
6163 return -EINVAL;
6164 }
6165
6166 return 0;
6167 }
6168
6169 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
6170 {
6171 struct net *net = sock_net(skb->sk);
6172 struct {
6173 unsigned long ifindex;
6174 } *ctx = (void *)cb->ctx;
6175 struct net_device *dev;
6176 struct inet6_dev *idev;
6177 int err;
6178
6179 /* only requests using strict checking can pass data to
6180 * influence the dump
6181 */
6182 if (cb->strict_check) {
6183 err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6184
6185 if (err < 0)
6186 return err;
6187 }
6188
6189 err = 0;
6190 rcu_read_lock();
6191 for_each_netdev_dump(net, dev, ctx->ifindex) {
6192 idev = __in6_dev_get(dev);
6193 if (!idev)
6194 continue;
6195 err = inet6_fill_ifinfo(skb, idev,
6196 NETLINK_CB(cb->skb).portid,
6197 cb->nlh->nlmsg_seq,
6198 RTM_NEWLINK, NLM_F_MULTI);
6199 if (err < 0)
6200 break;
6201 }
6202 rcu_read_unlock();
6203
6204 return err;
6205 }
6206
6207 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6208 {
6209 struct sk_buff *skb;
6210 struct net *net = dev_net(idev->dev);
6211 int err = -ENOBUFS;
6212
6213 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6214 if (!skb)
6215 goto errout;
6216
6217 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6218 if (err < 0) {
6219 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6220 WARN_ON(err == -EMSGSIZE);
6221 kfree_skb(skb);
6222 goto errout;
6223 }
6224 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6225 return;
6226 errout:
6227 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6228 }
6229
6230 static inline size_t inet6_prefix_nlmsg_size(void)
6231 {
6232 return NLMSG_ALIGN(sizeof(struct prefixmsg))
6233 + nla_total_size(sizeof(struct in6_addr))
6234 + nla_total_size(sizeof(struct prefix_cacheinfo));
6235 }
6236
6237 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6238 struct prefix_info *pinfo, u32 portid, u32 seq,
6239 int event, unsigned int flags)
6240 {
6241 struct prefixmsg *pmsg;
6242 struct nlmsghdr *nlh;
6243 struct prefix_cacheinfo ci;
6244
6245 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6246 if (!nlh)
6247 return -EMSGSIZE;
6248
6249 pmsg = nlmsg_data(nlh);
6250 pmsg->prefix_family = AF_INET6;
6251 pmsg->prefix_pad1 = 0;
6252 pmsg->prefix_pad2 = 0;
6253 pmsg->prefix_ifindex = idev->dev->ifindex;
6254 pmsg->prefix_len = pinfo->prefix_len;
6255 pmsg->prefix_type = pinfo->type;
6256 pmsg->prefix_pad3 = 0;
6257 pmsg->prefix_flags = pinfo->flags;
6258
6259 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6260 goto nla_put_failure;
6261 ci.preferred_time = ntohl(pinfo->prefered);
6262 ci.valid_time = ntohl(pinfo->valid);
6263 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6264 goto nla_put_failure;
6265 nlmsg_end(skb, nlh);
6266 return 0;
6267
6268 nla_put_failure:
6269 nlmsg_cancel(skb, nlh);
6270 return -EMSGSIZE;
6271 }
6272
6273 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6274 struct prefix_info *pinfo)
6275 {
6276 struct sk_buff *skb;
6277 struct net *net = dev_net(idev->dev);
6278 int err = -ENOBUFS;
6279
6280 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6281 if (!skb)
6282 goto errout;
6283
6284 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6285 if (err < 0) {
6286 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6287 WARN_ON(err == -EMSGSIZE);
6288 kfree_skb(skb);
6289 goto errout;
6290 }
6291 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6292 return;
6293 errout:
6294 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6295 }
6296
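/* Post-process an address change: emit the RTM_NEWADDR/RTM_DELADDR
 * notification and keep the host route, the anycast and solicited-node
 * memberships and the peer-address route in sync with the address
 * state.  Deletion also bumps the IPv6 route genid so cached routes
 * are revalidated.
 */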
6297 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6298 {
6299 struct net *net = dev_net(ifp->idev->dev);
6300
6301 if (event)
6302 ASSERT_RTNL();
6303
6304 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6305
6306 switch (event) {
6307 case RTM_NEWADDR:
6308 /*
6309 * If the address was optimistic we inserted the route at the
6310 * start of our DAD process, so we don't need to do it again.
6311 * If the device was taken down in the middle of the DAD
6312 * cycle there is a race where we could get here without a
6313 * host route, so nothing to insert. That will be fixed when
6314 * the device is brought up.
6315 */
6316 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6317 ip6_ins_rt(net, ifp->rt);
6318 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6319 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6320 &ifp->addr, ifp->idev->dev->name);
6321 }
6322
6323 if (ifp->idev->cnf.forwarding)
6324 addrconf_join_anycast(ifp);
6325 if (!ipv6_addr_any(&ifp->peer_addr))
6326 addrconf_prefix_route(&ifp->peer_addr, 128,
6327 ifp->rt_priority, ifp->idev->dev,
6328 0, 0, GFP_ATOMIC);
6329 break;
6330 case RTM_DELADDR:
6331 if (ifp->idev->cnf.forwarding)
6332 addrconf_leave_anycast(ifp);
6333 addrconf_leave_solict(ifp->idev, &ifp->addr);
6334 if (!ipv6_addr_any(&ifp->peer_addr)) {
6335 struct fib6_info *rt;
6336
6337 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6338 ifp->idev->dev, 0, 0,
6339 false);
6340 if (rt)
6341 ip6_del_rt(net, rt, false);
6342 }
6343 if (ifp->rt) {
6344 ip6_del_rt(net, ifp->rt, false);
6345 ifp->rt = NULL;
6346 }
6347 rt_genid_bump_ipv6(net);
6348 break;
6349 }
6350 atomic_inc(&net->ipv6.dev_addr_genid);
6351 }
6352
6353 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6354 {
6355 if (likely(ifp->idev->dead == 0))
6356 __ipv6_ifa_notify(event, ifp);
6357 }
6358
6359 #ifdef CONFIG_SYSCTL
6360
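/* Most of the sysctl handlers below follow a common pattern: copy the
 * value into a local ctl_table, let proc_dointvec() act on the copy,
 * and only then apply the change - typically under RTNL, taken with
 * rtnl_trylock()/restart_syscall() to avoid deadlocks - so the stored
 * value and its side effects never go out of sync.
 */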
6361 static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write,
6362 void *buffer, size_t *lenp, loff_t *ppos)
6363 {
6364 int *valp = ctl->data;
6365 int val = *valp;
6366 loff_t pos = *ppos;
6367 struct ctl_table lctl;
6368 int ret;
6369
6370 /*
6371 * ctl->data points to idev->cnf.forwarding; we must not
6372 * modify it until we hold the rtnl lock.
6373 */
6374 lctl = *ctl;
6375 lctl.data = &val;
6376
6377 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6378
6379 if (write)
6380 ret = addrconf_fixup_forwarding(ctl, valp, val);
6381 if (ret)
6382 *ppos = pos;
6383 return ret;
6384 }
6385
6386 static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write,
6387 void *buffer, size_t *lenp, loff_t *ppos)
6388 {
6389 struct inet6_dev *idev = ctl->extra1;
6390 int min_mtu = IPV6_MIN_MTU;
6391 struct ctl_table lctl;
6392
6393 lctl = *ctl;
6394 lctl.extra1 = &min_mtu;
6395 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6396
6397 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6398 }
6399
6400 static void dev_disable_change(struct inet6_dev *idev)
6401 {
6402 struct netdev_notifier_info info;
6403
6404 if (!idev || !idev->dev)
6405 return;
6406
6407 netdev_notifier_info_init(&info, idev->dev);
6408 if (idev->cnf.disable_ipv6)
6409 addrconf_notify(NULL, NETDEV_DOWN, &info);
6410 else
6411 addrconf_notify(NULL, NETDEV_UP, &info);
6412 }
6413
6414 static void addrconf_disable_change(struct net *net, __s32 newf)
6415 {
6416 struct net_device *dev;
6417 struct inet6_dev *idev;
6418
6419 for_each_netdev(net, dev) {
6420 idev = __in6_dev_get(dev);
6421 if (idev) {
6422 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6423
6424 WRITE_ONCE(idev->cnf.disable_ipv6, newf);
6425 if (changed)
6426 dev_disable_change(idev);
6427 }
6428 }
6429 }
6430
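/* Apply a change to the disable_ipv6 sysctl.  Writing the "default"
 * value only updates the template; writing "all" propagates to every
 * device, and any per-device transition goes through
 * dev_disable_change() to tear down or restart IPv6 on the interface.
 */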
6431 static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
6432 {
6433 struct net *net = (struct net *)table->extra2;
6434 int old;
6435
6436 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6437 WRITE_ONCE(*p, newf);
6438 return 0;
6439 }
6440
6441 if (!rtnl_trylock())
6442 return restart_syscall();
6443
6444 old = *p;
6445 WRITE_ONCE(*p, newf);
6446
6447 if (p == &net->ipv6.devconf_all->disable_ipv6) {
6448 WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
6449 addrconf_disable_change(net, newf);
6450 } else if ((!newf) ^ (!old))
6451 dev_disable_change((struct inet6_dev *)table->extra1);
6452
6453 rtnl_unlock();
6454 return 0;
6455 }
6456
6457 static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write,
6458 void *buffer, size_t *lenp, loff_t *ppos)
6459 {
6460 int *valp = ctl->data;
6461 int val = *valp;
6462 loff_t pos = *ppos;
6463 struct ctl_table lctl;
6464 int ret;
6465
6466 /*
6467 * ctl->data points to idev->cnf.disable_ipv6; we must not
6468 * modify it until we hold the rtnl lock.
6469 */
6470 lctl = *ctl;
6471 lctl.data = &val;
6472
6473 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6474
6475 if (write)
6476 ret = addrconf_disable_ipv6(ctl, valp, val);
6477 if (ret)
6478 *ppos = pos;
6479 return ret;
6480 }
6481
6482 static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write,
6483 void *buffer, size_t *lenp, loff_t *ppos)
6484 {
6485 int *valp = ctl->data;
6486 int ret;
6487 int old, new;
6488
6489 old = *valp;
6490 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6491 new = *valp;
6492
6493 if (write && old != new) {
6494 struct net *net = ctl->extra2;
6495
6496 if (!rtnl_trylock())
6497 return restart_syscall();
6498
6499 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6500 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6501 NETCONFA_PROXY_NEIGH,
6502 NETCONFA_IFINDEX_DEFAULT,
6503 net->ipv6.devconf_dflt);
6504 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6505 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6506 NETCONFA_PROXY_NEIGH,
6507 NETCONFA_IFINDEX_ALL,
6508 net->ipv6.devconf_all);
6509 else {
6510 struct inet6_dev *idev = ctl->extra1;
6511
6512 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6513 NETCONFA_PROXY_NEIGH,
6514 idev->dev->ifindex,
6515 &idev->cnf);
6516 }
6517 rtnl_unlock();
6518 }
6519
6520 return ret;
6521 }
6522
6523 static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
6524 void *buffer, size_t *lenp,
6525 loff_t *ppos)
6526 {
6527 int ret = 0;
6528 u32 new_val;
6529 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6530 struct net *net = (struct net *)ctl->extra2;
6531 struct ctl_table tmp = {
6532 .data = &new_val,
6533 .maxlen = sizeof(new_val),
6534 .mode = ctl->mode,
6535 };
6536
6537 if (!rtnl_trylock())
6538 return restart_syscall();
6539
6540 new_val = *((u32 *)ctl->data);
6541
6542 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6543 if (ret != 0)
6544 goto out;
6545
6546 if (write) {
6547 if (check_addr_gen_mode(new_val) < 0) {
6548 ret = -EINVAL;
6549 goto out;
6550 }
6551
6552 if (idev) {
6553 if (check_stable_privacy(idev, net, new_val) < 0) {
6554 ret = -EINVAL;
6555 goto out;
6556 }
6557
6558 if (idev->cnf.addr_gen_mode != new_val) {
6559 WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
6560 addrconf_init_auto_addrs(idev->dev);
6561 }
6562 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6563 struct net_device *dev;
6564
6565 WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
6566 for_each_netdev(net, dev) {
6567 idev = __in6_dev_get(dev);
6568 if (idev &&
6569 idev->cnf.addr_gen_mode != new_val) {
6570 WRITE_ONCE(idev->cnf.addr_gen_mode,
6571 new_val);
6572 addrconf_init_auto_addrs(idev->dev);
6573 }
6574 }
6575 }
6576
6577 WRITE_ONCE(*((u32 *)ctl->data), new_val);
6578 }
6579
6580 out:
6581 rtnl_unlock();
6582
6583 return ret;
6584 }
6585
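/* The stable_secret sysctl is exchanged as an IPv6-address-formatted
 * string.  It cannot be read before it has been initialized, cannot be
 * accessed through the "all" table, and a successful write switches
 * the affected device(s) to IN6_ADDR_GEN_MODE_STABLE_PRIVACY.
 */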
6586 static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write,
6587 void *buffer, size_t *lenp,
6588 loff_t *ppos)
6589 {
6590 int err;
6591 struct in6_addr addr;
6592 char str[IPV6_MAX_STRLEN];
6593 struct ctl_table lctl = *ctl;
6594 struct net *net = ctl->extra2;
6595 struct ipv6_stable_secret *secret = ctl->data;
6596
6597 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6598 return -EIO;
6599
6600 lctl.maxlen = IPV6_MAX_STRLEN;
6601 lctl.data = str;
6602
6603 if (!rtnl_trylock())
6604 return restart_syscall();
6605
6606 if (!write && !secret->initialized) {
6607 err = -EIO;
6608 goto out;
6609 }
6610
6611 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6612 if (err >= sizeof(str)) {
6613 err = -EIO;
6614 goto out;
6615 }
6616
6617 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6618 if (err || !write)
6619 goto out;
6620
6621 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6622 err = -EIO;
6623 goto out;
6624 }
6625
6626 secret->initialized = true;
6627 secret->secret = addr;
6628
6629 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6630 struct net_device *dev;
6631
6632 for_each_netdev(net, dev) {
6633 struct inet6_dev *idev = __in6_dev_get(dev);
6634
6635 if (idev) {
6636 WRITE_ONCE(idev->cnf.addr_gen_mode,
6637 IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6638 }
6639 }
6640 } else {
6641 struct inet6_dev *idev = ctl->extra1;
6642
6643 WRITE_ONCE(idev->cnf.addr_gen_mode,
6644 IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6645 }
6646
6647 out:
6648 rtnl_unlock();
6649
6650 return err;
6651 }
6652
6653 static
6654 int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl,
6655 int write, void *buffer,
6656 size_t *lenp,
6657 loff_t *ppos)
6658 {
6659 int *valp = ctl->data;
6660 int val = *valp;
6661 loff_t pos = *ppos;
6662 struct ctl_table lctl;
6663 int ret;
6664
6665 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6666 * we must not modify it until we hold the rtnl lock.
6667 */
6668 lctl = *ctl;
6669 lctl.data = &val;
6670
6671 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6672
6673 if (write)
6674 ret = addrconf_fixup_linkdown(ctl, valp, val);
6675 if (ret)
6676 *ppos = pos;
6677 return ret;
6678 }
6679
6680 static
6681 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6682 {
6683 if (rt) {
6684 if (action)
6685 rt->dst.flags |= DST_NOPOLICY;
6686 else
6687 rt->dst.flags &= ~DST_NOPOLICY;
6688 }
6689 }
6690
6691 static
6692 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6693 {
6694 struct inet6_ifaddr *ifa;
6695
6696 read_lock_bh(&idev->lock);
6697 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6698 spin_lock(&ifa->lock);
6699 if (ifa->rt) {
6700 /* host routes only use builtin fib6_nh */
6701 struct fib6_nh *nh = ifa->rt->fib6_nh;
6702 int cpu;
6703
6704 rcu_read_lock();
6705 ifa->rt->dst_nopolicy = val ? true : false;
6706 if (nh->rt6i_pcpu) {
6707 for_each_possible_cpu(cpu) {
6708 struct rt6_info **rtp;
6709
6710 rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6711 addrconf_set_nopolicy(*rtp, val);
6712 }
6713 }
6714 rcu_read_unlock();
6715 }
6716 spin_unlock(&ifa->lock);
6717 }
6718 read_unlock_bh(&idev->lock);
6719 }
6720
6721 static
6722 int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
6723 {
6724 struct net *net = (struct net *)ctl->extra2;
6725 struct inet6_dev *idev;
6726
6727 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6728 WRITE_ONCE(*valp, val);
6729 return 0;
6730 }
6731
6732 if (!rtnl_trylock())
6733 return restart_syscall();
6734
6735 WRITE_ONCE(*valp, val);
6736
6737 if (valp == &net->ipv6.devconf_all->disable_policy) {
6738 struct net_device *dev;
6739
6740 for_each_netdev(net, dev) {
6741 idev = __in6_dev_get(dev);
6742 if (idev)
6743 addrconf_disable_policy_idev(idev, val);
6744 }
6745 } else {
6746 idev = (struct inet6_dev *)ctl->extra1;
6747 addrconf_disable_policy_idev(idev, val);
6748 }
6749
6750 rtnl_unlock();
6751 return 0;
6752 }
6753
6754 static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write,
6755 void *buffer, size_t *lenp, loff_t *ppos)
6756 {
6757 int *valp = ctl->data;
6758 int val = *valp;
6759 loff_t pos = *ppos;
6760 struct ctl_table lctl;
6761 int ret;
6762
6763 lctl = *ctl;
6764 lctl.data = &val;
6765 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6766
6767 if (write && (*valp != val))
6768 ret = addrconf_disable_policy(ctl, valp, val);
6769
6770 if (ret)
6771 *ppos = pos;
6772
6773 return ret;
6774 }
6775
6776 static int minus_one = -1;
6777 static const int two_five_five = 255;
6778 static u32 ioam6_if_id_max = U16_MAX;
6779
6780 static const struct ctl_table addrconf_sysctl[] = {
6781 {
6782 .procname = "forwarding",
6783 .data = &ipv6_devconf.forwarding,
6784 .maxlen = sizeof(int),
6785 .mode = 0644,
6786 .proc_handler = addrconf_sysctl_forward,
6787 },
6788 {
6789 .procname = "hop_limit",
6790 .data = &ipv6_devconf.hop_limit,
6791 .maxlen = sizeof(int),
6792 .mode = 0644,
6793 .proc_handler = proc_dointvec_minmax,
6794 .extra1 = (void *)SYSCTL_ONE,
6795 .extra2 = (void *)&two_five_five,
6796 },
6797 {
6798 .procname = "mtu",
6799 .data = &ipv6_devconf.mtu6,
6800 .maxlen = sizeof(int),
6801 .mode = 0644,
6802 .proc_handler = addrconf_sysctl_mtu,
6803 },
6804 {
6805 .procname = "accept_ra",
6806 .data = &ipv6_devconf.accept_ra,
6807 .maxlen = sizeof(int),
6808 .mode = 0644,
6809 .proc_handler = proc_dointvec,
6810 },
6811 {
6812 .procname = "accept_redirects",
6813 .data = &ipv6_devconf.accept_redirects,
6814 .maxlen = sizeof(int),
6815 .mode = 0644,
6816 .proc_handler = proc_dointvec,
6817 },
6818 {
6819 .procname = "autoconf",
6820 .data = &ipv6_devconf.autoconf,
6821 .maxlen = sizeof(int),
6822 .mode = 0644,
6823 .proc_handler = proc_dointvec,
6824 },
6825 {
6826 .procname = "dad_transmits",
6827 .data = &ipv6_devconf.dad_transmits,
6828 .maxlen = sizeof(int),
6829 .mode = 0644,
6830 .proc_handler = proc_dointvec,
6831 },
6832 {
6833 .procname = "router_solicitations",
6834 .data = &ipv6_devconf.rtr_solicits,
6835 .maxlen = sizeof(int),
6836 .mode = 0644,
6837 .proc_handler = proc_dointvec_minmax,
6838 .extra1 = &minus_one,
6839 },
6840 {
6841 .procname = "router_solicitation_interval",
6842 .data = &ipv6_devconf.rtr_solicit_interval,
6843 .maxlen = sizeof(int),
6844 .mode = 0644,
6845 .proc_handler = proc_dointvec_jiffies,
6846 },
6847 {
6848 .procname = "router_solicitation_max_interval",
6849 .data = &ipv6_devconf.rtr_solicit_max_interval,
6850 .maxlen = sizeof(int),
6851 .mode = 0644,
6852 .proc_handler = proc_dointvec_jiffies,
6853 },
6854 {
6855 .procname = "router_solicitation_delay",
6856 .data = &ipv6_devconf.rtr_solicit_delay,
6857 .maxlen = sizeof(int),
6858 .mode = 0644,
6859 .proc_handler = proc_dointvec_jiffies,
6860 },
6861 {
6862 .procname = "force_mld_version",
6863 .data = &ipv6_devconf.force_mld_version,
6864 .maxlen = sizeof(int),
6865 .mode = 0644,
6866 .proc_handler = proc_dointvec,
6867 },
6868 {
6869 .procname = "mldv1_unsolicited_report_interval",
6870 .data =
6871 &ipv6_devconf.mldv1_unsolicited_report_interval,
6872 .maxlen = sizeof(int),
6873 .mode = 0644,
6874 .proc_handler = proc_dointvec_ms_jiffies,
6875 },
6876 {
6877 .procname = "mldv2_unsolicited_report_interval",
6878 .data =
6879 &ipv6_devconf.mldv2_unsolicited_report_interval,
6880 .maxlen = sizeof(int),
6881 .mode = 0644,
6882 .proc_handler = proc_dointvec_ms_jiffies,
6883 },
6884 {
6885 .procname = "use_tempaddr",
6886 .data = &ipv6_devconf.use_tempaddr,
6887 .maxlen = sizeof(int),
6888 .mode = 0644,
6889 .proc_handler = proc_dointvec,
6890 },
6891 {
6892 .procname = "temp_valid_lft",
6893 .data = &ipv6_devconf.temp_valid_lft,
6894 .maxlen = sizeof(int),
6895 .mode = 0644,
6896 .proc_handler = proc_dointvec,
6897 },
6898 {
6899 .procname = "temp_prefered_lft",
6900 .data = &ipv6_devconf.temp_prefered_lft,
6901 .maxlen = sizeof(int),
6902 .mode = 0644,
6903 .proc_handler = proc_dointvec,
6904 },
6905 {
6906 .procname = "regen_min_advance",
6907 .data = &ipv6_devconf.regen_min_advance,
6908 .maxlen = sizeof(int),
6909 .mode = 0644,
6910 .proc_handler = proc_dointvec,
6911 },
6912 {
6913 .procname = "regen_max_retry",
6914 .data = &ipv6_devconf.regen_max_retry,
6915 .maxlen = sizeof(int),
6916 .mode = 0644,
6917 .proc_handler = proc_dointvec,
6918 },
6919 {
6920 .procname = "max_desync_factor",
6921 .data = &ipv6_devconf.max_desync_factor,
6922 .maxlen = sizeof(int),
6923 .mode = 0644,
6924 .proc_handler = proc_dointvec,
6925 },
6926 {
6927 .procname = "max_addresses",
6928 .data = &ipv6_devconf.max_addresses,
6929 .maxlen = sizeof(int),
6930 .mode = 0644,
6931 .proc_handler = proc_dointvec,
6932 },
6933 {
6934 .procname = "accept_ra_defrtr",
6935 .data = &ipv6_devconf.accept_ra_defrtr,
6936 .maxlen = sizeof(int),
6937 .mode = 0644,
6938 .proc_handler = proc_dointvec,
6939 },
6940 {
6941 .procname = "ra_defrtr_metric",
6942 .data = &ipv6_devconf.ra_defrtr_metric,
6943 .maxlen = sizeof(u32),
6944 .mode = 0644,
6945 .proc_handler = proc_douintvec_minmax,
6946 .extra1 = (void *)SYSCTL_ONE,
6947 },
6948 {
6949 .procname = "accept_ra_min_hop_limit",
6950 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6951 .maxlen = sizeof(int),
6952 .mode = 0644,
6953 .proc_handler = proc_dointvec,
6954 },
6955 {
6956 .procname = "accept_ra_min_lft",
6957 .data = &ipv6_devconf.accept_ra_min_lft,
6958 .maxlen = sizeof(int),
6959 .mode = 0644,
6960 .proc_handler = proc_dointvec,
6961 },
6962 {
6963 .procname = "accept_ra_pinfo",
6964 .data = &ipv6_devconf.accept_ra_pinfo,
6965 .maxlen = sizeof(int),
6966 .mode = 0644,
6967 .proc_handler = proc_dointvec,
6968 },
6969 {
6970 .procname = "ra_honor_pio_life",
6971 .data = &ipv6_devconf.ra_honor_pio_life,
6972 .maxlen = sizeof(u8),
6973 .mode = 0644,
6974 .proc_handler = proc_dou8vec_minmax,
6975 .extra1 = SYSCTL_ZERO,
6976 .extra2 = SYSCTL_ONE,
6977 },
6978 {
6979 .procname = "ra_honor_pio_pflag",
6980 .data = &ipv6_devconf.ra_honor_pio_pflag,
6981 .maxlen = sizeof(u8),
6982 .mode = 0644,
6983 .proc_handler = proc_dou8vec_minmax,
6984 .extra1 = SYSCTL_ZERO,
6985 .extra2 = SYSCTL_ONE,
6986 },
6987 #ifdef CONFIG_IPV6_ROUTER_PREF
6988 {
6989 .procname = "accept_ra_rtr_pref",
6990 .data = &ipv6_devconf.accept_ra_rtr_pref,
6991 .maxlen = sizeof(int),
6992 .mode = 0644,
6993 .proc_handler = proc_dointvec,
6994 },
6995 {
6996 .procname = "router_probe_interval",
6997 .data = &ipv6_devconf.rtr_probe_interval,
6998 .maxlen = sizeof(int),
6999 .mode = 0644,
7000 .proc_handler = proc_dointvec_jiffies,
7001 },
7002 #ifdef CONFIG_IPV6_ROUTE_INFO
7003 {
7004 .procname = "accept_ra_rt_info_min_plen",
7005 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
7006 .maxlen = sizeof(int),
7007 .mode = 0644,
7008 .proc_handler = proc_dointvec,
7009 },
7010 {
7011 .procname = "accept_ra_rt_info_max_plen",
7012 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
7013 .maxlen = sizeof(int),
7014 .mode = 0644,
7015 .proc_handler = proc_dointvec,
7016 },
7017 #endif
7018 #endif
7019 {
7020 .procname = "accept_ra_rt_table",
7021 .data = &ipv6_devconf.accept_ra_rt_table,
7022 .maxlen = sizeof(int),
7023 .mode = 0644,
7024 .proc_handler = proc_dointvec,
7025 },
7026 {
7027 .procname = "proxy_ndp",
7028 .data = &ipv6_devconf.proxy_ndp,
7029 .maxlen = sizeof(int),
7030 .mode = 0644,
7031 .proc_handler = addrconf_sysctl_proxy_ndp,
7032 },
7033 {
7034 .procname = "accept_source_route",
7035 .data = &ipv6_devconf.accept_source_route,
7036 .maxlen = sizeof(int),
7037 .mode = 0644,
7038 .proc_handler = proc_dointvec,
7039 },
7040 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
7041 {
7042 .procname = "optimistic_dad",
7043 .data = &ipv6_devconf.optimistic_dad,
7044 .maxlen = sizeof(int),
7045 .mode = 0644,
7046 .proc_handler = proc_dointvec,
7047 },
7048 {
7049 .procname = "use_optimistic",
7050 .data = &ipv6_devconf.use_optimistic,
7051 .maxlen = sizeof(int),
7052 .mode = 0644,
7053 .proc_handler = proc_dointvec,
7054 },
7055 #endif
7056 #ifdef CONFIG_IPV6_MROUTE
7057 {
7058 .procname = "mc_forwarding",
7059 .data = &ipv6_devconf.mc_forwarding,
7060 .maxlen = sizeof(int),
7061 .mode = 0444,
7062 .proc_handler = proc_dointvec,
7063 },
7064 #endif
7065 {
7066 .procname = "disable_ipv6",
7067 .data = &ipv6_devconf.disable_ipv6,
7068 .maxlen = sizeof(int),
7069 .mode = 0644,
7070 .proc_handler = addrconf_sysctl_disable,
7071 },
7072 {
7073 .procname = "accept_dad",
7074 .data = &ipv6_devconf.accept_dad,
7075 .maxlen = sizeof(int),
7076 .mode = 0644,
7077 .proc_handler = proc_dointvec,
7078 },
7079 {
7080 .procname = "force_tllao",
7081 .data = &ipv6_devconf.force_tllao,
7082 .maxlen = sizeof(int),
7083 .mode = 0644,
7084 .proc_handler = proc_dointvec
7085 },
7086 {
7087 .procname = "ndisc_notify",
7088 .data = &ipv6_devconf.ndisc_notify,
7089 .maxlen = sizeof(int),
7090 .mode = 0644,
7091 .proc_handler = proc_dointvec
7092 },
7093 {
7094 .procname = "suppress_frag_ndisc",
7095 .data = &ipv6_devconf.suppress_frag_ndisc,
7096 .maxlen = sizeof(int),
7097 .mode = 0644,
7098 .proc_handler = proc_dointvec
7099 },
7100 {
7101 .procname = "accept_ra_from_local",
7102 .data = &ipv6_devconf.accept_ra_from_local,
7103 .maxlen = sizeof(int),
7104 .mode = 0644,
7105 .proc_handler = proc_dointvec,
7106 },
7107 {
7108 .procname = "accept_ra_mtu",
7109 .data = &ipv6_devconf.accept_ra_mtu,
7110 .maxlen = sizeof(int),
7111 .mode = 0644,
7112 .proc_handler = proc_dointvec,
7113 },
7114 {
7115 .procname = "stable_secret",
7116 .data = &ipv6_devconf.stable_secret,
7117 .maxlen = IPV6_MAX_STRLEN,
7118 .mode = 0600,
7119 .proc_handler = addrconf_sysctl_stable_secret,
7120 },
7121 {
7122 .procname = "use_oif_addrs_only",
7123 .data = &ipv6_devconf.use_oif_addrs_only,
7124 .maxlen = sizeof(int),
7125 .mode = 0644,
7126 .proc_handler = proc_dointvec,
7127 },
7128 {
7129 .procname = "ignore_routes_with_linkdown",
7130 .data = &ipv6_devconf.ignore_routes_with_linkdown,
7131 .maxlen = sizeof(int),
7132 .mode = 0644,
7133 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
7134 },
7135 {
7136 .procname = "drop_unicast_in_l2_multicast",
7137 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
7138 .maxlen = sizeof(int),
7139 .mode = 0644,
7140 .proc_handler = proc_dointvec,
7141 },
7142 {
7143 .procname = "drop_unsolicited_na",
7144 .data = &ipv6_devconf.drop_unsolicited_na,
7145 .maxlen = sizeof(int),
7146 .mode = 0644,
7147 .proc_handler = proc_dointvec,
7148 },
7149 {
7150 .procname = "keep_addr_on_down",
7151 .data = &ipv6_devconf.keep_addr_on_down,
7152 .maxlen = sizeof(int),
7153 .mode = 0644,
7154 .proc_handler = proc_dointvec,
7155
7156 },
7157 {
7158 .procname = "seg6_enabled",
7159 .data = &ipv6_devconf.seg6_enabled,
7160 .maxlen = sizeof(int),
7161 .mode = 0644,
7162 .proc_handler = proc_dointvec,
7163 },
7164 #ifdef CONFIG_IPV6_SEG6_HMAC
7165 {
7166 .procname = "seg6_require_hmac",
7167 .data = &ipv6_devconf.seg6_require_hmac,
7168 .maxlen = sizeof(int),
7169 .mode = 0644,
7170 .proc_handler = proc_dointvec,
7171 },
7172 #endif
7173 {
7174 .procname = "enhanced_dad",
7175 .data = &ipv6_devconf.enhanced_dad,
7176 .maxlen = sizeof(int),
7177 .mode = 0644,
7178 .proc_handler = proc_dointvec,
7179 },
7180 {
7181 .procname = "addr_gen_mode",
7182 .data = &ipv6_devconf.addr_gen_mode,
7183 .maxlen = sizeof(int),
7184 .mode = 0644,
7185 .proc_handler = addrconf_sysctl_addr_gen_mode,
7186 },
7187 {
7188 .procname = "disable_policy",
7189 .data = &ipv6_devconf.disable_policy,
7190 .maxlen = sizeof(int),
7191 .mode = 0644,
7192 .proc_handler = addrconf_sysctl_disable_policy,
7193 },
7194 {
7195 .procname = "ndisc_tclass",
7196 .data = &ipv6_devconf.ndisc_tclass,
7197 .maxlen = sizeof(int),
7198 .mode = 0644,
7199 .proc_handler = proc_dointvec_minmax,
7200 .extra1 = (void *)SYSCTL_ZERO,
7201 .extra2 = (void *)&two_five_five,
7202 },
7203 {
7204 .procname = "rpl_seg_enabled",
7205 .data = &ipv6_devconf.rpl_seg_enabled,
7206 .maxlen = sizeof(int),
7207 .mode = 0644,
7208 .proc_handler = proc_dointvec,
7209 },
7210 {
7211 .procname = "ioam6_enabled",
7212 .data = &ipv6_devconf.ioam6_enabled,
7213 .maxlen = sizeof(u8),
7214 .mode = 0644,
7215 .proc_handler = proc_dou8vec_minmax,
7216 .extra1 = (void *)SYSCTL_ZERO,
7217 .extra2 = (void *)SYSCTL_ONE,
7218 },
7219 {
7220 .procname = "ioam6_id",
7221 .data = &ipv6_devconf.ioam6_id,
7222 .maxlen = sizeof(u32),
7223 .mode = 0644,
7224 .proc_handler = proc_douintvec_minmax,
7225 .extra1 = (void *)SYSCTL_ZERO,
7226 .extra2 = (void *)&ioam6_if_id_max,
7227 },
7228 {
7229 .procname = "ioam6_id_wide",
7230 .data = &ipv6_devconf.ioam6_id_wide,
7231 .maxlen = sizeof(u32),
7232 .mode = 0644,
7233 .proc_handler = proc_douintvec,
7234 },
7235 {
7236 .procname = "ndisc_evict_nocarrier",
7237 .data = &ipv6_devconf.ndisc_evict_nocarrier,
7238 .maxlen = sizeof(u8),
7239 .mode = 0644,
7240 .proc_handler = proc_dou8vec_minmax,
7241 .extra1 = (void *)SYSCTL_ZERO,
7242 .extra2 = (void *)SYSCTL_ONE,
7243 },
7244 {
7245 .procname = "accept_untracked_na",
7246 .data = &ipv6_devconf.accept_untracked_na,
7247 .maxlen = sizeof(int),
7248 .mode = 0644,
7249 .proc_handler = proc_dointvec_minmax,
7250 .extra1 = SYSCTL_ZERO,
7251 .extra2 = SYSCTL_TWO,
7252 },
7253 };
7254
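/* Register a duplicated copy of addrconf_sysctl[] for one devconf
 * instance: rebind each .data pointer into @p, stash idev/net in the
 * otherwise unused extra1/extra2 fields, hook the table up under
 * net/ipv6/conf/<dev_name> and announce it with RTM_NEWNETCONF.
 */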
7255 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7256 struct inet6_dev *idev, struct ipv6_devconf *p)
7257 {
7258 size_t table_size = ARRAY_SIZE(addrconf_sysctl);
7259 int i, ifindex;
7260 struct ctl_table *table;
7261 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7262
7263 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7264 if (!table)
7265 goto out;
7266
7267 for (i = 0; i < table_size; i++) {
7268 table[i].data += (char *)p - (char *)&ipv6_devconf;
7269 /* If one of these is already set, then it is not safe to
7270 * overwrite either of them: this makes proc_dointvec_minmax
7271 * usable.
7272 */
7273 if (!table[i].extra1 && !table[i].extra2) {
7274 table[i].extra1 = idev; /* embedded; no ref */
7275 table[i].extra2 = net;
7276 }
7277 }
7278
7279 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7280
7281 p->sysctl_header = register_net_sysctl_sz(net, path, table,
7282 table_size);
7283 if (!p->sysctl_header)
7284 goto free;
7285
7286 if (!strcmp(dev_name, "all"))
7287 ifindex = NETCONFA_IFINDEX_ALL;
7288 else if (!strcmp(dev_name, "default"))
7289 ifindex = NETCONFA_IFINDEX_DEFAULT;
7290 else
7291 ifindex = idev->dev->ifindex;
7292 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7293 ifindex, p);
7294 return 0;
7295
7296 free:
7297 kfree(table);
7298 out:
7299 return -ENOBUFS;
7300 }
7301
7302 static void __addrconf_sysctl_unregister(struct net *net,
7303 struct ipv6_devconf *p, int ifindex)
7304 {
7305 const struct ctl_table *table;
7306
7307 if (!p->sysctl_header)
7308 return;
7309
7310 table = p->sysctl_header->ctl_table_arg;
7311 unregister_net_sysctl_table(p->sysctl_header);
7312 p->sysctl_header = NULL;
7313 kfree(table);
7314
7315 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7316 }
7317
7318 static int addrconf_sysctl_register(struct inet6_dev *idev)
7319 {
7320 int err;
7321
7322 if (!sysctl_dev_name_is_allowed(idev->dev->name))
7323 return -EINVAL;
7324
7325 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7326 &ndisc_ifinfo_sysctl_change);
7327 if (err)
7328 return err;
7329 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7330 idev, &idev->cnf);
7331 if (err)
7332 neigh_sysctl_unregister(idev->nd_parms);
7333
7334 return err;
7335 }
7336
7337 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7338 {
7339 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7340 idev->dev->ifindex);
7341 neigh_sysctl_unregister(idev->nd_parms);
7342 }
7343
7344
7345 #endif
7346
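/* Per-namespace init: allocate the address hash table and the "all"
 * and "default" devconf copies (optionally inherited from init_net or
 * from the creating process's namespace, depending on
 * net_inherit_devconf()), then register their sysctl tables.
 */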
7347 static int __net_init addrconf_init_net(struct net *net)
7348 {
7349 int err = -ENOMEM;
7350 struct ipv6_devconf *all, *dflt;
7351
7352 spin_lock_init(&net->ipv6.addrconf_hash_lock);
7353 INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7354 net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7355 sizeof(struct hlist_head),
7356 GFP_KERNEL);
7357 if (!net->ipv6.inet6_addr_lst)
7358 goto err_alloc_addr;
7359
7360 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7361 if (!all)
7362 goto err_alloc_all;
7363
7364 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7365 if (!dflt)
7366 goto err_alloc_dflt;
7367
7368 if (!net_eq(net, &init_net)) {
7369 switch (net_inherit_devconf()) {
7370 case 1: /* copy from init_net */
7371 memcpy(all, init_net.ipv6.devconf_all,
7372 sizeof(ipv6_devconf));
7373 memcpy(dflt, init_net.ipv6.devconf_dflt,
7374 sizeof(ipv6_devconf_dflt));
7375 break;
7376 case 3: /* copy from the current netns */
7377 memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7378 sizeof(ipv6_devconf));
7379 memcpy(dflt,
7380 current->nsproxy->net_ns->ipv6.devconf_dflt,
7381 sizeof(ipv6_devconf_dflt));
7382 break;
7383 case 0:
7384 case 2:
7385 /* use compiled values */
7386 break;
7387 }
7388 }
7389
7390 /* these will be inherited by all namespaces */
7391 dflt->autoconf = ipv6_defaults.autoconf;
7392 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7393
7394 dflt->stable_secret.initialized = false;
7395 all->stable_secret.initialized = false;
7396
7397 net->ipv6.devconf_all = all;
7398 net->ipv6.devconf_dflt = dflt;
7399
7400 #ifdef CONFIG_SYSCTL
7401 err = __addrconf_sysctl_register(net, "all", NULL, all);
7402 if (err < 0)
7403 goto err_reg_all;
7404
7405 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7406 if (err < 0)
7407 goto err_reg_dflt;
7408 #endif
7409 return 0;
7410
7411 #ifdef CONFIG_SYSCTL
7412 err_reg_dflt:
7413 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7414 err_reg_all:
7415 kfree(dflt);
7416 net->ipv6.devconf_dflt = NULL;
7417 #endif
7418 err_alloc_dflt:
7419 kfree(all);
7420 net->ipv6.devconf_all = NULL;
7421 err_alloc_all:
7422 kfree(net->ipv6.inet6_addr_lst);
7423 err_alloc_addr:
7424 return err;
7425 }
7426
7427 static void __net_exit addrconf_exit_net(struct net *net)
7428 {
7429 int i;
7430
7431 #ifdef CONFIG_SYSCTL
7432 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7433 NETCONFA_IFINDEX_DEFAULT);
7434 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7435 NETCONFA_IFINDEX_ALL);
7436 #endif
7437 kfree(net->ipv6.devconf_dflt);
7438 net->ipv6.devconf_dflt = NULL;
7439 kfree(net->ipv6.devconf_all);
7440 net->ipv6.devconf_all = NULL;
7441
7442 cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7443 /*
7444 * Check hash table, then free it.
7445 */
7446 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7447 WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7448
7449 kfree(net->ipv6.inet6_addr_lst);
7450 net->ipv6.inet6_addr_lst = NULL;
7451 }
7452
7453 static struct pernet_operations addrconf_ops = {
7454 .init = addrconf_init_net,
7455 .exit = addrconf_exit_net,
7456 };
7457
7458 static struct rtnl_af_ops inet6_ops __read_mostly = {
7459 .family = AF_INET6,
7460 .fill_link_af = inet6_fill_link_af,
7461 .get_link_af_size = inet6_get_link_af_size,
7462 .validate_link_af = inet6_validate_link_af,
7463 .set_link_af = inet6_set_link_af,
7464 };
7465
7466 /*
7467 * Init / cleanup code
7468 */
7469
7470 int __init addrconf_init(void)
7471 {
7472 struct inet6_dev *idev;
7473 int err;
7474
7475 err = ipv6_addr_label_init();
7476 if (err < 0) {
7477 pr_crit("%s: cannot initialize default policy table: %d\n",
7478 __func__, err);
7479 goto out;
7480 }
7481
7482 err = register_pernet_subsys(&addrconf_ops);
7483 if (err < 0)
7484 goto out_addrlabel;
7485
7486 /* All works using addrconf_wq need to lock rtnl. */
7487 addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
7488 if (!addrconf_wq) {
7489 err = -ENOMEM;
7490 goto out_nowq;
7491 }
7492
7493 rtnl_lock();
7494 idev = ipv6_add_dev(blackhole_netdev);
7495 rtnl_unlock();
7496 if (IS_ERR(idev)) {
7497 err = PTR_ERR(idev);
7498 goto errlo;
7499 }
7500
7501 ip6_route_init_special_entries();
7502
7503 register_netdevice_notifier(&ipv6_dev_notf);
7504
7505 addrconf_verify(&init_net);
7506
7507 rtnl_af_register(&inet6_ops);
7508
7509 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7510 NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED);
7511 if (err < 0)
7512 goto errout;
7513
7514 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7515 inet6_rtm_newaddr, NULL, 0);
7516 if (err < 0)
7517 goto errout;
7518 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7519 inet6_rtm_deladdr, NULL, 0);
7520 if (err < 0)
7521 goto errout;
7522 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7523 inet6_rtm_getaddr, inet6_dump_ifaddr,
7524 RTNL_FLAG_DOIT_UNLOCKED |
7525 RTNL_FLAG_DUMP_UNLOCKED);
7526 if (err < 0)
7527 goto errout;
7528 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7529 NULL, inet6_dump_ifmcaddr,
7530 RTNL_FLAG_DUMP_UNLOCKED);
7531 if (err < 0)
7532 goto errout;
7533 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7534 NULL, inet6_dump_ifacaddr,
7535 RTNL_FLAG_DUMP_UNLOCKED);
7536 if (err < 0)
7537 goto errout;
7538 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7539 inet6_netconf_get_devconf,
7540 inet6_netconf_dump_devconf,
7541 RTNL_FLAG_DOIT_UNLOCKED |
7542 RTNL_FLAG_DUMP_UNLOCKED);
7543 if (err < 0)
7544 goto errout;
7545 err = ipv6_addr_label_rtnl_register();
7546 if (err < 0)
7547 goto errout;
7548
7549 return 0;
7550 errout:
7551 rtnl_unregister_all(PF_INET6);
7552 rtnl_af_unregister(&inet6_ops);
7553 unregister_netdevice_notifier(&ipv6_dev_notf);
7554 errlo:
7555 destroy_workqueue(addrconf_wq);
7556 out_nowq:
7557 unregister_pernet_subsys(&addrconf_ops);
7558 out_addrlabel:
7559 ipv6_addr_label_cleanup();
7560 out:
7561 return err;
7562 }
7563
7564 void addrconf_cleanup(void)
7565 {
7566 struct net_device *dev;
7567
7568 unregister_netdevice_notifier(&ipv6_dev_notf);
7569 unregister_pernet_subsys(&addrconf_ops);
7570 ipv6_addr_label_cleanup();
7571
7572 rtnl_af_unregister(&inet6_ops);
7573
7574 rtnl_lock();
7575
7576 /* clean dev list */
7577 for_each_netdev(&init_net, dev) {
7578 if (__in6_dev_get(dev) == NULL)
7579 continue;
7580 addrconf_ifdown(dev, true);
7581 }
7582 addrconf_ifdown(init_net.loopback_dev, true);
7583
7584 rtnl_unlock();
7585
7586 destroy_workqueue(addrconf_wq);
7587 }
7588